max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---
Desafio022.py | MarcioRSanches/Estudos_Python | 0 | 12787851 | nomecompleto = str(input('Digite o seu nome completo: ')).strip()
print('Analisando seu nome....')
print('Seu nome em maiúsculas é {} '.format(nomecompleto.upper()))
print('Seu nome em minúsculas é {} '.format(nomecompleto.lower()))
print('Seu nome apenas com a primeira letra maiúscula é {}'.format(nomecompleto.capitalize()))
print('Seu nome e sobrenome com a primeira letra maiúscula é {} '.format(nomecompleto.title()))
print('Seu nome tem ao todo, {} letras'.format(len(nomecompleto)-nomecompleto.count(' '))) # counts the letters by removing every space from the total length
print('Seu primeiro nome tem, {} letras'.format(nomecompleto.find(' ')))
separa = nomecompleto.split()
print(separa)
print('Seu primeiro nome é {} e tem {} letras'.format(separa[0], len(separa[0])))
| 4.09375 | 4 |
configs/deepsvg/defaults_fonts.py | naoto0804/deepsvg | 573 | 12787852 | from .default_icons import *
class Config(Config):  # extends (and intentionally shadows) the Config imported from default_icons
def __init__(self, num_gpus=1):
super().__init__(num_gpus=num_gpus)
# Dataset
self.data_dir = "./dataset/fonts_tensor/"
self.meta_filepath = "./dataset/fonts_meta.csv"
| 1.984375 | 2 |
enqueue.py | alex-groshev/SpyPy | 0 | 12787853 | <reponame>alex-groshev/SpyPy<gh_stars>0
#!/usr/bin/env python
import sys
import pika
from bson.json_util import dumps
from confspy import ConfSpyPy
from dataspy import DataSpyPy
def main():
if len(sys.argv) < 2:
print 'Please, specify a number of records to enqueue and regular expression (optional)!'
sys.exit(1)
configs = ConfSpyPy.load('spypy.cfg')
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue=configs['queue'], durable=False)
dataspypy = DataSpyPy(configs['host'], configs['port'])
records = dataspypy.get_unprocessed_records(int(sys.argv[1]), sys.argv[2] if len(sys.argv) == 3 else None)
for record in records:
print 'Enqueuing %s' % record['domain']
channel.basic_publish(exchange='', routing_key=configs['queue'], body=dumps(record))
connection.close()
if __name__ == '__main__':
main()
| 2.390625 | 2 |
urdf2optcontrol/__init__.py | abcamiletto/urdf2optcontrol | 0 | 12787854 | from urdf2optcontrol.optimizer import optimizer
| 1.171875 | 1 |
var/spack/repos/builtin/packages/py-pebble/package.py | lguyot/spack | 9 | 12787855 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPebble(PythonPackage):
"""Python API to manage threads and processes within an application."""
homepage = "https://github.com/noxdafox/pebble"
url = "https://pypi.io/packages/source/p/pebble/Pebble-4.5.0.tar.gz"
git = "https://github.com/noxdafox/pebble.git"
version('4.5.0', sha256='2de3cd11aa068e0c4a4abbaf8d4ecfdac409d8bfb78a4c211a01f6a4fb17a35f')
version('4.4.1', sha256='7c4d68a3479140cba74d7454d8190e2cb1a93213b44b5befe3c53c201beb8317')
version('4.3.10', sha256='c39a7bf99af6525fcf0783a8859fb10a4f20f4f988ddb66fd6fa7588f9c91731')
depends_on('py-setuptools', type=('build', 'run'))
depends_on('py-futures', type='run', when='^python@:2.9.9')
| 1.515625 | 2 |
example.py | BestPig/flask-rangerequest | 3 | 12787856 | <reponame>BestPig/flask-rangerequest<gh_stars>1-10
#!/usr/bin/env python3
from argparse import ArgumentParser
from datetime import datetime
from flask import Flask
from os import path
from flask_rangerequest import RangeRequest
def main() -> None:
args = arg_parser().parse_args()
app = create_app(args.file)
app.run(host='127.0.0.1', port=8080, debug=True)
def arg_parser() -> ArgumentParser:
parser = ArgumentParser(path.basename(__file__),
description='Run an RFC 7233 enabled webserver.')
parser.add_argument('-f', '--file', default=__file__)
return parser
def create_app(file_) -> Flask:
app = Flask(__name__)
size = path.getsize(file_)
with open(file_, 'rb') as f:
etag = RangeRequest.make_etag(f)
last_modified = datetime.utcnow()
@app.route('/', methods=('GET', 'POST'))
def index():
return RangeRequest(open(file_, 'rb'),
etag=etag,
last_modified=last_modified,
size=size).make_response()
return app
if __name__ == '__main__':
main()
| 2.875 | 3 |
tenant_workspace/apps.py | smegurus/smegurus-django | 1 | 12787857 | <reponame>smegurus/smegurus-django
from django.apps import AppConfig
class TenantWorkspaceConfig(AppConfig):
name = 'tenant_workspace'
| 1.335938 | 1 |
python/plot_helpers.py | xandaschofield/text-duplication | 0 | 12787858 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: <NAME>
from collections import defaultdict
import os
import re
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import numpy as np
from pandas import DataFrame
import scipy.stats
import seaborn as sns
import lda_metrics
N_PROPS_LIST = ['None', 0.001, 0.01, 0.1]
N_FREQS_LIST = [1, 2, 4, 8]
N_TOPICS_LIST = [5, 10, 20, 40, 80, 160, 320]
sns.set(style='whitegrid', context='poster')
to_print_name = {
'reusl-train': 'REUSL 25k',
'reusl-short': 'REUSL 2.5k',
'nyt-train': 'NYT 25k',
'nyt-short': 'NYT 2.5k',
}
def validate_fname(
fname,
extension,
file_prefix=None,
process=None,
n_topics=None,
n_props_list=N_PROPS_LIST,
n_freqs_list=N_FREQS_LIST,
n_topics_list=N_TOPICS_LIST):
if not fname.startswith(file_prefix + '-'):
return None
is_seq_file = (extension == 'txt')
is_exact_duplicate = (len(fname.split('-')) == 6 - int(is_seq_file))
if is_exact_duplicate:
if is_seq_file:
fname_regex = r'[a-z\-]+(?P<proc_id>\d+)-(?P<prop>[\d.]+|None)-(?P<freq>\d+).' + extension
else:
fname_regex = r'[a-z\-]+(?P<proc_id>\d+)-(?P<prop>[\d.]+|None)-(?P<freq>\d+)-(?P<topic_ct>\d+).' + extension
else:
if is_seq_file:
fname_regex = r'[a-z\-]+(?P<proc_id>\d+)-(?P<prop>[\d.]+|None).' + extension
else:
fname_regex = r'[a-z\-]+(?P<proc_id>\d+)-(?P<prop>[\d.]+|None)-(?P<topic_ct>\d+).' + extension
match_obj = re.match(fname_regex, fname)
if match_obj is None:
return None
ret_dict = {}
proc_id = int(match_obj.group('proc_id'))
if process is not None and proc_id != process:
return None
else:
ret_dict['proc_id'] = proc_id
prop = match_obj.group('prop')
if prop != 'None':
prop = float(prop)
if prop not in n_props_list:
return None
else:
ret_dict['prop'] = prop
if not is_seq_file:
topic_ct = int(match_obj.group('topic_ct'))
if not (n_topics is None) and topic_ct != n_topics:
return None
elif not (topic_ct in n_topics_list):
return None
else:
ret_dict['topic_ct'] = topic_ct
if is_exact_duplicate:
freq = int(match_obj.group('freq'))
if freq not in n_freqs_list:
return None
else:
ret_dict['freq'] = freq
return ret_dict
def make_entity_from_fields(n_topics, val, label, fields):
return {
'proportion': fields['prop'],
'c': fields.get('freq', 0),
        'k': n_topics,
'process_id': fields['proc_id'],
'value': val,
'label': label
}
def print_significances(entities):
val_collection = defaultdict(list)
for entity in entities:
key = "{} {} {} {}".format(
entity['label'],
entity['proportion'],
entity['k'],
entity['c'])
val_collection[key].append(entity['value'])
for key in sorted(val_collection.keys()):
print(key, np.mean(val_collection[key]), 1.96*scipy.stats.sem(val_collection[key]))
def plot_cmap_from_entity_list(entities, save_file, vmax=1.0, value_name="value"):
plt.figure(figsize=(25, 15))
if not entities:
raise ValueError("No entities in list")
dataf = DataFrame([e for e in entities])
g = sns.FacetGrid(
dataf,
col='k',
row='label')
cbar_ax = g.fig.add_axes([.92, .3, .02, .4])
g.map_dataframe(facet_heatmap, cbar_ax=cbar_ax, vmax=vmax)
g.set_titles(col_template="{col_name} topics", row_template="{row_name}")
g.fig.subplots_adjust(right=.9)
plt.savefig(save_file)
def plot_pplot_from_entity_list(entities, save_file, value_name="value"):
plt.figure(figsize=(25, 15))
if not entities:
raise ValueError("No entities in list")
dataf = DataFrame([e for e in entities])
g = sns.factorplot(
x='c',
y='value',
hue='proportion',
col='k',
row='label',
capsize=.2,
markers='.',
scale=0.5,
data=dataf)
g.set_titles(col_template="{col_name} topics", row_template="{row_name}")
g.set_axis_labels("# copies", value_name)
plt.savefig(save_file)
def print_data_table(entities):
dataf = DataFrame([e for e in entities])
data = dataf.pivot_table(index='proportion', columns='c', values='value')
print(data)
def facet_heatmap(data, color, vmax=1.0, **kws):
data = data.pivot_table(index='proportion', columns='c', values='value')
sns.heatmap(data, cmap='Blues', annot=True, fmt=".2f", vmin=0, vmax=vmax, **kws)
| 2.375 | 2 |
askci/apps/base/views/errors.py | hpsee/askci | 3 | 12787859 | <filename>askci/apps/base/views/errors.py
"""
Copyright (C) 2019-2020 <NAME>.
This Source Code Form is subject to the terms of the
Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed
with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
from django.shortcuts import render
def handler404(request, exception):
response = render(request, "base/404.html", {})
response.status_code = 404
return response
def handler500(request):
response = render(request, "base/500.html", {})
response.status_code = 500
return response
| 1.804688 | 2 |
fakeitems.py | CPriya14/Catalog_Project | 1 | 12787860 | <gh_stars>1-10
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy_utils import database_exists, drop_database, create_database
from database_setup import Category, CategoryItem, User, Base
engine = create_engine('sqlite:///itemcatalog.db')
# Clear database
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
# Create dummy user
user1 = User(name="<NAME>", email="<EMAIL>",
picture='https://pbs.twimg.com/profile_images/2671170543/18debd694829ed78203a5a36dd364160_400x400.png')
session.add(user1)
session.commit()
# Items for Strings
category1 = Category(name="strings", user_id=1)
session.add(category1)
session.commit()
item1 = CategoryItem(name="violin", user_id=1, description="Violin, family of stringed musical instruments having wooden bodies whose backs and fronts are slightly convex, the fronts pierced by two-shaped resonance holes. The instruments of the violin family have been the dominant bowed instruments because of their versatility, brilliance, and balance of tone, and their wide dynamic range. A variety of sounds may be produced, e.g., by different types of bowing or by plucking the string (see pizzicato). The violin has always been the most important member of the family, from the beginning being the principal orchestral instrument and holding an equivalent position in chamber music and as a solo instrument. The technique of the violin was developed much earlier than that of the viola or cello.", category=category1)
session.add(item1)
session.commit()
item2 = CategoryItem(name="viola", user_id=1, description="The viola is the alto instrument of the violin family (violin, viola, cello). It is constructed using the same components as the violin, the only difference being the larger size. ... In other words, the viola is too small in proportion to its tuning and this is the reason for its distinctive timbre.", category=category1)
session.add(item2)
session.commit()
item3 = CategoryItem(name="cello", user_id=1, description="The cello is used as a solo musical instrument, as well as in chamber music ensembles, string orchestras, as a member of the string section of symphony orchestras, and some rock bands. It is the second-largest bowed string instrument in the modern symphony orchestra, the double bass being the largest.", category=category1)
session.add(item3)
session.commit()
# Items for Woodwinds
category2 = Category(name="woodwinds", user_id=1)
session.add(category2)
session.commit()
item1 = CategoryItem(name="flute", user_id=1, description="A musical wind instrument consisting of a tube with a series of fingerholes or keys, in which the wind is directed against a sharp edge, either directly, as in the modern transverse flute, or through a flue, as in the recorder. an organ stop with wide flue pipes, having a flutelike tone.", category=category2)
session.add(item1)
session.commit()
item2 = CategoryItem(name="piccolo", user_id=1, description="Piccolo, in full flauto piccolo, highest-pitched woodwind instrument of orchestras and military bands. It is a small transverse (horizontally played) flute of conical or cylindrical bore, fitted with Boehm-system keywork and pitched an octave higher than the ordinary concert flute.", category=category2)
session.add(item2)
session.commit()
item3 = CategoryItem(name="oboe", user_id=1, description="The oboe is a woodwind instrument in the soprano register. The blowing end of the oboe's slim conical tube (head) turns into a small metal pipe to which two reeds are affixed.", category=category2)
session.add(item3)
session.commit()
# Items for Percussion
category3 = Category(name="percussion", user_id=1)
session.add(category3)
session.commit()
item1 = CategoryItem(name="marimba", user_id=1, description="The marimba is a percussion instrument consisting of a set of wooden bars struck with mallets to produce musical tones. Resonators suspended underneath the bars amplify their sound. ... This instrument is a type of idiophone, but with a more resonant and lower-pitched tessitura than the xylophone.", category=category3)
session.add(item1)
session.commit()
item2 = CategoryItem(name="timpani", user_id=1, description="Timpani or kettledrums are musical instruments in the percussion family. A type of drum, they consist of a skin called a head stretched over a large bowl traditionally made of copper. They are played by striking the head with a specialized drum stick called a timpani stick or timpani mallet. Timpani evolved from military drums to become a staple of the classical orchestra by the last third of the 18th century. Today, they are used in many types of musical ensembles, including concert bands, marching bands, orchestras, and even in some rock.", category=category3)
session.add(item2)
session.commit()
item3 = CategoryItem(name="xylophone", user_id=1, description="The xylophone is a musical instrument in the percussion family that consists of wooden bars struck by mallets.", category=category3)
session.add(item3)
session.commit()
# Items for Brass
category4 = Category(name="brass", user_id=1)
session.add(category4)
session.commit()
categories = session.query(Category).all()
for category in categories:
print "Category: " + category.name | 2.828125 | 3 |
umar.py | UMarda/CP19_025 | 1 | 12787861 | ##################################Question 4#####################################
a=input("Enter the paragraph : ")
b=" "
l=len(a)
for i in range(l):
    if i==0: # capitalize the first letter of the paragraph
        b+=a[i].upper()
    if a[i]=="." and i+1!=l: # after a full stop (even if no space follows), capitalize the next letter
        b+="."
        c=a[i+1].upper() # the capitalized letter that follows the full stop
b+=c
else:
d=len(b)
if b[d-1]!=a[i].upper():
b+=a[i]
print(b)
#################################################################################
| 3.609375 | 4 |
app/db/abstracts.py | lukecyx/fastlms | 0 | 12787862 | from abc import ABC
class CollectionManager(ABC):
    """Abstract base class for collection managers."""
| 1.398438 | 1 |
idact/detail/dask/dask_worker_deployment.py | intdata-bsc/idact | 5 | 12787863 | <gh_stars>1-10
"""This module contains the implementation of a Dask worker deployment."""
from contextlib import ExitStack
from idact.core.config import ClusterConfig
from idact.detail.deployment.cancel_on_exit import cancel_on_exit
from idact.detail.deployment.deserialize_generic_deployment import \
deserialize_generic_deployment
from idact.detail.deployment.generic_deployment import GenericDeployment
from idact.detail.helper.stage_info import stage_info
from idact.detail.log.get_logger import get_logger
from idact.detail.serialization.serializable import Serializable
from idact.detail.serialization.serializable_types import SerializableTypes
from idact.detail.tunnel.tunnel_internal import TunnelInternal
class DaskWorkerDeployment(Serializable):
"""Deployment of a Dask worker on a node."""
def __init__(self,
deployment: GenericDeployment,
bokeh_tunnel: TunnelInternal):
self._deployment = deployment
self._bokeh_tunnel = bokeh_tunnel
@property
def bokeh_tunnel(self) -> TunnelInternal:
"""Bokeh diagnostics server tunnel."""
return self._bokeh_tunnel
def cancel(self):
"""Cancels the scheduler deployment."""
log = get_logger(__name__)
with ExitStack() as stack:
stack.enter_context(
stage_info(log, "Cancelling worker deployment on %s.",
self._deployment.node.host))
stack.enter_context(cancel_on_exit(self._deployment))
self.cancel_local()
def cancel_local(self):
"""Closes the tunnel, but does not cancel the deployment."""
self._bokeh_tunnel.close()
@property
def deployment(self) -> GenericDeployment:
"""Generic deployment."""
return self._deployment
def serialize(self) -> dict:
return {'type': str(SerializableTypes.DASK_WORKER_DEPLOYMENT),
'deployment': self._deployment.serialize(),
'bokeh_tunnel_here': self._bokeh_tunnel.here,
'bokeh_tunnel_there': self._bokeh_tunnel.there}
@staticmethod
def deserialize(config: ClusterConfig,
serialized: dict) -> 'DaskWorkerDeployment':
try:
assert serialized['type'] == str(
SerializableTypes.DASK_WORKER_DEPLOYMENT)
deployment = deserialize_generic_deployment(
config=config,
serialized=serialized['deployment'])
bokeh_tunnel = deployment.node.tunnel(
there=serialized['bokeh_tunnel_there'],
here=serialized['bokeh_tunnel_here'])
return DaskWorkerDeployment(
deployment=deployment,
bokeh_tunnel=bokeh_tunnel)
except KeyError as e:
raise RuntimeError("Unable to deserialize.") from e
def __eq__(self, other):
return self.__dict__ == other.__dict__
| 1.96875 | 2 |
bot/constants/messages.py | eyobofficial/Gebeya-Schedule-Bot | 3 | 12787864 | welcome_message = \
"""
👋🏼 Hi {}! You are currently enrolled to the {} track \
during the {}-time session.
*To checkout your class schedules:*
/today - get class schedules for today
/tomorrow - get class schedules for tomorrow
/week - get class schedules for 1 week starting from today
/month - get class schedules for 1 month starting from today
*To manage your track or session:*
/me - get your account details
/track - set or change your track
/session - set or change your class session
*For more commands:*
/start - get started with me (the bot)
/about - get brief description about me (the bot)
/help - get the list of all commands
/credits - get the list of contributors & developers
"""
help_message = \
"""
*To checkout your class schedules:*
/today - get class schedules for today
/tomorrow - get class schedules for tomorrow
/week - get class schedules for 1 week starting from today
/month - get class schedules for 1 month starting from today
*To manage your track or session:*
/me - get your account details
/track - set or change your track
/session - set or change your class session
*For more commands:*
/start - get started with me (the bot)
/about - get brief description about me (the bot)
/help - get the list of all commands
/credits - get the list of contributors & developers
"""
session_missing = \
"""
But I don't know your class session yet. Use the /session command to set it.
"""
track_missing = \
"""
But I don't know your track yet. Use the /track command to set it.
"""
me = \
"""
*Name:* {}
*Track:* {}
*Session: *{}
You can update your track and session using the /track & /session commands.
"""
credits_message = \
"""
**DEVELOPERS**
👤 <NAME>
✉️ <EMAIL>
📱 +251 911 42 78 05
"""
about = \
"""
👋🏼 My name is `Gebeya Class Bot`. I will help you to easily get your \
`daily`, `weekly` and `monthly` Gebeya class schedules. Get started with me by \
firing the /start command.
"""
| 1.78125 | 2 |
backend/flask-server/app.py | WatVis/EDAssistant | 0 | 12787865 | from flask import Flask
from flask import request, jsonify
import numpy as np
import torch
from flask_cors import CORS, cross_origin
import socket
import argparse
import random
import json
import re
from tokenize_code import tokenize_code
from serverHelpers import notebook_to_frontend
from gensim.models.doc2vec import Doc2Vec
from model import BertModel, Generator
from RetrievalDB_doc2vec import RetrievalDB_doc2vec, inferenceRNN_doc2vec
from RetrievalDB_CodeBERT import RetrievalDB_CodeBERT, inferenceRNN_CodeBERT
# Get the path to the data
PATH_TO_SLICED_SCRIPTS = '../../yizhi/EDA/kaggle-dataset/sliced-notebooks-full-new'
PATH_TO_NOTEBOOKS = '../../yizhi/EDA/kaggle-dataset/notebooks-full'
PATH_TO_CODEBERT_MODELS = '../../yizhi/EDA/EDA-prediction/'
# retrievalDB_doc2vec = RetrievalDB_doc2vec()
retrievalDB_CodeBERT = RetrievalDB_CodeBERT(PATH_TO_CODEBERT_MODELS)
app = Flask(__name__)
CORS(app)
def randomSublists(someList):
resultList = [] #result container
index = 0 #start at the start of the list
length = len(someList) #and cache the length for performance on large lists
while (index < length):
randomNumber = np.random.randint(1, length-index+1) #get a number between 1 and the remaining choices
resultList.append(someList[index:index+randomNumber]) #append a list starting at index with randomNumber length to it
index = index + randomNumber #increment index by amount of list used
return resultList #return the list of randomized sublists
def create_app():
@app.route("/", methods=["GET"])
def index():
return "SmartEDA API Server"
@app.route("/generate_answer", methods=["GET","POST"])
def generate_answer():
#nl_input = request.form['input']
files_to_read = ['2.ipynb', '11111.ipynb', '8570777.ipynb', '9582250.ipynb', '10269993.ipynb']
store = []
for file_name in files_to_read:
file = open("examples/" + file_name)
line = file.read()
file.close()
store.append(line)
json_parsed = []
for file_content in store:
json_parsed.append(json.loads(file_content))
all_ops = []
all_op_type = []
all_if_demon = []
for notebook in json_parsed:
cells = notebook['cells']
operations = []
one_op_type = []
one_if_demon = []
for a_cell in cells:
# a code cell
if a_cell['cell_type'] == 'code':
for a_line in a_cell['source']:
# a line of code
replaced_line = a_line.replace('"', '@').replace("'", '@')
if replaced_line[-1] != '\n':
operations.append(replaced_line + '\n')
else:
operations.append(replaced_line)
one_op_type.append(np.random.randint(4) + 1)
one_if_demon.append(np.random.randint(2))
all_ops.append(operations)
all_op_type.append(one_op_type)
all_if_demon.append(one_if_demon)
all_keywords = []
for j in range(len(all_if_demon)):
one_notebook = all_if_demon[j]
a_keyword = []
length = len(one_notebook)
i = 0
while i < length:
if one_notebook[i] == 0:
i += 1
# skip
else:
start = i
end = start
while i < length:
if one_notebook[i] == 1:
# no worries, just check if it is the end
if i == length - 1:
# 1 all the way to the end.
end = i
else:
# 0, time to stop
i = i - 1
end = i
break
i = i + 1
try:
a_keyword.append(random.choice(re.sub("[^a-zA-Z]+", " ", ' '.join(all_ops[j][start:end+1])).split()))
except:
a_keyword.append('random_stuff')
i += 1
all_keywords.append(a_keyword)
response = jsonify(all_operation_types=all_op_type,
all_operations=all_ops,
all_if_demonstrated=all_if_demon,
all_kwds=all_keywords)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
@app.route("/predict_next", methods=["POST"])
def predict_next():
if request.method == "POST":
print("Inferring next sequence")
# Axios request body is {notebook: stringified json}
# So we need to access the notebook field and parse it with json.loads
notebookSrc = json.loads(request.get_json()['notebook'])
print("notebooksrc json is", notebookSrc)
print("Notebook is", notebookSrc.keys())
# Do inference
topNotebooks = inferenceRNN_CodeBERT(notebookSrc, retrievalDB_CodeBERT, PATH_TO_CODEBERT_MODELS)
notebook_filepaths = []
# Parse the returned results
for (name, seqNum ) in topNotebooks:
# Name format is "competition\filename_seqNum"
competition = name.split('\\')[0]
filename_and_idx = name.split('\\')[1]
filename = filename_and_idx.split('_')[0]
idx = filename_and_idx.split('_')[1]
filepath = PATH_TO_NOTEBOOKS + '/' + competition + '/' + filename + '.ipynb'
notebook_filepaths.append(filepath)
data_to_frontend = notebook_to_frontend(notebook_filepaths)
response_formatted = jsonify(all_operation_types=data_to_frontend[0],
all_operations=data_to_frontend[1],
all_if_demonstrated=data_to_frontend[2],
all_kwds=data_to_frontend[3])
# Prevent CORS error
response_formatted.headers.add('Access-Control-Allow-Origin', '*')
return response_formatted
# POST /predict_next_doc2vec
@app.route("/predict_next_doc2vec", methods=["POST"])
def predict_next_doc2vec():
if request.method == "POST":
print("Inferring next sequence")
# Axios request body is {notebook: stringified json}
# So we need to access the notebook field and parse it with json.loads
notebookSrc = json.loads(request.get_json()['notebook'])
print("notebooksrc json is", notebookSrc)
print("Notebook is", notebookSrc.keys())
# Do inference
topNotebooks = inferenceRNN_doc2vec(notebookSrc, retrievalDB_doc2vec)
notebook_filepaths = []
# Parse the returned results
for (name, seqNum ) in topNotebooks:
# Name format is "competition\filename_seqNum"
competition = name.split('\\')[0]
filename_and_idx = name.split('\\')[1]
filename = filename_and_idx.split('_')[0]
idx = filename_and_idx.split('_')[1]
filepath = PATH_TO_NOTEBOOKS + '/' + competition + '/' + filename + '.ipynb'
notebook_filepaths.append(filepath)
print("notebooks filepaths is", notebook_filepaths)
response = jsonify(topNotebooks)
data_to_frontend = notebook_to_frontend(notebook_filepaths)
response_formatted = jsonify(all_operation_types=data_to_frontend[0],
all_operations=data_to_frontend[1],
all_if_demonstrated=data_to_frontend[2],
all_kwds=data_to_frontend[3])
# Prevent CORS error
response_formatted.headers.add('Access-Control-Allow-Origin', '*')
return response_formatted
@app.route("/search_by_nl", methods=["POST"])
def search_by_nl():
if request.method == "POST":
return jsonify(hello="world search by nl")
return app
def main(args):
hostname = socket.gethostname()
local_ip = socket.gethostbyname(hostname)
print("hostname is", hostname)
print("local ip is", local_ip)
app = create_app()
app.run(host=args.host, debug=True, port=args.port)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="")
parser.add_argument(
"--beam_size", default=10, type=int, help="beam size for beam search"
)
parser.add_argument("--no_cuda", action='store_true', help="Avoid using CUDA when available")
parser.add_argument("--host", type=str, default="0.0.0.0")
parser.add_argument("--port", type=int, default=5000)
args = parser.parse_args()
args.device_name = "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu"
args.device = torch.device(args.device_name)
args.beam_size = (args.beam_size if torch.cuda.is_available() and not args.no_cuda else 1)
main(args)
| 2.328125 | 2 |
dbcArchives/2021/000_6-sds-3-x-dl/055_DLbyABr_04-ConvolutionalNetworks.py | r-e-x-a-g-o-n/scalable-data-science | 138 | 12787866 | <filename>dbcArchives/2021/000_6-sds-3-x-dl/055_DLbyABr_04-ConvolutionalNetworks.py<gh_stars>100-1000
# Databricks notebook source
# MAGIC %md
# MAGIC ScaDaMaLe Course [site](https://lamastex.github.io/scalable-data-science/sds/3/x/) and [book](https://lamastex.github.io/ScaDaMaLe/index.html)
# MAGIC
# MAGIC This is a 2019-2021 augmentation and update of [<NAME>](https://www.linkedin.com/in/adbreind)'s initial notebooks.
# MAGIC
# MAGIC _Thanks to [<NAME>](https://www.linkedin.com/in/christianvonkoch/) and [<NAME>](https://www.linkedin.com/in/william-anz%C3%A9n-b52003199/) for their contributions towards making these materials Spark 3.0.1 and Python 3+ compliant._
# COMMAND ----------
# MAGIC %md
# MAGIC # Convolutional Neural Networks
# MAGIC ## aka CNN, ConvNet
# COMMAND ----------
# MAGIC %md
# MAGIC As a baseline, let's start a lab running with what we already know.
# MAGIC
# MAGIC We'll take our deep feed-forward multilayer perceptron network, with ReLU activations and reasonable initializations, and apply it to learning the MNIST digits.
# MAGIC
# MAGIC The main part of the code looks like the following (full code you can run is in the next cell):
# MAGIC
# MAGIC ```
# MAGIC # imports, setup, load data sets
# MAGIC
# MAGIC model = Sequential()
# MAGIC model.add(Dense(20, input_dim=784, kernel_initializer='normal', activation='relu'))
# MAGIC model.add(Dense(15, kernel_initializer='normal', activation='relu'))
# MAGIC model.add(Dense(10, kernel_initializer='normal', activation='softmax'))
# MAGIC model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['categorical_accuracy'])
# MAGIC
# MAGIC categorical_labels = to_categorical(y_train, num_classes=10)
# MAGIC
# MAGIC history = model.fit(X_train, categorical_labels, epochs=100, batch_size=100)
# MAGIC
# MAGIC # print metrics, plot errors
# MAGIC ```
# MAGIC
# MAGIC Note the changes, which are largely about building a classifier instead of a regression model:
# MAGIC * Output layer has one neuron per category, with softmax activation
# MAGIC * __Loss function is cross-entropy loss__
# MAGIC * Accuracy metric is categorical accuracy
# COMMAND ----------
# MAGIC %md
# MAGIC Let's hold pointers into wikipedia for these new concepts.
# COMMAND ----------
# MAGIC %scala
# MAGIC //This allows easy embedding of publicly available information into any other notebook
# MAGIC //Example usage:
# MAGIC // displayHTML(frameIt("https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation#Topics_in_LDA",250))
# MAGIC def frameIt( u:String, h:Int ) : String = {
# MAGIC """<iframe
# MAGIC src=""""+ u+""""
# MAGIC width="95%" height="""" + h + """"
# MAGIC sandbox>
# MAGIC <p>
# MAGIC <a href="http://spark.apache.org/docs/latest/index.html">
# MAGIC Fallback link for browsers that, unlikely, don't support frames
# MAGIC </a>
# MAGIC </p>
# MAGIC </iframe>"""
# MAGIC }
# MAGIC displayHTML(frameIt("https://en.wikipedia.org/wiki/Cross_entropy#Cross-entropy_error_function_and_logistic_regression",500))
# COMMAND ----------
# MAGIC %scala
# MAGIC displayHTML(frameIt("https://en.wikipedia.org/wiki/Softmax_function",380))
# COMMAND ----------
# MAGIC %md
# MAGIC The following is from: [https://www.quora.com/How-does-Keras-calculate-accuracy](https://www.quora.com/How-does-Keras-calculate-accuracy).
# MAGIC
# MAGIC **Categorical accuracy:**
# MAGIC
# MAGIC ```%python
# MAGIC def categorical_accuracy(y_true, y_pred):
# MAGIC return K.cast(K.equal(K.argmax(y_true, axis=-1),
# MAGIC K.argmax(y_pred, axis=-1)),
# MAGIC K.floatx())
# MAGIC ```
# MAGIC
# MAGIC > `K.argmax(y_true)` takes the highest value to be the prediction and matches against the comparative set.
# COMMAND ----------
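# MAGIC %md
# MAGIC Before moving on, here is a small standalone NumPy sketch (not part of the original lab) of what these three pieces compute for one mini-batch: softmax turns raw scores into class probabilities, categorical cross-entropy penalizes low probability on the true class, and categorical accuracy compares argmaxes. The logits and labels below are made-up values.

# COMMAND ----------

import numpy as np

logits = np.array([[2.0, 0.5, -1.0],    # fake network outputs: 2 examples, 3 classes
                   [0.1, 0.2,  3.0]])
y_true = np.array([[1, 0, 0],           # one-hot labels
                   [0, 0, 1]])

# softmax: exponentiate (shifted for numerical stability) and normalize each row
shifted = logits - logits.max(axis=1, keepdims=True)
probs = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)

# categorical cross-entropy: mean negative log-probability assigned to the true class
loss = -np.mean(np.sum(y_true * np.log(probs), axis=1))

# categorical accuracy: fraction of rows whose argmax matches the true class
accuracy = np.mean(probs.argmax(axis=1) == y_true.argmax(axis=1))

print(loss, accuracy)

# COMMAND ----------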
# MAGIC %md
# MAGIC Watch (1:39)
# MAGIC * [](https://www.youtube.com/watch?v=tRsSi_sqXjI)
# MAGIC
# MAGIC Watch (1:54)
# MAGIC * [](https://www.youtube.com/watch?v=x449QQDhMDE)
# COMMAND ----------
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
import sklearn.datasets
import datetime
import matplotlib.pyplot as plt
import numpy as np
train_libsvm = "/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-train.txt"
test_libsvm = "/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-test.txt"
X_train, y_train = sklearn.datasets.load_svmlight_file(train_libsvm, n_features=784)
X_train = X_train.toarray()
X_test, y_test = sklearn.datasets.load_svmlight_file(test_libsvm, n_features=784)
X_test = X_test.toarray()
model = Sequential()
model.add(Dense(20, input_dim=784, kernel_initializer='normal', activation='relu'))
model.add(Dense(15, kernel_initializer='normal', activation='relu'))
model.add(Dense(10, kernel_initializer='normal', activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['categorical_accuracy'])
categorical_labels = to_categorical(y_train, num_classes=10)
start = datetime.datetime.today()
history = model.fit(X_train, categorical_labels, epochs=40, batch_size=100, validation_split=0.1, verbose=2)
scores = model.evaluate(X_test, to_categorical(y_test, num_classes=10))
print()
for i in range(len(model.metrics_names)):
print("%s: %f" % (model.metrics_names[i], scores[i]))
print ("Start: " + str(start))
end = datetime.datetime.today()
print ("End: " + str(end))
print ("Elapse: " + str(end-start))
# COMMAND ----------
# MAGIC %md
# MAGIC after about a minute we have:
# MAGIC
# MAGIC ```
# MAGIC ...
# MAGIC
# MAGIC Epoch 40/40
# MAGIC 1s - loss: 0.0610 - categorical_accuracy: 0.9809 - val_loss: 0.1918 - val_categorical_accuracy: 0.9583
# MAGIC
# MAGIC ...
# MAGIC
# MAGIC loss: 0.216120
# MAGIC
# MAGIC categorical_accuracy: 0.955000
# MAGIC
# MAGIC Start: 2017-12-06 07:35:33.948102
# MAGIC
# MAGIC End: 2017-12-06 07:36:27.046130
# MAGIC
# MAGIC Elapse: 0:00:53.098028
# MAGIC ```
# COMMAND ----------
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
fig.set_size_inches((5,5))
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
display(fig)
# COMMAND ----------
# MAGIC %md
# MAGIC What are the big takeaways from this experiment?
# MAGIC
# MAGIC 1. We get pretty impressive "apparent error" accuracy right from the start! A small network gets us to training accuracy 97% by epoch 20
# MAGIC 2. The model *appears* to continue to learn if we let it run, although it does slow down and oscillate a bit.
# MAGIC 3. Our test accuracy is about 95% after 5 epochs and never gets better ... it gets worse!
# MAGIC 4. Therefore, we are overfitting very quickly... most of the "training" turns out to be a waste.
# MAGIC 5. For what it's worth, we get 95% accuracy without much work.
# MAGIC
# MAGIC This is not terrible compared to other, non-neural-network approaches to the problem. After all, we could probably tweak this a bit and do even better.
# MAGIC
# MAGIC But we talked about using deep learning to solve "95%" problems or "98%" problems ... where one error in 20, or 50 simply won't work. If we can get to "multiple nines" of accuracy, then we can do things like automate mail sorting and translation, create cars that react properly (all the time) to street signs, and control systems for robots or drones that function autonomously.
# MAGIC
# MAGIC Try two more experiments (try them separately):
# MAGIC 1. Add a third, hidden layer.
# MAGIC 2. Increase the size of the hidden layers.
# MAGIC
# MAGIC Adding another layer slows things down a little (why?) but doesn't seem to make a difference in accuracy.
# MAGIC
# MAGIC Adding a lot more neurons into the first topology slows things down significantly -- 10x as many neurons, and only a marginal increase in accuracy. Notice also (in the plot) that the learning clearly degrades after epoch 50 or so.
# MAGIC
# MAGIC ... We need a new approach!
# MAGIC
# MAGIC ---
# MAGIC
# MAGIC ... let's think about this:
# MAGIC
# MAGIC ### What is layer 2 learning from layer 1? Combinations of pixels
# MAGIC
# MAGIC #### Combinations of pixels contain information but...
# MAGIC
# MAGIC There are a lot of them (combinations) and they are "fragile"
# MAGIC
# MAGIC In fact, in our last experiment, we basically built a model that memorizes a bunch of "magic" pixel combinations.
# MAGIC
# MAGIC What might be a better way to build features?
# MAGIC
# MAGIC * When humans perform this task, we look not at arbitrary pixel combinations, but certain geometric patterns -- lines, curves, loops.
# MAGIC * These features are made up of combinations of pixels, but they are far from arbitrary
# MAGIC * We identify these features regardless of translation, rotation, etc.
# MAGIC
# MAGIC Is there a way to get the network to do the same thing?
# MAGIC
# MAGIC I.e., in layer one, identify pixels. Then in layer 2+, identify abstractions over pixels that are translation-invariant 2-D shapes?
# MAGIC
# MAGIC We could look at where a "filter" that represents one of these features (e.g., and edge) matches the image.
# MAGIC
# MAGIC How would this work?
# MAGIC
# MAGIC ### Convolution
# MAGIC
# MAGIC Convolution in the general mathematical sense is define as follows:
# MAGIC
# MAGIC <img src="https://i.imgur.com/lurC2Cx.png" width=300>
# MAGIC
# MAGIC The convolution we deal with in deep learning is a simplified case. We want to compare two signals. Here are two visualizations, courtesy of Wikipedia, that help communicate how convolution emphasizes features:
# MAGIC
# MAGIC <img src="http://i.imgur.com/EDCaMl2.png" width=500>
# MAGIC
# MAGIC ---
# MAGIC
# MAGIC #### Here's an animation (where we change \\({\tau}\\))
# MAGIC <img src="http://i.imgur.com/0BFcnaw.gif">
# MAGIC
# MAGIC __In one sense, the convolution captures and quantifies the pattern matching over space__
# MAGIC
# MAGIC If we perform this in two dimensions, we can achieve effects like highlighting edges:
# MAGIC
# MAGIC <img src="http://i.imgur.com/DKEXIII.png">
# MAGIC
# MAGIC The matrix here, also called a convolution kernel, is one of the functions we are convolving. Other convolution kernels can blur, "sharpen," etc.
# MAGIC
# MAGIC ### So we'll drop in a number of convolution kernels, and the network will learn where to use them? Nope. Better than that.
# MAGIC
# MAGIC ## We'll program in the *idea* of discrete convolution, and the network will learn what kernels extract meaningful features!
# MAGIC
# MAGIC The values in a (fixed-size) convolution kernel matrix will be variables in our deep learning model. Although inuitively it seems like it would be hard to learn useful params, in fact, since those variables are used repeatedly across the image data, it "focuses" the error on a smallish number of parameters with a lot of influence -- so it should be vastly *less* expensive to train than just a huge fully connected layer like we discussed above.
# MAGIC
# MAGIC This idea was developed in the late 1980s, and by 1989, <NAME> (at AT&T/Bell Labs) had built a practical high-accuracy system (used in the 1990s for processing handwritten checks and mail).
# MAGIC
# MAGIC __How do we hook this into our neural networks?__
# MAGIC
# MAGIC * First, we can preserve the geometric properties of our data by "shaping" the vectors as 2D instead of 1D.
# MAGIC
# MAGIC * Then we'll create a layer whose value is not just activation applied to weighted sum of inputs, but instead it's the result of a dot-product (element-wise multiply and sum) between the kernel and a patch of the input vector (image).
# MAGIC * This value will be our "pre-activation" and optionally feed into an activation function (or "detector")
# MAGIC
# MAGIC <img src="http://i.imgur.com/ECyi9lL.png">
# MAGIC
# MAGIC
# MAGIC If we perform this operation at lots of positions over the image, we'll get lots of outputs, as many as one for every input pixel.
# MAGIC
# MAGIC
# MAGIC <img src="http://i.imgur.com/WhOrJ0Y.jpg">
# MAGIC
# MAGIC * So we'll add another layer that "picks" the highest convolution pattern match from nearby pixels, which
# MAGIC * makes our pattern match a little bit translation invariant (a fuzzy location match)
# MAGIC * reduces the number of outputs significantly
# MAGIC * This layer is commonly called a pooling layer, and if we pick the "maximum match" then it's a "max pooling" layer.
# MAGIC
# MAGIC <img src="http://i.imgur.com/9iPpfpb.png">
# MAGIC
# MAGIC __The end result is that the kernel or filter together with max pooling creates a value in a subsequent layer which represents the appearance of a pattern in a local area in a prior layer.__
# MAGIC
# MAGIC __Again, the network will be given a number of "slots" for these filters and will learn (by minimizing error) what filter values produce meaningful features. This is the key insight into how modern image-recognition networks are able to generalize -- i.e., learn to tell 6s from 7s or cats from dogs.__
# MAGIC
# MAGIC <img src="http://i.imgur.com/F8eH3vj.png">
# MAGIC
# MAGIC ## Ok, let's build our first ConvNet:
# MAGIC
# MAGIC First, we want to explicity shape our data into a 2-D configuration. We'll end up with a 4-D tensor where the first dimension is the training examples, then each example is 28x28 pixels, and we'll explicitly say it's 1-layer deep. (Why? with color images, we typically process over 3 or 4 channels in this last dimension)
# MAGIC
# MAGIC A step by step animation follows:
# MAGIC * http://cs231n.github.io/assets/conv-demo/index.html
# COMMAND ----------
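# MAGIC %md
# MAGIC Alongside that animation, here is a tiny standalone NumPy sketch (not from the original lab) of the discrete 2-D convolution itself: slide a small kernel over the image and take the element-wise product-and-sum at each position. The 5x5 "image" and the vertical-edge kernel are made-up values; large output magnitudes mark where the dark/bright boundary sits.

# COMMAND ----------

import numpy as np

image = np.array([[0, 0, 1, 1, 1],
                  [0, 0, 1, 1, 1],
                  [0, 0, 1, 1, 1],
                  [0, 0, 1, 1, 1],
                  [0, 0, 1, 1, 1]], dtype=float)   # dark left half, bright right half

kernel = np.array([[1, 0, -1],
                   [1, 0, -1],
                   [1, 0, -1]], dtype=float)       # responds to vertical edges

# "valid" convolution: only positions where the 3x3 kernel fits entirely inside the image
out = np.zeros((3, 3))
for i in range(3):
    for j in range(3):
        out[i, j] = np.sum(image[i:i+3, j:j+3] * kernel)   # patch-kernel dot product

print(out)

# COMMAND ----------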
train_libsvm = "/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-train.txt"
test_libsvm = "/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-test.txt"
X_train, y_train = sklearn.datasets.load_svmlight_file(train_libsvm, n_features=784)
X_train = X_train.toarray()
X_test, y_test = sklearn.datasets.load_svmlight_file(test_libsvm, n_features=784)
X_test = X_test.toarray()
X_train = X_train.reshape( (X_train.shape[0], 28, 28, 1) )
X_train = X_train.astype('float32')
X_train /= 255
y_train = to_categorical(y_train, num_classes=10)
X_test = X_test.reshape( (X_test.shape[0], 28, 28, 1) )
X_test = X_test.astype('float32')
X_test /= 255
y_test = to_categorical(y_test, num_classes=10)
# COMMAND ----------
# MAGIC %md
# MAGIC Now the model:
# COMMAND ----------
from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
model = Sequential()
model.add(Conv2D(8, # number of kernels
(4, 4), # kernel size
padding='valid', # no padding; output will be smaller than input
input_shape=(28, 28, 1)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu')) # alternative syntax for applying activation
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# COMMAND ----------
# MAGIC %md
# MAGIC ... and the training loop and output:
# COMMAND ----------
start = datetime.datetime.today()
history = model.fit(X_train, y_train, batch_size=128, epochs=8, verbose=2, validation_split=0.1)
scores = model.evaluate(X_test, y_test, verbose=1)
print()
for i in range(len(model.metrics_names)):
print("%s: %f" % (model.metrics_names[i], scores[i]))
# COMMAND ----------
fig, ax = plt.subplots()
fig.set_size_inches((5,5))
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
display(fig)
# COMMAND ----------
# MAGIC %md
# MAGIC ### Our MNIST ConvNet
# MAGIC
# MAGIC In our first convolutional MNIST experiment, we get to almost 99% validation accuracy in just a few epochs (a minutes or so on CPU)!
# MAGIC
# MAGIC The training accuracy is effectively 100%, though, so we've almost completely overfit (i.e., memorized the training data) by this point and need to do a little work if we want to keep learning.
# MAGIC
# MAGIC Let's add another convolutional layer:
# COMMAND ----------
model = Sequential()
model.add(Conv2D(8, # number of kernels
(4, 4), # kernel size
padding='valid',
input_shape=(28, 28, 1)))
model.add(Activation('relu'))
model.add(Conv2D(8, (4, 4)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(X_train, y_train, batch_size=128, epochs=15, verbose=2, validation_split=0.1)
scores = model.evaluate(X_test, y_test, verbose=1)
print()
for i in range(len(model.metrics_names)):
print("%s: %f" % (model.metrics_names[i], scores[i]))
# COMMAND ----------
# MAGIC %md
# MAGIC While that's running, let's look at a number of "famous" convolutional networks!
# MAGIC
# MAGIC ### LeNet (<NAME>, 1998)
# MAGIC
# MAGIC <img src="http://i.imgur.com/k5hMtMK.png">
# MAGIC
# MAGIC <img src="http://i.imgur.com/ERV9pHW.gif">
# COMMAND ----------
# MAGIC %md <img src="http://i.imgur.com/TCN9C4P.png">
# COMMAND ----------
# MAGIC %md
# MAGIC ### AlexNet (2012)
# MAGIC
# MAGIC <img src="http://i.imgur.com/CpokDKV.jpg">
# MAGIC
# MAGIC <img src="http://i.imgur.com/Ld2QhXr.jpg">
# COMMAND ----------
# MAGIC %md
# MAGIC ### Back to our labs: Still Overfitting
# MAGIC
# MAGIC We're making progress on our test error -- about 99% -- but just a bit for all the additional time, due to the network overfitting the data.
# MAGIC
# MAGIC There are a variety of techniques we can take to counter this -- forms of regularization.
# MAGIC
# MAGIC Let's try a relatively simple solution solution that works surprisingly well: add a pair of `Dropout` filters, a layer that randomly omits a fraction of neurons from each training batch (thus exposing each neuron to only part of the training data).
# MAGIC
# MAGIC We'll add more convolution kernels but shrink them to 3x3 as well.
# COMMAND ----------
model = Sequential()
model.add(Conv2D(32, # number of kernels
(3, 3), # kernel size
padding='valid',
input_shape=(28, 28, 1)))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(rate=1-0.25)) # <- regularize, new parameter rate added (rate=1-keep_prob)
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(rate=1-0.5)) # <-regularize, new parameter rate added (rate=1-keep_prob)
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(X_train, y_train, batch_size=128, epochs=15, verbose=2)
scores = model.evaluate(X_test, y_test, verbose=2)
print()
for i in range(len(model.metrics_names)):
print("%s: %f" % (model.metrics_names[i], scores[i]))
# COMMAND ----------
# MAGIC %md
# MAGIC While that's running, let's look at some more recent ConvNet architectures:
# MAGIC
# MAGIC ### VGG16 (2014)
# MAGIC
# MAGIC <img src="http://i.imgur.com/gl4kZDf.png">
# COMMAND ----------
# MAGIC %md
# MAGIC ### GoogLeNet (2014)
# MAGIC
# MAGIC <img src="http://i.imgur.com/hvmtDqN.png">
# MAGIC
# MAGIC *"Inception" layer: parallel convolutions at different resolutions*
# MAGIC
# MAGIC ### Residual Networks (2015-)
# MAGIC
# MAGIC Skip layers to improve training (error propagation). Residual layers learn from details at multiple previous layers.
# MAGIC
# MAGIC <img src="http://i.imgur.com/32g8Ykl.png">
# COMMAND ----------
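# MAGIC %md
# MAGIC A minimal Keras sketch (not part of the original lab) of the skip connection behind residual networks: the block adds its input back onto a learned residual, so the `add` gives gradients a short path around the convolutions. The layer sizes here are arbitrary.

# COMMAND ----------

from keras.layers import Input, Conv2D, Activation, add
from keras.models import Model

inp = Input(shape=(28, 28, 16))                  # arbitrary feature-map shape
res = Conv2D(16, (3, 3), padding='same', activation='relu')(inp)
res = Conv2D(16, (3, 3), padding='same')(res)    # residual F(x), same shape as the input
out = Activation('relu')(add([inp, res]))        # output = relu(x + F(x))

residual_block = Model(inputs=inp, outputs=out)
residual_block.summary()

# COMMAND ----------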
# MAGIC %md
# MAGIC ---
# MAGIC
# MAGIC > __ASIDE: Atrous / Dilated Convolutions__
# MAGIC
# MAGIC > An atrous or dilated convolution is a convolution filter with "holes" in it. Effectively, it is a way to enlarge the filter spatially while not adding as many parameters or attending to every element in the input.
# MAGIC
# MAGIC > Why? Covering a larger input volume allows recognizing coarser-grained patterns; restricting the number of parameters is a way of regularizing or constraining the capacity of the model, making training easier.
# MAGIC
# MAGIC ---
# COMMAND ----------
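# MAGIC %md
# MAGIC For reference, a dilated (atrous) convolution is just one extra argument on the Conv2D layer we have been using. This one-liner (not part of the original lab) builds a 3x3 kernel whose taps are spaced two pixels apart, so it covers a 5x5 receptive field with only nine weights.

# COMMAND ----------

from keras.layers import Conv2D

atrous_layer = Conv2D(32, (3, 3), dilation_rate=(2, 2), padding='same', activation='relu')

# COMMAND ----------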
# MAGIC %md
# MAGIC ## *Lab Wrapup*
# MAGIC
# MAGIC From the last lab, you should have a test accuracy of over 99.1%
# MAGIC
# MAGIC For one more activity, try changing the optimizer to old-school "sgd" -- just to see how far we've come with these modern gradient descent techniques in the last few years.
# MAGIC
# MAGIC Accuracy will end up noticeably worse ... about 96-97% test accuracy. Two key takeaways:
# MAGIC
# MAGIC * Without a good optimizer, even a very powerful network design may not achieve results
# MAGIC * In fact, we could replace the word "optimizer" there with
# MAGIC * initialization
# MAGIC * activation
# MAGIC * regularization
# MAGIC * (etc.)
# MAGIC * All of these elements we've been working with operate together in a complex way to determine final performance
# COMMAND ----------
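# MAGIC %md
# MAGIC If you want to run the optimizer experiment described above, the only change is the `optimizer` argument when recompiling the model from the previous cell (sketch below); then call `fit` again as before:

# COMMAND ----------

# Swap the adaptive optimizer for plain stochastic gradient descent and retrain.
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])

# COMMAND ----------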
# MAGIC %md
# MAGIC Of course this world evolves fast - see the new kid in the CNN block -- **capsule networks**
# MAGIC
# MAGIC > Hinton: “The pooling operation used in convolutional neural networks is a big mistake and the fact that it works so well is a disaster.”
# MAGIC
# MAGIC Well worth the 8 minute read:
# MAGIC * [https://medium.com/ai%C2%B3-theory-practice-business/understanding-hintons-capsule-networks-part-i-intuition-b4b559d1159b](https://medium.com/ai%C2%B3-theory-practice-business/understanding-hintons-capsule-networks-part-i-intuition-b4b559d1159b)
# MAGIC
# MAGIC To understand deeper:
# MAGIC * original paper: [https://arxiv.org/abs/1710.09829](https://arxiv.org/abs/1710.09829)
# MAGIC
# MAGIC [Keras capsule network example](https://keras.io/examples/cifar10_cnn_capsule/)
# COMMAND ----------
# MAGIC %md
# MAGIC # More resources
# MAGIC
# MAGIC - http://www.wildml.com/2015/12/implementing-a-cnn-for-text-classification-in-tensorflow/
# MAGIC - https://openai.com/
# COMMAND ----------
| 2.484375 | 2 |
app/custom_topo.py | viniciusarcanjo/dvel | 1 | 12787867 | <filename>app/custom_topo.py
#!/usr/bin/python
"""
Containernet custom topology
"""
import copy
import re
import signal
import subprocess
import sys
import os
from mininet.net import Containernet
from mininet.node import RemoteController
from mininet.cli import CLI
from mininet.link import TCLink
from mininet.log import info, setLogLevel
setLogLevel("info")
def handler(signum, frame):
info("*** Stopping network")
net.stop()
docker_stop_mn_hosts()
sys.exit(0)
def docker_stop_mn_hosts(rm=False):
"""Stop and clean up extra mininet hosts"""
try:
pass
host_re = r".*?(mn.\w+)"
out = subprocess.check_output(["docker", "ps"], universal_newlines=True)
for l in out.split("\n"):
g = re.match(host_re, l)
if g:
subprocess.run(
["docker", "stop", g.group(1)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
if rm:
subprocess.run(
["docker", "rm", g.group(1)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
except (IOError, FileNotFoundError):
pass
"""
Topology
- hosts: d1-d8 (used for data plane tests, send untagged traffic in this topo)
- edge_sws: s1, s2 (mainly used for pushing and popping VLANs on hosts)
- bb_sws: s3, s4
datapath-id follows this pattern "00:00:00:00:00:00:00:sw", where sw is the switch number
edge/host backbone edge/host
---------------- ------------------------------- ------------------
| |
| s3 (2) -- (2) s4 |
d1 -- (1) s1 (2)| -- (1) s3 (3) -- (3) s4 (1) --| (2) s2 (1) -- d2
d3 -- (3) | s3 (4) -- (4) s4 | (3) -- d6
d4 -- (4) | | (4) -- d7
d5 -- (5) | | (5) -- d8
| |
---------------- ------------------------------- ------------------
"""
# To gracefully shutdown
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)
# IP addressing
host_d1 = "d1"
host_d2 = "d2"
host_d3 = "d3"
host_d4 = "d4"
host_d5 = "d5"
host_d6 = "d6"
host_d7 = "d7"
host_d8 = "d8"
env = {
host_d1: "10.0.0.1",
host_d2: "10.0.0.2",
host_d3: "10.0.0.3",
host_d4: "10.0.0.4",
host_d5: "10.0.0.5",
host_d6: "10.0.0.6",
host_d7: "10.0.0.7",
host_d8: "10.0.0.8",
"DB_SERVER": "172.17.0.1",
"DB_NAME": "dvel",
"ENDPOINT": "echo"
}
controller_ip = "127.0.0.1"
if os.environ.get("ofcontroller_ip"):
controller_ip = os.environ.get("ofcontroller_ip")
controller_port = 6633
if os.environ.get("ofcontroller_port"):
controller_port = int(os.environ.get("ofcontroller_port"))
info("*** Cleaning up ***")
docker_stop_mn_hosts(True)
info("*** Instantiating Network elements ***")
c0 = RemoteController("c0", ip=controller_ip, port=controller_port)
net = Containernet()
info("*** Adding controller\n")
net.addController(c0)
info("*** Adding docker containers\n")
d1_env = copy.copy(env)
d1_env["HTTP_SERVER"] = d1_env[host_d2]
d1_env["CONTAINER"] = host_d1
d1 = net.addDocker(
host_d1,
ip=env["d1"],
dimage="registry.gitlab.com/viniarck/containernet-docker:client",
dcmd="/sbin/my_init -- python3 client.py",
environment=d1_env,
)
d2 = net.addDocker(
host_d2,
ip=env["d2"],
dcmd="/sbin/my_init -- python3 server.py",
dimage="registry.gitlab.com/viniarck/containernet-docker:server",
environment=env,
)
d3_env = copy.copy(env)
d3_env["HTTP_SERVER"] = d3_env[host_d6]
d3_env["CONTAINER"] = host_d3
d3 = net.addDocker(
host_d3,
ip=env["d3"],
dcmd="/sbin/my_init -- python3 client.py",
dimage="registry.gitlab.com/viniarck/containernet-docker:client",
environment=d3_env,
)
d4_env = copy.copy(env)
d4_env["HTTP_SERVER"] = d4_env[host_d7]
d4_env["CONTAINER"] = host_d3
d4 = net.addDocker(
host_d4,
ip=env["d4"],
dcmd="/sbin/my_init -- python3 client.py",
dimage="registry.gitlab.com/viniarck/containernet-docker:client",
environment=d4_env,
)
d5_env = copy.copy(env)
d5_env["HTTP_SERVER"] = d5_env[host_d8]
d5_env["CONTAINER"] = host_d3
d5 = net.addDocker(
host_d5,
ip=env["d5"],
dcmd="/sbin/my_init -- python3 client.py",
dimage="registry.gitlab.com/viniarck/containernet-docker:client",
environment=d5_env,
)
d6 = net.addDocker(
host_d6,
ip=env["d6"],
dcmd="/sbin/my_init -- python3 server.py",
dimage="registry.gitlab.com/viniarck/containernet-docker:server",
environment=env,
)
d7 = net.addDocker(
host_d7,
ip=env["d7"],
dcmd="/sbin/my_init -- python3 server.py",
dimage="registry.gitlab.com/viniarck/containernet-docker:server",
environment=env,
)
d8 = net.addDocker(
host_d8,
ip=env["d8"],
dcmd="/sbin/my_init -- python3 server.py",
dimage="registry.gitlab.com/viniarck/containernet-docker:server",
environment=env,
)
info("*** Adding switches\n")
s1 = net.addSwitch("s1")
s2 = net.addSwitch("s2")
s3 = net.addSwitch("s3")
s4 = net.addSwitch("s4")
info("*** Creating links\n")
net.addLink(s1, d1, port1=1)
net.addLink(s1, d3, port1=3)
net.addLink(s1, d4, port1=4)
net.addLink(s1, d5, port1=5)
net.addLink(s2, d2, port1=1)
net.addLink(s2, d6, port1=3)
net.addLink(s2, d7, port1=4)
net.addLink(s2, d8, port1=5)
net.addLink(s1, s3, port1=2, port2=1, cls=TCLink, delay="1ms", bw=1000)
net.addLink(s2, s4, port1=2, port2=1, cls=TCLink, delay="1ms", bw=1000)
net.addLink(s3, s4, port1=2, port2=2, cls=TCLink, delay="25ms", bw=1000)
net.addLink(s3, s4, port1=3, port2=3, cls=TCLink, delay="50ms", bw=1000)
net.addLink(s3, s4, port1=4, port2=4, cls=TCLink, delay="100ms", bw=1000)
info("*** Starting network\n")
net.start()
info("*** Running CLI\n")
CLI(net)
| 2.28125 | 2 |
timberAllocation/middleRepr.py | KOLANICH-research/timberAllocation | 0 | 12787868 | <filename>timberAllocation/middleRepr.py
import typing
import numpy as np
def getResultVector(initialLengths, finalLengths):
return np.array(tuple(initialLengths) + tuple(finalLengths))
def getMiddleStateMaxSize(initialLengths, finalLengths):
return len(initialLengths) + len(finalLengths) - 1
def vectorFactorization(vec: np.ndarray) -> typing.Tuple[np.ndarray, np.ndarray]:
"""I have probably reinvented a wheel. I have searched the Internet and haven't found this kind of factorization.
    Factors a vector `vec` into the product of an "upper-triangular-like" 0/1 matrix `U` (mostly upper triangular, but with holes and elements in the lower triangle when necessary) and a basis row-vector `b`, so that b @ U == vec.
"""
vec = np.array(vec)
dtype = vec[0].__class__ # so works with Decimal too. In this case arrays dtype is `object`.
basis = np.full(vec.shape[0], dtype(np.nan))
matrix = np.zeros((basis.shape[0], vec.shape[0]), dtype=bool)
i = -1
basisSize = 0
while (vec > 0).any():
remainingSizes = sorted(set(vec))
if len(remainingSizes) >= 2:
secondLargest, largest = remainingSizes[-2:]
basisVec = largest - secondLargest
else: # 1 size only
basisVec = remainingSizes[0]
secondLargest = 0
basis[i] = basisVec
for j, s in enumerate(vec):
if s == secondLargest:
matrix[matrix.shape[0] + i, j] = 0
else:
if s >= basisVec:
matrix[matrix.shape[0] + i, j] = 1
vec[j] -= basisVec
else:
matrix[matrix.shape[0] + i, j] = 0
i -= 1
basisSize += 1
return basis[-basisSize:], matrix[-basisSize:, :]
def minimalReprToGraph(shared, mat, initialLengths, finalLengths):
import networkx
g = networkx.DiGraph()
for n in initialLengths:
if isinstance(n, float) and n.is_integer():
n = int(n)
g.add_node(n, color="green")
for n in finalLengths:
if isinstance(n, float) and n.is_integer():
n = int(n)
g.add_node(n, color="red")
for i, l in enumerate(initialLengths + finalLengths):
if isinstance(l, float) and l.is_integer():
l = int(l)
for j, sL in enumerate(shared):
sL = float(sL)
if sL.is_integer():
sL = int(sL)
if sL != l and mat[j, i]:
g.add_edge(l, sL)
return g
| 2.78125 | 3 |
intent_server/__init__.py | visdesignlab/intent-system | 3 | 12787869 | from flask import Flask
from .views import views
def create_app() -> Flask:
app = Flask(
__name__,
static_url_path='',
static_folder='../app/build',
)
app.register_blueprint(views)
return app
| 1.898438 | 2 |
gyoithon/migrations/0006_auto_20210506_1422.py | gyoisamurai/GyoiBoard | 3 | 12787870 | # Generated by Django 3.1.7 on 2021-05-06 05:22
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('gyoithon', '0005_auto_20210506_1358'),
]
operations = [
migrations.AlterField(
model_name='domain',
name='registration_date',
field=models.DateTimeField(default=datetime.datetime(2021, 5, 6, 5, 22, 59, 129066, tzinfo=utc)),
),
migrations.AlterField(
model_name='organization',
name='registration_date',
field=models.DateTimeField(default=datetime.datetime(2021, 5, 6, 5, 22, 59, 128595, tzinfo=utc)),
),
migrations.AlterField(
model_name='organization',
name='status',
field=models.CharField(default='N/A', max_length=10, verbose_name='Status'),
),
migrations.AlterField(
model_name='subdomain',
name='registration_date',
field=models.DateTimeField(default=datetime.datetime(2021, 5, 6, 5, 22, 59, 129596, tzinfo=utc)),
),
]
| 1.710938 | 2 |
mydata_did/patched_protocols/issue_credential/v1_0/messages/credential_ack.py | decentralised-dataexchange/acapy-mydata-did-protocol | 1 | 12787871 | <reponame>decentralised-dataexchange/acapy-mydata-did-protocol
"""A credential ack message."""
from marshmallow import EXCLUDE
from aries_cloudagent.messaging.ack.message import Ack, AckSchema
from aries_cloudagent.messaging.decorators.base import BaseDecoratorSet
from ..message_types import CREDENTIAL_ACK, PROTOCOL_PACKAGE
from .....v1_0.decorators.patched_decorator_set import PatchedDecoratorSet
HANDLER_CLASS = (
f"{PROTOCOL_PACKAGE}.handlers.credential_ack_handler.CredentialAckHandler"
)
class CredentialAck(Ack):
"""Class representing a credential ack message."""
class Meta:
"""Credential metadata."""
handler_class = HANDLER_CLASS
schema_class = "CredentialAckSchema"
message_type = CREDENTIAL_ACK
def __init__(self, **kwargs):
"""Initialize credential object."""
super().__init__(_decorators = PatchedDecoratorSet(), **kwargs)
class CredentialAckSchema(AckSchema):
"""Credential ack schema."""
class Meta:
"""Schema metadata."""
model_class = CredentialAck
unknown = EXCLUDE
| 1.820313 | 2 |
oldqa/qa/src/dedt_tests/Test_provenanceRewriter.py | KDahlgren/pyLDFI | 6 | 12787872 | <gh_stars>1-10
#!/usr/bin/env python
'''
Test_provenanceRewriter.py
Defines unit tests for provenanceRewriter.py from src/dedt/.
'''
#############
# IMPORTS #
#############
# standard python packages
import inspect, os, sqlite3, sys, unittest
from StringIO import StringIO
# ------------------------------------------------------ #
# import sibling packages HERE!!!
sys.path.append( os.path.abspath( __file__ + "/../../../../src" ) )
from dedt import dedt, dedalusParser, clockRelation, dedalusRewriter
from utils import tools
# ------------------------------------------------------ #
testPath = os.path.abspath(__file__+"/../../../../qa")
##############################
# TEST PROVENANCE REWRITER #
##############################
class Test_provenanceRewriter( unittest.TestCase ) :
###################
# AGG RULE PROV #
###################
def test_aggRuleProv_provenanceRewriter(self):
return None
##############
# GET PROV #
##############
def test_getProv_provenanceRewriter(self):
return None
########################
# REWRITE PROVENANCE #
########################
def test_rewriteProvenance_provenanceRewriter(self):
return None
#########################
# THREAD OF EXECUTION #
#########################
# use this main if running this script exclusively.
if __name__ == "__main__" :
unittest.main( verbosity=2 )
#########
# EOF #
#########
| 2.25 | 2 |
scripts/codegen/__init__.py | cisco-ie/cisco-proto | 6 | 12787873 | """Copyright 2019 Cisco Systems
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from .codegen import Codegen
from .xr import Go as XRGo
from .xe import Go as XEGo
from .nx import Go as NXGo
os_lang_map = {
'XR': {
'Go': XRGo
},
'XE': {
'Go': XEGo
},
'NX': {
'Go': NXGo
}
}
def get_codegen_class(os_name, language):
if os_name not in os_lang_map.keys():
logging.error('%s is not a supported OS!', os_name)
return None
if language not in os_lang_map[os_name].keys():
logging.error('%s is not a supported language for %s!', language, os_name)
return None
return os_lang_map[os_name][language]
def start_codegen(os_list, language_list, config):
if os_list:
logging.info('Preparing codegen for %s.', ', '.join(os_list))
if not set(os_list).issubset(set(os_lang_map.keys())):
logging.error('OS list contains invalid entries!')
return
else:
logging.info('Preparing codegen for all supported OSes.')
os_list = os_lang_map.keys()
if not language_list:
logging.info('All supported languages will be generated.')
for _os in os_list:
languages = language_list if language_list else os_lang_map[_os].keys()
for language in languages:
gen_target = get_codegen_class(_os, language)
if gen_target:
logging.info('Starting %s generation for %s.', language, _os)
gen_target(config).generate()
| 1.867188 | 2 |
models.py | bkotewall/oreilly-takehome | 0 | 12787874 | <reponame>bkotewall/oreilly-takehome<filename>models.py
from database import Base
from sqlalchemy import Column, Integer, String
from sqlalchemy.types import DateTime
from sqlalchemy.dialects.postgresql import ARRAY
class Book(Base):
"""
Books table
"""
__tablename__ = 'book'
id = Column(Integer, primary_key=True)
authors = Column(ARRAY(String, dimensions=1))
title = Column(String(256))
isbn = Column(String(17), unique=True)
description = Column(String)
| 2.65625 | 3 |
api_visualcrossing.py | greggpatton/dash-simple-weather-clock | 0 | 12787875 | # https://www.visualcrossing.com/weather/weather-data-services?pln=plan_GqkYVnzyiNg93X#/timeline
# https://www.visualcrossing.com/weather-api
import requests
import json
# Convert degrees to compass direction
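# e.g. deg_to_compass(0) -> "N", deg_to_compass(90) -> "E", deg_to_compass(350) -> "N"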
def deg_to_compass(num):
val = int((num / 22.5) + 0.5)
arr = [
"N",
"NNE",
"NE",
"ENE",
"E",
"ESE",
"SE",
"SSE",
"S",
"SSW",
"SW",
"WSW",
"W",
"WNW",
"NW",
"NNW",
]
return arr[(val % 16)]
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
class ApiVisualCrossing:
def __init__(self):
self.data = None
def refresh(self, location="", api_key="", data_units="metric"):
url = (
f"https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/"
f"{location}/today"
f"?unitGroup={data_units}"
f"&key={api_key}"
"&include=fcst%2Ccurrent"
)
result = requests.get(url)
if result.status_code == 200:
self.data = result.json()
else:
self.data = None
def get_timestamp(self):
return self.get_element(("currentConditions", "datetime"), "N/A")
def get_resolved_address(self):
ret_val = "N/A"
if self.data is not None:
ret_val = self.data["resolvedAddress"]
return ret_val
def get_temperature(self):
return self.get_element(("currentConditions", "temp"), "N/A")
def get_feels_like_temperature(self):
return self.get_element(("currentConditions", "feelslike"), "N/A")
def get_low_temperature(self):
return self.get_element(("days", "tempmin"), "N/A")
def get_high_temperature(self):
return self.get_element(("days", "tempmax"), "N/A")
def get_wind_speed(self):
return self.get_element(("currentConditions", "windspeed"), "N/A")
def get_wind_gust(self):
return self.get_element(("currentConditions", "windgust"), "0")
def get_wind_direction(self):
ret_val = self.get_element(("currentConditions", "winddir"), "N/A")
if is_number(ret_val):
ret_val = deg_to_compass(ret_val)
return ret_val
def get_precip(self):
return self.get_element(("currentConditions", "precip"), "0")
def get_precip_prob(self):
return self.get_element(("currentConditions", "precipprob"), "0")
def get_element(self, keys, default="", round_val=True):
ret_val = default
if self.data is not None:
ret_val = self.data[keys[0]]
if isinstance(ret_val, list):
ret_val = ret_val[0][keys[1]]
else:
ret_val = ret_val[keys[1]]
if ret_val:
                if round_val and is_number(ret_val):
ret_val = round(float(ret_val))
else:
ret_val = default
return ret_val
if __name__ == "__main__":
api = ApiVisualCrossing()
api.refresh("32.52402,-97.29605", "")
print(json.dumps(api.data, indent=4))
# print('Address: ', api.get_resolved_address())
# print('Time: ', api.get_timestamp())
# print('Temperature: ', api.get_temperature())
# print('Feels Like: ', api.get_feels_like_temperature())
# print('Low Temperature: ', api.get_low_temperature())
# print('High Temperature: ', api.get_high_temperature())
# print('Wind Speed: ', api.get_wind_speed())
# print('Wind Gust: ', api.get_wind_gust())
# print('Wind Direction From: ', api.get_wind_direction())
# print('Precipitation: ', api.get_precip())
# print('Precipitation Probability: ', api.get_precip_prob())
| 3.140625 | 3 |
lib/galaxy/webapps/galaxy/services/tools.py | thepineapplepirate/galaxy | 0 | 12787876 | <reponame>thepineapplepirate/galaxy
import logging
import shutil
import tempfile
from json import dumps
from typing import (
Any,
Dict,
List,
Optional,
Union,
)
from starlette.datastructures import UploadFile
from galaxy import (
exceptions,
util,
)
from galaxy.config import GalaxyAppConfiguration
from galaxy.managers.collections_util import dictify_dataset_collection_instance
from galaxy.managers.context import (
ProvidesHistoryContext,
ProvidesUserContext,
)
from galaxy.managers.histories import HistoryManager
from galaxy.model import PostJobAction
from galaxy.schema.fetch_data import (
FetchDataFormPayload,
FetchDataPayload,
FilesPayload,
)
from galaxy.security.idencoding import IdEncodingHelper
from galaxy.tools import Tool
from galaxy.tools.search import ToolBoxSearch
from galaxy.webapps.galaxy.services.base import ServiceBase
from ._fetch_util import validate_and_normalize_targets
log = logging.getLogger(__name__)
class ToolsService(ServiceBase):
def __init__(
self,
config: GalaxyAppConfiguration,
toolbox_search: ToolBoxSearch,
security: IdEncodingHelper,
history_manager: HistoryManager,
):
super().__init__(security)
self.config = config
self.toolbox_search = toolbox_search
self.history_manager = history_manager
def create_fetch(
self,
trans: ProvidesHistoryContext,
fetch_payload: Union[FetchDataFormPayload, FetchDataPayload],
files: Optional[List[UploadFile]] = None,
):
payload = fetch_payload.dict(exclude_unset=True)
request_version = "1"
history_id = payload.pop("history_id")
clean_payload = {}
files_payload = {}
if files:
for i, upload_file in enumerate(files):
with tempfile.NamedTemporaryFile(
dir=trans.app.config.new_file_path, prefix="upload_file_data_", delete=False
) as dest:
shutil.copyfileobj(upload_file.file, dest)
upload_file.file.close()
files_payload[f"files_{i}|file_data"] = FilesPayload(
filename=upload_file.filename, local_filename=dest.name
)
for key, value in payload.items():
if key == "key":
continue
if key.startswith("files_") or key.startswith("__files_"):
files_payload[key] = value
continue
clean_payload[key] = value
clean_payload["check_content"] = self.config.check_upload_content
validate_and_normalize_targets(trans, clean_payload)
request = dumps(clean_payload)
create_payload = {
"tool_id": "__DATA_FETCH__",
"history_id": history_id,
"inputs": {
"request_version": request_version,
"request_json": request,
"file_count": str(len(files_payload)),
},
}
create_payload.update(files_payload)
return self._create(trans, create_payload)
def _create(self, trans: ProvidesHistoryContext, payload, **kwd):
if trans.user_is_bootstrap_admin:
raise exceptions.RealUserRequiredException("Only real users can execute tools or run jobs.")
action = payload.get("action")
if action == "rerun":
raise Exception("'rerun' action has been deprecated")
# Get tool.
tool_version = payload.get("tool_version")
tool_id = payload.get("tool_id")
tool_uuid = payload.get("tool_uuid")
get_kwds = dict(
tool_id=tool_id,
tool_uuid=tool_uuid,
tool_version=tool_version,
)
if tool_id is None and tool_uuid is None:
raise exceptions.RequestParameterMissingException("Must specify either a tool_id or a tool_uuid.")
tool = trans.app.toolbox.get_tool(**get_kwds)
if not tool:
log.debug(f"Not found tool with kwds [{get_kwds}]")
raise exceptions.ToolMissingException("Tool not found.")
if not tool.allow_user_access(trans.user):
raise exceptions.ItemAccessibilityException("Tool not accessible.")
if self.config.user_activation_on:
if not trans.user:
log.warning("Anonymous user attempts to execute tool, but account activation is turned on.")
elif not trans.user.active:
log.warning(
f'User "{trans.user.email}" attempts to execute tool, but account activation is turned on and user account is not active.'
)
# Set running history from payload parameters.
# History not set correctly as part of this API call for
# dataset upload.
history_id = payload.get("history_id")
if history_id:
decoded_id = self.decode_id(history_id)
target_history = self.history_manager.get_owned(decoded_id, trans.user, current_history=trans.history)
else:
target_history = None
# Set up inputs.
inputs = payload.get("inputs", {})
if not isinstance(inputs, dict):
raise exceptions.RequestParameterInvalidException(f"inputs invalid {inputs}")
# Find files coming in as multipart file data and add to inputs.
for k, v in payload.items():
if k.startswith("files_") or k.startswith("__files_"):
inputs[k] = v
# for inputs that are coming from the Library, copy them into the history
self._patch_library_inputs(trans, inputs, target_history)
# TODO: encode data ids and decode ids.
# TODO: handle dbkeys
params = util.Params(inputs, sanitize=False)
incoming = params.__dict__
# use_cached_job can be passed in via the top-level payload or among the tool inputs.
# I think it should be a top-level parameter, but because the selector is implemented
# as a regular tool parameter we accept both.
use_cached_job = payload.get("use_cached_job", False) or util.string_as_bool(
inputs.get("use_cached_job", "false")
)
input_format = str(payload.get("input_format", "legacy"))
vars = tool.handle_input(
trans, incoming, history=target_history, use_cached_job=use_cached_job, input_format=input_format
)
new_pja_flush = False
for job in vars.get("jobs", []):
if inputs.get("send_email_notification", False):
# Unless an anonymous user is invoking this via the API it
# should never be an option, but check and enforce that here
if trans.user is None:
raise exceptions.ToolExecutionError("Anonymously run jobs cannot send an email notification.")
else:
job_email_action = PostJobAction("EmailAction")
job.add_post_job_action(job_email_action)
new_pja_flush = True
if new_pja_flush:
trans.sa_session.flush()
return self._handle_inputs_output_to_api_response(trans, tool, target_history, vars)
def _handle_inputs_output_to_api_response(self, trans, tool, target_history, vars):
# TODO: check for errors and ensure that output dataset(s) are available.
output_datasets = vars.get("out_data", [])
rval: Dict[str, Any] = {"outputs": [], "output_collections": [], "jobs": [], "implicit_collections": []}
rval["produces_entry_points"] = tool.produces_entry_points
job_errors = vars.get("job_errors", [])
if job_errors:
# If we are here - some jobs were successfully executed but some failed.
rval["errors"] = job_errors
outputs = rval["outputs"]
# TODO:?? poss. only return ids?
for output_name, output in output_datasets:
output_dict = output.to_dict()
# add the output name back into the output data structure
# so it's possible to figure out which newly created elements
# correspond with which tool file outputs
output_dict["output_name"] = output_name
outputs.append(trans.security.encode_dict_ids(output_dict, skip_startswith="metadata_"))
for job in vars.get("jobs", []):
rval["jobs"].append(self.encode_all_ids(job.to_dict(view="collection"), recursive=True))
for output_name, collection_instance in vars.get("output_collections", []):
history = target_history or trans.history
output_dict = dictify_dataset_collection_instance(
collection_instance,
security=trans.security,
url_builder=trans.url_builder,
parent=history,
)
output_dict["output_name"] = output_name
rval["output_collections"].append(output_dict)
for output_name, collection_instance in vars.get("implicit_collections", {}).items():
history = target_history or trans.history
output_dict = dictify_dataset_collection_instance(
collection_instance,
security=trans.security,
url_builder=trans.url_builder,
parent=history,
)
output_dict["output_name"] = output_name
rval["implicit_collections"].append(output_dict)
return rval
def _search(self, q, view):
"""
Perform the search on the given query.
        Boosts and number of results are configurable in the galaxy.ini file.
:param q: the query to search with
:type q: str
:return: Dictionary containing the tools' ids of the best hits.
:return type: dict
"""
panel_view = view or self.config.default_panel_view
tool_name_boost = self.config.get("tool_name_boost", 9)
tool_id_boost = self.config.get("tool_id_boost", 9)
tool_section_boost = self.config.get("tool_section_boost", 3)
tool_description_boost = self.config.get("tool_description_boost", 2)
tool_label_boost = self.config.get("tool_label_boost", 1)
tool_stub_boost = self.config.get("tool_stub_boost", 5)
tool_help_boost = self.config.get("tool_help_boost", 0.5)
tool_search_limit = self.config.get("tool_search_limit", 20)
tool_enable_ngram_search = self.config.get("tool_enable_ngram_search", False)
tool_ngram_minsize = self.config.get("tool_ngram_minsize", 3)
tool_ngram_maxsize = self.config.get("tool_ngram_maxsize", 4)
results = self.toolbox_search.search(
q=q,
panel_view=panel_view,
tool_name_boost=tool_name_boost,
tool_id_boost=tool_id_boost,
tool_section_boost=tool_section_boost,
tool_description_boost=tool_description_boost,
tool_label_boost=tool_label_boost,
tool_stub_boost=tool_stub_boost,
tool_help_boost=tool_help_boost,
tool_search_limit=tool_search_limit,
tool_enable_ngram_search=tool_enable_ngram_search,
tool_ngram_minsize=tool_ngram_minsize,
tool_ngram_maxsize=tool_ngram_maxsize,
)
return results
def _patch_library_inputs(self, trans: ProvidesHistoryContext, inputs, target_history):
"""
Transform inputs from the data library to history items.
"""
for k, v in inputs.items():
new_value = self._patch_library_dataset(trans, v, target_history)
if new_value:
v = new_value
elif isinstance(v, dict) and "values" in v:
for index, value in enumerate(v["values"]):
patched = self._patch_library_dataset(trans, value, target_history)
if patched:
v["values"][index] = patched
inputs[k] = v
def _patch_library_dataset(self, trans: ProvidesHistoryContext, v, target_history):
if isinstance(v, dict) and "id" in v and v.get("src") == "ldda":
ldda = trans.sa_session.query(trans.app.model.LibraryDatasetDatasetAssociation).get(self.decode_id(v["id"]))
if trans.user_is_admin or trans.app.security_agent.can_access_dataset(
trans.get_current_user_roles(), ldda.dataset
):
return ldda.to_history_dataset_association(target_history, add_to_history=True)
#
# -- Helper methods --
#
def _get_tool(self, trans, id, tool_version=None, user=None) -> Tool:
tool = trans.app.toolbox.get_tool(id, tool_version)
if not tool:
raise exceptions.ObjectNotFound(f"Could not find tool with id '{id}'.")
if not tool.allow_user_access(user):
raise exceptions.AuthenticationFailed(f"Access denied, please login for tool with id '{id}'.")
return tool
def _detect(self, trans: ProvidesUserContext, tool_id):
"""
Detect whether the tool with the given id is installed.
:param tool_id: exact id of the tool
:type tool_id: str
:return: list with available versions
"return type: list
"""
tools = trans.app.toolbox.get_tool(tool_id, get_all_versions=True)
detected_versions = []
if tools:
for tool in tools:
if tool and tool.allow_user_access(trans.user):
detected_versions.append(tool.version)
return detected_versions
| 1.953125 | 2 |
house/items.py | huajianmao/house | 0 | 12787877 | <reponame>huajianmao/house
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class LianJiaBriefItem(scrapy.Item):
    name = scrapy.Field()
    url = scrapy.Field()
    lianJiaBianHao = scrapy.Field()
xiaoQuMingCheng = scrapy.Field()
xiaoQuUrl = scrapy.Field()
fangWuHuXing = scrapy.Field()
    fangWuMianJi = scrapy.Field()
    fangWuChaoXiang = scrapy.Field()
zhuangXiuQingKuang = scrapy.Field()
peiBeiDianTi = scrapy.Field()
suoZaiLouCeng = scrapy.Field()
zongLouCeng = scrapy.Field()
    jianFangShiJian = scrapy.Field()
    jianZhuLeiXing = scrapy.Field()
    shangQuan = scrapy.Field()
shangQuanUrl = scrapy.Field()
guanZhuRenShu = scrapy.Field()
kanGuoRenShu = scrapy.Field()
faBuShiJian = scrapy.Field()
fangYuanBiaoQian = scrapy.Field()
zongJiaGe = scrapy.Field()
danWeiFangJia = scrapy.Field()
class LianJiaDetailItem(scrapy.Item):
# define the fields for your item here like:
name = scrapy.Field()
lianJiaBianHao = scrapy.Field()
guanZhuRenShu = scrapy.Field()
kanGuoRenShu = scrapy.Field()
zongJiaGe = scrapy.Field()
danWeiFangJia = scrapy.Field()
    shuiFeiMiaoShu = scrapy.Field()
    fangWuChaoXiang = scrapy.Field()
    fangWuMianJi = scrapy.Field()
    jianFangShiJian = scrapy.Field()
xiaoQuMingCheng = scrapy.Field()
suoZaiQuYu = scrapy.Field()
desc = scrapy.Field()
fangWuHuXing = scrapy.Field()
suoZaiLouCeng = scrapy.Field()
zongLouCeng = scrapy.Field()
jianZhuMianJi = scrapy.Field()
jianZhuLeiXing = scrapy.Field()
fangWuChaoXiang = scrapy.Field()
jianZhuJieGou = scrapy.Field()
zhuangXiuQingKuang = scrapy.Field()
tiHuBiLi = scrapy.Field()
gongNuanFangShi = scrapy.Field()
peiBeiDianTi = scrapy.Field()
guaPaiShiJian = scrapy.Field()
jiaoYiQuanSu = scrapy.Field()
shangCiJiaoYi = scrapy.Field()
fangWuYongTu = scrapy.Field()
fangBenNianXian = scrapy.Field()
chanQuanSuoShu = scrapy.Field()
diYaXinXi = scrapy.Field()
fangBenBeiJian = scrapy.Field()
fangYuanBiaoQian = scrapy.Field()
heXinMaiDian = scrapy.Field()
huXingJieShao = scrapy.Field()
shuiFeiJieXi = scrapy.Field()
jiaoTongChuXing = scrapy.Field()
xiaoQuJieShao = scrapy.Field()
    huXingFenJian = scrapy.Field()
    fangYuanZhaoPian = scrapy.Field()
    kanFangJiLu = scrapy.Field()
| 2.078125 | 2 |
MyApi/scrapingApp/api/serializer.py | Georgitanev/py_django_scrape | 0 | 12787878 | """ Serializer"""
from rest_framework import serializers
from scrapingApp.models import Parliament1
class ParliamentSerializer(serializers.ModelSerializer):
""" table columns """
class Meta:
""" table columns """
model = Parliament1
fields = [
"id",
"date_born",
"name",
"place_born",
"profession",
"lang",
"party",
"email",
"fb",
]
depth = 1
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = Parliament1
fields = ["name"]
depth = 1
| 2.90625 | 3 |
src/iris/service/auth/service.py | iris-dni/iris-backend | 2 | 12787879 | <gh_stars>1-10
from pyramid import security
from lovely.pyrest.rest import RestService, rpcmethod_route, rpcmethod_view
from iris.service.rest.swagger import swagger_reduce_response
from iris.service.security.security import login_user, logout_user
from iris.service.content.user import SessionUser
from ..endpoint import EndpointErrorMixin, BadRequest
from ..errors import Errors
from .ssotoken import SSOToken
from .sso import get_or_create_sso_user, verify_message
@RestService("auth_api")
class AuthService(EndpointErrorMixin):
def __init__(self, request):
self.request = request
@rpcmethod_route(request_method='OPTIONS',
route_suffix='/whoami')
@rpcmethod_view(http_cache=0,
permission=security.NO_PERMISSION_REQUIRED)
def whoamiOptions(self, **kwargs):
return {}
@rpcmethod_route(request_method='GET',
route_suffix='/whoami')
@rpcmethod_view(http_cache=0)
@swagger_reduce_response
def whoami(self, **kwargs):
try:
self.request.user = self._ssouser(allow_token_login=True)
except BadRequest as e:
if e.error_code != Errors.no_parameters:
raise
return self._whoami()
def _whoami(self):
user = self.request.user
authenticated = (user is not None
and not SessionUser.is_session_user(user)
)
result = {
'status': authenticated and 'ok' or 'unauthenticated'
}
if authenticated:
result['data'] = user.get_source()
return result
@rpcmethod_route(request_method='OPTIONS',
route_suffix='/ssologin')
@rpcmethod_view(http_cache=0,
permission=security.NO_PERMISSION_REQUIRED)
def ssologinOptions(self, **kwargs):
return {}
@rpcmethod_route(request_method='POST',
route_suffix='/ssologin')
@rpcmethod_view(http_cache=0)
@swagger_reduce_response
def ssologin(self, **kwargs):
self._ssouser(allow_login=True)
return self._whoami()
def _ssouser(self, allow_login=False, allow_token_login=False):
"""Get the sso user from sso data
Data can be provided as sso/apikey combination or as token.
If the sso data is empty or contains no email ('{}') a logout is
performed on self.request.response.
allow_login=True will also login the user
allow_token_login=True will login the user if there was a token used
"""
data = self.request.swagger_data
sso = data.get('sso')
token = data.get('token')
if (sso is None) == (token is None):
# at least one of 'sso' or 'token' must be provided but not
# both together.
if sso is None:
raise self.bad_request(Errors.no_parameters)
raise self.bad_request(Errors.too_many_parameters)
ssodata = self.request.sso_data()
if ssodata is None:
logout_user(self.request, self.request.response)
return None
# With sso data we can get the sso user and login
user = get_or_create_sso_user(ssodata)
if (user is not None
and (allow_login
or (token is not None and allow_token_login)
)
):
login_user(self.request, self.request.response, user)
else:
logout_user(self.request, self.request.response)
return user
@rpcmethod_route(request_method='OPTIONS',
route_suffix='/ssotoken')
@rpcmethod_view(http_cache=0,
permission=security.NO_PERMISSION_REQUIRED)
def ssotokenOptions(self, **kwargs):
return {}
@rpcmethod_route(request_method='POST',
route_suffix='/ssotoken')
@rpcmethod_view(http_cache=0)
@swagger_reduce_response
def ssotoken(self, **kwargs):
data = self.request.swagger_data
sso = data['sso']
apikey = data['apikey']
try:
verify_message(sso, apikey)
except ValueError:
raise self.bad_request(Errors.sso_invalid_data)
except KeyError:
raise self.bad_request(Errors.sso_unknown_api_key)
token = SSOToken(sso=sso, apikey=apikey)
token.store(refresh=True)
return {
"token": token.token
}
@rpcmethod_route(request_method='OPTIONS',
route_suffix='/logout')
@rpcmethod_view(http_cache=0,
permission=security.NO_PERMISSION_REQUIRED)
def logoutOptions(self, **kwargs):
return {}
@rpcmethod_route(request_method='POST',
route_suffix='/logout')
@rpcmethod_view(http_cache=0)
@swagger_reduce_response
def logout(self, **kwargs):
logout_user(self.request, self.request.response)
return {}
| 1.992188 | 2 |
recommend_service/recommend_service.py | MakingL/music_recommendation | 53 | 12787880 | <reponame>MakingL/music_recommendation
import os
from flask import Flask, request, json, Response, abort
from keras import backend as K
import dl_recommend.recommand as recommend_dl
from collaborative_filtering import cf_recommendation
from download_juno.download_juno import JunoDownload
app = Flask(__name__)
@app.route('/')
def hello_world():
return abort(404)
@app.route('/cf_recommend', methods=["POST", "GET"])
def get_cf_recommendation():
    """ Fetch the collaborative-filtering (CF) recommendation result. """
json_data = request.get_json()
message = json_data.get("msg")
if message is None:
        # invalid parameters
return "No cf_data"
elif message != "cf_recommend":
return "No cf_data"
raw_uid = json_data.get("raw_uid")
recommend_result = cf_recommendation.collaborative_fitlering(raw_uid)
return Response(json.dumps(recommend_result), mimetype='application/json')
@app.route('/dl_recommend', methods=["POST"])
def get_dl_recommendation():
    """ Fetch the deep-learning (DL) recommendation result. """
json_data = request.get_json()
message = json_data.get("msg")
if message is None:
        # invalid parameters
return "No dl_data"
elif message != "dl_recommend":
return "No dl_data"
track_path = json_data.get("track_path")
if not os.path.exists(track_path):
        raise RuntimeError("Track path does not exist: {}".format(track_path))
try:
recommend_object = recommend_dl.Recommendation(track_path)
track_genre, recommend_result = recommend_object.recommend_similar_track()
except Exception as e:
        # Keras does not currently support Flask multi-processing; if a conflict occurs, cancel the previous task
K.clear_session()
recommend_result = "500"
return recommend_result
return Response(json.dumps(recommend_result), mimetype='application/json')
@app.route('/download_juno', methods=["POST"])
def request_download_juno():
json_data = request.get_json()
message = json_data.get("msg")
if message is None:
        # invalid parameters
return "No msg_data"
elif message != "download_juno":
return "No msg_data"
track_url = json_data.get("track_url")
juno_download = JunoDownload()
data = juno_download.download_track(track_url)
if data is None:
return "500"
return data
if __name__ == '__main__':
app.run(host="0.0.0.0",
debug=False,
port=6016)
| 2.53125 | 3 |
tools/bitshares-account_balances.py | nomeycoin/extinction-event- | 0 | 12787881 | <gh_stars>0
import websocket #pip install websocket-client
websocket.enableTrace(False)
from ast import literal_eval as literal
def database_call(node, call):
while 1:
try:
call = call.replace("'",'"') # never use single quotes
ws = websocket.create_connection(node)
print('')
print((call.split(',"params":')[1]).rstrip('}'))
print('-----------------------------------------------------------')
ws.send(call)
# 'result' key of literally evaluated
# string representation of dictionary from websocket
ret = literal(ws.recv())['result']
print (ret)
ws.close()
return ret
except Exception as e:
print (e.args)
pass
def account_balances(node, account_name):
Z = '{"id":1,"method":"call","params":["database",'
# make call for raw account balances as returned by api
get_named_account_balances = Z + '"get_named_account_balances",["%s", [] ]]}' % (account_name)
raw_balances = database_call(node, get_named_account_balances)
# make list of asset_id's in raw account balances
asset_ids = []
for i in range(len(raw_balances)):
asset_ids.append(raw_balances[i]['asset_id'])
# make a second api request for additional data about each asset
get_assets = Z + '"get_assets",[%s]]}' % asset_ids
raw_assets = database_call(node, get_assets)
# create a common key "asset_id" for both list of dicts
# also extract the symbol and precision
id_sym_prec = []
for i in range(len(raw_assets)):
id_sym_prec.append({'asset_id':raw_assets[i]['id'],
'symbol':raw_assets[i]['symbol'],
'precision':raw_assets[i]['precision'], })
# merge the two list of dicts with common key "asset_id"
data = {}
lists = [raw_balances, id_sym_prec]
for each_list in lists:
for each_dict in each_list:
data.setdefault(each_dict['asset_id'], {}).update(each_dict)
# convert back to list
data = list(data.values())
# create a new dictionary containing only the symbol and quantity
ret = {}
for i in range(len(data)):
qty = float(data[i]['amount'])/10**float(data[i]['precision'])
ret[data[i]['symbol']] = qty
return raw_balances, ret
#node = 'wss://bts-seoul.clockwork.gr' # websocket address
node = 'wss://api.bts.mobi/wss'
account_name = 'abc123' # string
raw_balances, balances = account_balances(node, account_name)
print('')
print('balances')
print(balances)
| 2.875 | 3 |
B2G/gecko/testing/mozbase/mozrunner/mozrunner/runner.py | wilebeast/FireFox-OS | 3 | 12787882 | <gh_stars>1-10
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
__all__ = ['Runner', 'ThunderbirdRunner', 'FirefoxRunner', 'runners', 'CLI', 'cli', 'package_metadata']
import mozinfo
import optparse
import os
import platform
import subprocess
import sys
import ConfigParser
from utils import get_metadata_from_egg
from utils import findInPath
from mozprofile import *
from mozprocess.processhandler import ProcessHandler
if mozinfo.isMac:
from plistlib import readPlist
package_metadata = get_metadata_from_egg('mozrunner')
# Map of debugging programs to information about them
# from http://mxr.mozilla.org/mozilla-central/source/build/automationutils.py#59
debuggers = {'gdb': {'interactive': True,
'args': ['-q', '--args'],},
'valgrind': {'interactive': False,
'args': ['--leak-check=full']}
}
def debugger_arguments(debugger, arguments=None, interactive=None):
"""
finds debugger arguments from debugger given and defaults
* debugger : debugger name or path to debugger
* arguments : arguments to the debugger, or None to use defaults
* interactive : whether the debugger should be run in interactive mode, or None to use default
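    e.g. debugger_arguments('gdb') may return (['<path to gdb>', '-q', '--args'], True)
    when gdb resolves somewhere on the PATH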
"""
# find debugger executable if not a file
executable = debugger
if not os.path.exists(executable):
executable = findInPath(debugger)
if executable is None:
raise Exception("Path to '%s' not found" % debugger)
# if debugger not in dictionary of knowns return defaults
dirname, debugger = os.path.split(debugger)
if debugger not in debuggers:
return ([executable] + (arguments or []), bool(interactive))
# otherwise use the dictionary values for arguments unless specified
if arguments is None:
arguments = debuggers[debugger].get('args', [])
if interactive is None:
interactive = debuggers[debugger].get('interactive', False)
return ([executable] + arguments, interactive)
class Runner(object):
"""Handles all running operations. Finds bins, runs and kills the process."""
profile_class = Profile # profile class to use by default
@classmethod
def create(cls, binary=None, cmdargs=None, env=None, kp_kwargs=None, profile_args=None,
clean_profile=True, process_class=ProcessHandler):
profile = cls.profile_class(**(profile_args or {}))
return cls(profile, binary=binary, cmdargs=cmdargs, env=env, kp_kwargs=kp_kwargs,
clean_profile=clean_profile, process_class=process_class)
def __init__(self, profile, binary, cmdargs=None, env=None,
kp_kwargs=None, clean_profile=True, process_class=ProcessHandler):
self.process_handler = None
self.process_class = process_class
self.profile = profile
self.clean_profile = clean_profile
# find the binary
self.binary = binary
if not self.binary:
raise Exception("Binary not specified")
if not os.path.exists(self.binary):
raise OSError("Binary path does not exist: %s" % self.binary)
# allow Mac binaries to be specified as an app bundle
plist = '%s/Contents/Info.plist' % self.binary
if mozinfo.isMac and os.path.exists(plist):
info = readPlist(plist)
self.binary = os.path.join(self.binary, "Contents/MacOS/",
info['CFBundleExecutable'])
self.cmdargs = cmdargs or []
_cmdargs = [i for i in self.cmdargs
if i != '-foreground']
if len(_cmdargs) != len(self.cmdargs):
# foreground should be last; see
# - https://bugzilla.mozilla.org/show_bug.cgi?id=625614
# - https://bugzilla.mozilla.org/show_bug.cgi?id=626826
self.cmdargs = _cmdargs
self.cmdargs.append('-foreground')
# process environment
if env is None:
self.env = os.environ.copy()
else:
self.env = env.copy()
# allows you to run an instance of Firefox separately from any other instances
self.env['MOZ_NO_REMOTE'] = '1'
# keeps Firefox attached to the terminal window after it starts
self.env['NO_EM_RESTART'] = '1'
# set the library path if needed on linux
if sys.platform == 'linux2' and self.binary.endswith('-bin'):
dirname = os.path.dirname(self.binary)
if os.environ.get('LD_LIBRARY_PATH', None):
self.env['LD_LIBRARY_PATH'] = '%s:%s' % (os.environ['LD_LIBRARY_PATH'], dirname)
else:
self.env['LD_LIBRARY_PATH'] = dirname
# arguments for ProfessHandler.Process
self.kp_kwargs = kp_kwargs or {}
@property
def command(self):
"""Returns the command list to run."""
return [self.binary, '-profile', self.profile.profile]
def get_repositoryInfo(self):
"""Read repository information from application.ini and platform.ini."""
config = ConfigParser.RawConfigParser()
dirname = os.path.dirname(self.binary)
repository = { }
for file, section in [('application', 'App'), ('platform', 'Build')]:
config.read(os.path.join(dirname, '%s.ini' % file))
for key, id in [('SourceRepository', 'repository'),
('SourceStamp', 'changeset')]:
try:
repository['%s_%s' % (file, id)] = config.get(section, key);
except:
repository['%s_%s' % (file, id)] = None
return repository
def is_running(self):
return self.process_handler is not None
def start(self, debug_args=None, interactive=False, timeout=None, outputTimeout=None):
"""
Run self.command in the proper environment.
- debug_args: arguments for the debugger
- interactive: uses subprocess.Popen directly
- read_output: sends program output to stdout [default=False]
- timeout: see process_handler.waitForFinish
- outputTimeout: see process_handler.waitForFinish
"""
# ensure you are stopped
self.stop()
# ensure the profile exists
if not self.profile.exists():
self.profile.reset()
assert self.profile.exists(), "%s : failure to reset profile" % self.__class__.__name__
cmd = self._wrap_command(self.command+self.cmdargs)
# attach a debugger, if specified
if debug_args:
cmd = list(debug_args) + cmd
if interactive:
self.process_handler = subprocess.Popen(cmd, env=self.env)
# TODO: other arguments
else:
# this run uses the managed processhandler
self.process_handler = self.process_class(cmd, env=self.env, **self.kp_kwargs)
self.process_handler.run(timeout, outputTimeout)
def wait(self, timeout=None):
"""
Wait for the app to exit.
If timeout is not None, will return after timeout seconds.
        Use is_running() to determine whether or not a timeout occurred.
Timeout is ignored if interactive was set to True.
"""
if self.process_handler is None:
return
if isinstance(self.process_handler, subprocess.Popen):
self.process_handler.wait()
else:
self.process_handler.wait(timeout)
if self.process_handler.proc.poll() is None:
# waitForFinish timed out
return
self.process_handler = None
def stop(self):
"""Kill the app"""
if self.process_handler is None:
return
self.process_handler.kill()
self.process_handler = None
def reset(self):
"""
reset the runner between runs
currently, only resets the profile, but probably should do more
"""
self.profile.reset()
def cleanup(self):
self.stop()
if self.clean_profile:
self.profile.cleanup()
def _wrap_command(self, cmd):
"""
If running on OS X 10.5 or older, wrap |cmd| so that it will
be executed as an i386 binary, in case it's a 32-bit/64-bit universal
binary.
"""
if mozinfo.isMac and hasattr(platform, 'mac_ver') and \
platform.mac_ver()[0][:4] < '10.6':
return ["arch", "-arch", "i386"] + cmd
return cmd
__del__ = cleanup
class FirefoxRunner(Runner):
"""Specialized Runner subclass for running Firefox."""
profile_class = FirefoxProfile
def __init__(self, profile, binary=None, **kwargs):
# take the binary from BROWSER_PATH environment variable
if (not binary) and 'BROWSER_PATH' in os.environ:
binary = os.environ['BROWSER_PATH']
Runner.__init__(self, profile, binary, **kwargs)
class ThunderbirdRunner(Runner):
"""Specialized Runner subclass for running Thunderbird"""
profile_class = ThunderbirdProfile
runners = {'firefox': FirefoxRunner,
'thunderbird': ThunderbirdRunner}
class CLI(MozProfileCLI):
"""Command line interface."""
module = "mozrunner"
def __init__(self, args=sys.argv[1:]):
"""
Setup command line parser and parse arguments
- args : command line arguments
"""
self.metadata = getattr(sys.modules[self.module],
'package_metadata',
{})
version = self.metadata.get('Version')
parser_args = {'description': self.metadata.get('Summary')}
if version:
parser_args['version'] = "%prog " + version
self.parser = optparse.OptionParser(**parser_args)
self.add_options(self.parser)
(self.options, self.args) = self.parser.parse_args(args)
if getattr(self.options, 'info', None):
self.print_metadata()
sys.exit(0)
# choose appropriate runner and profile classes
try:
self.runner_class = runners[self.options.app]
except KeyError:
self.parser.error('Application "%s" unknown (should be one of "firefox" or "thunderbird")' % self.options.app)
def add_options(self, parser):
"""add options to the parser"""
# add profile options
MozProfileCLI.add_options(self, parser)
# add runner options
parser.add_option('-b', "--binary",
dest="binary", help="Binary path.",
metavar=None, default=None)
parser.add_option('--app', dest='app', default='firefox',
help="Application to use [DEFAULT: %default]")
parser.add_option('--app-arg', dest='appArgs',
default=[], action='append',
help="provides an argument to the test application")
parser.add_option('--debugger', dest='debugger',
help="run under a debugger, e.g. gdb or valgrind")
parser.add_option('--debugger-args', dest='debugger_args',
action='append', default=None,
help="arguments to the debugger")
parser.add_option('--interactive', dest='interactive',
action='store_true',
help="run the program interactively")
if self.metadata:
parser.add_option("--info", dest="info", default=False,
action="store_true",
help="Print module information")
### methods for introspecting data
def get_metadata_from_egg(self):
import pkg_resources
ret = {}
dist = pkg_resources.get_distribution(self.module)
if dist.has_metadata("PKG-INFO"):
for line in dist.get_metadata_lines("PKG-INFO"):
key, value = line.split(':', 1)
ret[key] = value
if dist.has_metadata("requires.txt"):
ret["Dependencies"] = "\n" + dist.get_metadata("requires.txt")
return ret
def print_metadata(self, data=("Name", "Version", "Summary", "Home-page",
"Author", "Author-email", "License", "Platform", "Dependencies")):
for key in data:
if key in self.metadata:
print key + ": " + self.metadata[key]
### methods for running
def command_args(self):
"""additional arguments for the mozilla application"""
return self.options.appArgs
def runner_args(self):
"""arguments to instantiate the runner class"""
return dict(cmdargs=self.command_args(),
binary=self.options.binary,
profile_args=self.profile_args())
def create_runner(self):
return self.runner_class.create(**self.runner_args())
def run(self):
runner = self.create_runner()
self.start(runner)
runner.cleanup()
def debugger_arguments(self):
"""
returns a 2-tuple of debugger arguments:
(debugger_arguments, interactive)
"""
debug_args = self.options.debugger_args
interactive = self.options.interactive
if self.options.debugger:
debug_args, interactive = debugger_arguments(self.options.debugger)
return debug_args, interactive
def start(self, runner):
"""Starts the runner and waits for Firefox to exit or Keyboard Interrupt.
        Should be overridden to provide custom running of the runner instance."""
# attach a debugger if specified
debug_args, interactive = self.debugger_arguments()
runner.start(debug_args=debug_args, interactive=interactive)
print 'Starting:', ' '.join(runner.command)
try:
runner.wait()
except KeyboardInterrupt:
runner.stop()
def cli(args=sys.argv[1:]):
CLI(args).run()
if __name__ == '__main__':
cli()
| 2.1875 | 2 |
python/dungeon-game/dungeon.py | davejlin/treehouse | 0 | 12787883 | <reponame>davejlin/treehouse<gh_stars>0
import random
CELLS = [(0,0), (0,1), (0,2),
(1,0), (1,1), (1,2),
(2,0), (2,1), (2,2)]
player = (0,0)
door = (0,0)
dragon = (0,0)
def set_initial_positions():
while True:
player = random.choice(CELLS)
door = random.choice(CELLS)
dragon = random.choice(CELLS)
if player != door and door != dragon and player != dragon:
break
return player, door, dragon
def move_player(player, move):
if move == 'UP':
player = (player[0]-1, player[1])
elif move == 'DOWN':
player = (player[0]+1, player[1])
elif move == 'RIGHT':
player = (player[0], player[1]+1)
elif move == 'LEFT':
player = (player[0], player[1]-1)
return player
def get_moves(player):
xPlayer = player[1]
yPlayer = player[0]
moves = []
if yPlayer > 0:
moves.append('UP')
if yPlayer < 2:
moves.append('DOWN')
if xPlayer < 2:
moves.append('RIGHT')
if xPlayer > 0:
moves.append('LEFT')
return moves
def check_win_lose(player, door, dragon):
if player == door:
print("\n*** Congratulations! You escaped!! ***\n")
return True
elif player == dragon:
print("\n*** Sorry, the dragon got you! ***\n")
return True
else:
return False
def draw_map(player):
print(' _ _ _')
tile = '|{}'
for idx, cell in enumerate(CELLS):
if idx in [0, 1, 3, 4, 6, 7]:
if cell == player:
print(tile.format('X'), end = '')
else:
print(tile.format('_'), end = '')
else:
if cell == player:
print(tile.format('X|'))
else:
print(tile.format('_|'))
# main
print("Welcome to the dungeon!")
(player, door, dragon) = set_initial_positions()
while True:
print("You're currently in room {}.".format(player))
draw_map(player)
valid_moves = get_moves(player)
print("You can move {}.".format(valid_moves))
print("Enter QUIT to quit.")
move = input("> ")
move = move.upper()
if move == 'QUIT':
break
if move not in valid_moves:
print("\n*** Sorry, you cannot move {}. Try again!\n".format(move))
continue
player = move_player(player, move)
if check_win_lose(player, door, dragon):
break
| 3.84375 | 4 |
python3/shortest_path_in_binary_matrix.py | joshiaj7/CodingChallenges | 1 | 12787884 | <reponame>joshiaj7/CodingChallenges
from typing import List

import heapq
"""
Best Solution
A* search approach (best-first search with a Chebyshev-distance heuristic)
Space : O(n)
Time : O(n**2)
"""
class Solution:
def shortestPathBinaryMatrix(self, grid: List[List[int]]) -> int:
max_row = len(grid) - 1
max_col = len(grid[0]) - 1
directions = [
(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]
# Helper function to find the neighbors of a given cell.
def get_neighbours(row, col):
for row_difference, col_difference in directions:
new_row = row + row_difference
new_col = col + col_difference
if not(0 <= new_row <= max_row and 0 <= new_col <= max_col):
continue
if grid[new_row][new_col] != 0:
continue
yield (new_row, new_col)
# Helper function for the A* heuristic.
def best_case_estimate(row, col):
return max(max_row - row, max_col - col)
# Check that the first and last cells are open.
if grid[0][0] or grid[max_row][max_col]:
return -1
# Set up the A* search.
visited = set()
# Entries on the priority queue are of the form
# (total distance estimate, distance so far, (cell row, cell col))
priority_queue = [(1 + best_case_estimate(0, 0), 1, (0, 0))]
while priority_queue:
estimate, distance, cell = heapq.heappop(priority_queue)
if cell in visited:
continue
if cell == (max_row, max_col):
return distance
visited.add(cell)
for neighbour in get_neighbours(*cell):
# The check here isn't necessary for correctness, but it
# leads to a substantial performance gain.
if neighbour in visited:
continue
estimate = best_case_estimate(*neighbour) + distance + 1
entry = (estimate, distance + 1, neighbour)
heapq.heappush(priority_queue, entry)
# There was no path.
return -1
| 3.453125 | 3 |
orch/commands/run_suite.py | freecraver/zimp-orchestrator | 0 | 12787885 | <reponame>freecraver/zimp-orchestrator
from cleo import Command
import logging
import glob
from experiment.config import Config
from experiment.experiment import Experiment
class RunSuite(Command):
"""
runs a whole experiment suite
suite
{config_folder=runs : Folder containing experiment config files}
"""
def handle(self):
config_folder = self.argument('config_folder')
logging.basicConfig(level=logging.INFO)
config_files = glob.glob(config_folder + '*.yaml')
logging.info(f'Found {len(config_files)} experiments')
for idx, experiment_config in enumerate(config_files[::-1]):
logging.info(f'Starting with experiment {idx} - {experiment_config}')
experiment = Experiment(Config.from_yaml(experiment_config))
if experiment.exists_in_mlflow():
logging.info('Skipping already performed experiment')
continue
experiment.run()
| 2.3125 | 2 |
datameta/errors.py | ghga-de/datameta | 7 | 12787886 | # Copyright 2021 Universität Tübingen, DKFZ and EMBL for the German Human Genome-Phenome Archive (GHGA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
from pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden
from . import resource, models
def get_error(
base_error,
exception_label: str,
messages: List[str],
fields: Optional[List[Optional[str]]] = None,
entities: Optional[List[Optional[str]]] = None,
):
"""Generate an Error based on the base_error with a custom message
"""
assert len(messages) > 0, "messages cannot be empty"
assert fields is None or len(fields) == len(messages), (
"The fields list must be of same length as messages."
)
assert entities is None or len(entities) == len(messages), (
"The entities list must be of same length as messages."
)
response_body = []
for idx, msg in enumerate(messages):
err : dict = {
"exception": exception_label,
}
if entities is not None and entities[idx] is not None:
err.update(
{
"entity": resource.get_identifier(entities[idx]) if isinstance(entities[idx], models.db.Base) else entities[idx]
}
)
if fields is not None and fields[idx] is not None:
err.update(
{
"field": fields[idx]
}
)
err["message"] = msg
response_body.append(err)
return base_error(json = response_body)
def get_validation_error(
messages: List[str],
fields: Optional[List[Optional[str]]] = None,
entities: Optional[List[Optional[str]]] = None
) -> HTTPBadRequest:
"""Generate a Validation Error (400) with custom message
"""
return get_error(
base_error = HTTPBadRequest,
exception_label = "ValidationError",
messages = messages,
fields = fields,
entities = entities
)
def get_not_modifiable_error() -> HTTPForbidden:
"""Generate a HTTPForbidden (403) error informing
that the resource cannot be modified
"""
return get_error(
base_error = HTTPForbidden,
exception_label = "ResourceNotModifiableError",
messages = ["The resource cannot be modified"],
)
| 1.898438 | 2 |
Scripts/simulation/sickness/sickness_commands.py | velocist/TS4CheatsInfo | 0 | 12787887 | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\sickness\sickness_commands.py
# Compiled at: 2017-08-17 00:31:58
# Size of source mod 2**32: 2877 bytes
from server_commands.argument_helpers import OptionalSimInfoParam, TunableInstanceParam, get_optional_target
import services, sims4
@sims4.commands.Command('sickness.make_sick', command_type=(sims4.commands.CommandType.Automation))
def make_sick(opt_target: OptionalSimInfoParam=None, _connection=None):
target = get_optional_target(opt_target, _connection, target_type=OptionalSimInfoParam)
if target is None:
return False
services.get_sickness_service().make_sick(target)
@sims4.commands.Command('sickness.add', command_type=(sims4.commands.CommandType.Automation))
def add_sickness(sickness_type: TunableInstanceParam(sims4.resources.Types.SICKNESS), opt_target: OptionalSimInfoParam=None, _connection=None):
target = get_optional_target(opt_target, _connection, target_type=OptionalSimInfoParam)
if target is None:
return False
services.get_sickness_service().make_sick(target, sickness=sickness_type)
@sims4.commands.Command('sickness.remove', command_type=(sims4.commands.CommandType.Automation))
def remove_sickness(opt_target: OptionalSimInfoParam=None, _connection=None):
target = get_optional_target(opt_target, _connection, target_type=OptionalSimInfoParam)
if target is None:
return False
services.get_sickness_service().remove_sickness(target)
@sims4.commands.Command('sickness.distribute_sicknesses')
def distribute_sicknesses(_connection=None):
services.get_sickness_service().trigger_sickness_distribution()
@sims4.commands.Command('sickness.update_diagnosis')
def update_diagnosis(opt_target: OptionalSimInfoParam=None, _connection=None):
target = get_optional_target(opt_target, _connection, target_type=OptionalSimInfoParam)
    if target is None or not target.has_sickness_tracking():
        return False
    target.current_sickness.update_diagnosis(target)
@sims4.commands.Command('sickness.clear_diagnosis')
def clear_diagnosis(opt_target: OptionalSimInfoParam=None, _connection=None):
target = get_optional_target(opt_target, _connection, target_type=OptionalSimInfoParam)
    if target is None or not target.has_sickness_tracking():
        return False
    target.sickness_tracker.clear_diagnosis_data()
| 1.882813 | 2 |
Offline tasks/Offline_task_10/zad10.py | Szymon-Budziak/ASD_exercises_solutions | 7 | 12787888 | <gh_stars>1-10
class BSTNode:
def __init__(self, key):
self.key = key
self.left = None
self.right = None
self.parent = None
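# Iterative BST lookup: returns the node holding `key`, or None if the key is absent.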
def find(root, key):
while root is not None:
if root.key == key:
return root
elif key < root.key:
root = root.left
else:
root = root.right
return None
def insert(root, key):
previous = None
while root is not None:
if root.key > key:
previous = root
root = root.left
else:
previous = root
root = root.right
if previous.key == key:
return False
elif previous.parent is not None and previous.parent.key == key:
return False
elif previous.left is not None and previous.left.key == key:
return False
elif previous.right is not None and previous.right.key == key:
return False
if key < previous.key:
previous.left = BSTNode(key)
previous.left.parent = previous
return True
elif key > previous.key:
previous.right = BSTNode(key)
previous.right.parent = previous
return True
def minimum(root):
while root.left is not None:
root = root.left
return root.key
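# In-order successor: returns the smallest key greater than actual_root_value, or None if none exists.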
def successor(root, actual_root_value):
actual_root = find(root, actual_root_value)
value = actual_root.key
if actual_root.right is not None:
return minimum(actual_root.right)
while actual_root.parent is not None and actual_root.parent.key < actual_root.key:
actual_root = actual_root.parent
if actual_root.parent is not None:
if actual_root.parent.key < value:
return None
return actual_root.parent.key
if actual_root.key < value:
return None
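# BST deletion by key: leaves and one-child nodes are unlinked from their parent;
# two-child nodes take the key of their in-order successor, which is then removed recursively.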
def remove(root, key):
actual_root = find(root, key)
if actual_root is None:
return False
elif actual_root.right is None:
if actual_root.left is None:
if actual_root.parent.left is not None and actual_root.parent.left.key == actual_root.key:
actual_root.parent.left = None
return True
else:
actual_root.parent.right = None
return True
else:
if actual_root.parent.left is not None and actual_root.parent.left.key == actual_root.key:
actual_root.parent.left = actual_root.left
return True
elif actual_root.parent.right is not None and actual_root.parent.right.key == actual_root.key:
                actual_root.parent.right = actual_root.left
return True
elif actual_root.left is None:
if actual_root.parent.left is not None and actual_root.parent.left.key == actual_root.key:
actual_root.parent.left = actual_root.right
return True
if actual_root.parent.right is not None and actual_root.parent.right.key == actual_root.key:
actual_root.parent.right = actual_root.right
return True
else:
root_value = successor(root, actual_root.key)
remove(root, root_value)
actual_root.key = root_value
return True
root = BSTNode(20)
print(insert(root, 10))
print(insert(root, 27))
print(insert(root, 5))
print(remove(root, 100))
print(insert(root, 15))
print(insert(root, 28))
print(insert(root, 30))
print(remove(root, 30))
print(insert(root, 35))
print(insert(root, 28))
print(insert(root, 40))
print(insert(root, 40))
print(remove(root, 20))
print(remove(root, 40))
print(remove(root, 13))
print(remove(root, 400))
print(remove(root, 30))
| 3.625 | 4 |
sdk/python/pulumi_vault/approle/auth_backend_login.py | pulumi/pulumi-vault | 10 | 12787889 | <gh_stars>1-10
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['AuthBackendLoginArgs', 'AuthBackendLogin']
@pulumi.input_type
class AuthBackendLoginArgs:
def __init__(__self__, *,
role_id: pulumi.Input[str],
backend: Optional[pulumi.Input[str]] = None,
secret_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a AuthBackendLogin resource.
:param pulumi.Input[str] role_id: The ID of the role to log in with.
:param pulumi.Input[str] backend: The unique path of the Vault backend to log in with.
:param pulumi.Input[str] secret_id: The secret ID of the role to log in with. Required
unless `bind_secret_id` is set to false on the role.
"""
pulumi.set(__self__, "role_id", role_id)
if backend is not None:
pulumi.set(__self__, "backend", backend)
if secret_id is not None:
pulumi.set(__self__, "secret_id", secret_id)
@property
@pulumi.getter(name="roleId")
def role_id(self) -> pulumi.Input[str]:
"""
The ID of the role to log in with.
"""
return pulumi.get(self, "role_id")
@role_id.setter
def role_id(self, value: pulumi.Input[str]):
pulumi.set(self, "role_id", value)
@property
@pulumi.getter
def backend(self) -> Optional[pulumi.Input[str]]:
"""
The unique path of the Vault backend to log in with.
"""
return pulumi.get(self, "backend")
@backend.setter
def backend(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "backend", value)
@property
@pulumi.getter(name="secretId")
def secret_id(self) -> Optional[pulumi.Input[str]]:
"""
The secret ID of the role to log in with. Required
unless `bind_secret_id` is set to false on the role.
"""
return pulumi.get(self, "secret_id")
@secret_id.setter
def secret_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "secret_id", value)
@pulumi.input_type
class _AuthBackendLoginState:
def __init__(__self__, *,
accessor: Optional[pulumi.Input[str]] = None,
backend: Optional[pulumi.Input[str]] = None,
client_token: Optional[pulumi.Input[str]] = None,
lease_duration: Optional[pulumi.Input[int]] = None,
lease_started: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
renewable: Optional[pulumi.Input[bool]] = None,
role_id: Optional[pulumi.Input[str]] = None,
secret_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering AuthBackendLogin resources.
:param pulumi.Input[str] accessor: The accessor for the token.
:param pulumi.Input[str] backend: The unique path of the Vault backend to log in with.
:param pulumi.Input[str] client_token: The Vault token created.
:param pulumi.Input[int] lease_duration: How long the token is valid for, in seconds.
:param pulumi.Input[str] lease_started: The date and time the lease started, in RFC 3339 format.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: The metadata associated with the token.
:param pulumi.Input[Sequence[pulumi.Input[str]]] policies: A list of policies applied to the token.
:param pulumi.Input[bool] renewable: Whether the token is renewable or not.
:param pulumi.Input[str] role_id: The ID of the role to log in with.
:param pulumi.Input[str] secret_id: The secret ID of the role to log in with. Required
unless `bind_secret_id` is set to false on the role.
"""
if accessor is not None:
pulumi.set(__self__, "accessor", accessor)
if backend is not None:
pulumi.set(__self__, "backend", backend)
if client_token is not None:
pulumi.set(__self__, "client_token", client_token)
if lease_duration is not None:
pulumi.set(__self__, "lease_duration", lease_duration)
if lease_started is not None:
pulumi.set(__self__, "lease_started", lease_started)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if policies is not None:
pulumi.set(__self__, "policies", policies)
if renewable is not None:
pulumi.set(__self__, "renewable", renewable)
if role_id is not None:
pulumi.set(__self__, "role_id", role_id)
if secret_id is not None:
pulumi.set(__self__, "secret_id", secret_id)
@property
@pulumi.getter
def accessor(self) -> Optional[pulumi.Input[str]]:
"""
The accessor for the token.
"""
return pulumi.get(self, "accessor")
@accessor.setter
def accessor(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "accessor", value)
@property
@pulumi.getter
def backend(self) -> Optional[pulumi.Input[str]]:
"""
The unique path of the Vault backend to log in with.
"""
return pulumi.get(self, "backend")
@backend.setter
def backend(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "backend", value)
@property
@pulumi.getter(name="clientToken")
def client_token(self) -> Optional[pulumi.Input[str]]:
"""
The Vault token created.
"""
return pulumi.get(self, "client_token")
@client_token.setter
def client_token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_token", value)
@property
@pulumi.getter(name="leaseDuration")
def lease_duration(self) -> Optional[pulumi.Input[int]]:
"""
How long the token is valid for, in seconds.
"""
return pulumi.get(self, "lease_duration")
@lease_duration.setter
def lease_duration(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "lease_duration", value)
@property
@pulumi.getter(name="leaseStarted")
def lease_started(self) -> Optional[pulumi.Input[str]]:
"""
The date and time the lease started, in RFC 3339 format.
"""
return pulumi.get(self, "lease_started")
@lease_started.setter
def lease_started(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "lease_started", value)
@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
The metadata associated with the token.
"""
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter
def policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of policies applied to the token.
"""
return pulumi.get(self, "policies")
@policies.setter
def policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "policies", value)
@property
@pulumi.getter
def renewable(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the token is renewable or not.
"""
return pulumi.get(self, "renewable")
@renewable.setter
def renewable(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "renewable", value)
@property
@pulumi.getter(name="roleId")
def role_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the role to log in with.
"""
return pulumi.get(self, "role_id")
@role_id.setter
def role_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role_id", value)
@property
@pulumi.getter(name="secretId")
def secret_id(self) -> Optional[pulumi.Input[str]]:
"""
The secret ID of the role to log in with. Required
unless `bind_secret_id` is set to false on the role.
"""
return pulumi.get(self, "secret_id")
@secret_id.setter
def secret_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "secret_id", value)
class AuthBackendLogin(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backend: Optional[pulumi.Input[str]] = None,
role_id: Optional[pulumi.Input[str]] = None,
secret_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Logs into Vault using the AppRole auth backend. See the [Vault
documentation](https://www.vaultproject.io/docs/auth/approle) for more
information.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] backend: The unique path of the Vault backend to log in with.
:param pulumi.Input[str] role_id: The ID of the role to log in with.
:param pulumi.Input[str] secret_id: The secret ID of the role to log in with. Required
unless `bind_secret_id` is set to false on the role.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: AuthBackendLoginArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Logs into Vault using the AppRole auth backend. See the [Vault
documentation](https://www.vaultproject.io/docs/auth/approle) for more
information.
:param str resource_name: The name of the resource.
:param AuthBackendLoginArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(AuthBackendLoginArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backend: Optional[pulumi.Input[str]] = None,
role_id: Optional[pulumi.Input[str]] = None,
secret_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = AuthBackendLoginArgs.__new__(AuthBackendLoginArgs)
__props__.__dict__["backend"] = backend
if role_id is None and not opts.urn:
raise TypeError("Missing required property 'role_id'")
__props__.__dict__["role_id"] = role_id
__props__.__dict__["secret_id"] = secret_id
__props__.__dict__["accessor"] = None
__props__.__dict__["client_token"] = None
__props__.__dict__["lease_duration"] = None
__props__.__dict__["lease_started"] = None
__props__.__dict__["metadata"] = None
__props__.__dict__["policies"] = None
__props__.__dict__["renewable"] = None
super(AuthBackendLogin, __self__).__init__(
'vault:appRole/authBackendLogin:AuthBackendLogin',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
accessor: Optional[pulumi.Input[str]] = None,
backend: Optional[pulumi.Input[str]] = None,
client_token: Optional[pulumi.Input[str]] = None,
lease_duration: Optional[pulumi.Input[int]] = None,
lease_started: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
renewable: Optional[pulumi.Input[bool]] = None,
role_id: Optional[pulumi.Input[str]] = None,
secret_id: Optional[pulumi.Input[str]] = None) -> 'AuthBackendLogin':
"""
Get an existing AuthBackendLogin resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] accessor: The accessor for the token.
:param pulumi.Input[str] backend: The unique path of the Vault backend to log in with.
:param pulumi.Input[str] client_token: The Vault token created.
:param pulumi.Input[int] lease_duration: How long the token is valid for, in seconds.
:param pulumi.Input[str] lease_started: The date and time the lease started, in RFC 3339 format.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: The metadata associated with the token.
:param pulumi.Input[Sequence[pulumi.Input[str]]] policies: A list of policies applied to the token.
:param pulumi.Input[bool] renewable: Whether the token is renewable or not.
:param pulumi.Input[str] role_id: The ID of the role to log in with.
:param pulumi.Input[str] secret_id: The secret ID of the role to log in with. Required
unless `bind_secret_id` is set to false on the role.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _AuthBackendLoginState.__new__(_AuthBackendLoginState)
__props__.__dict__["accessor"] = accessor
__props__.__dict__["backend"] = backend
__props__.__dict__["client_token"] = client_token
__props__.__dict__["lease_duration"] = lease_duration
__props__.__dict__["lease_started"] = lease_started
__props__.__dict__["metadata"] = metadata
__props__.__dict__["policies"] = policies
__props__.__dict__["renewable"] = renewable
__props__.__dict__["role_id"] = role_id
__props__.__dict__["secret_id"] = secret_id
return AuthBackendLogin(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def accessor(self) -> pulumi.Output[str]:
"""
The accessor for the token.
"""
return pulumi.get(self, "accessor")
@property
@pulumi.getter
def backend(self) -> pulumi.Output[Optional[str]]:
"""
The unique path of the Vault backend to log in with.
"""
return pulumi.get(self, "backend")
@property
@pulumi.getter(name="clientToken")
def client_token(self) -> pulumi.Output[str]:
"""
The Vault token created.
"""
return pulumi.get(self, "client_token")
@property
@pulumi.getter(name="leaseDuration")
def lease_duration(self) -> pulumi.Output[int]:
"""
How long the token is valid for, in seconds.
"""
return pulumi.get(self, "lease_duration")
@property
@pulumi.getter(name="leaseStarted")
def lease_started(self) -> pulumi.Output[str]:
"""
The date and time the lease started, in RFC 3339 format.
"""
return pulumi.get(self, "lease_started")
@property
@pulumi.getter
def metadata(self) -> pulumi.Output[Mapping[str, str]]:
"""
The metadata associated with the token.
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter
def policies(self) -> pulumi.Output[Sequence[str]]:
"""
A list of policies applied to the token.
"""
return pulumi.get(self, "policies")
@property
@pulumi.getter
def renewable(self) -> pulumi.Output[bool]:
"""
Whether the token is renewable or not.
"""
return pulumi.get(self, "renewable")
@property
@pulumi.getter(name="roleId")
def role_id(self) -> pulumi.Output[str]:
"""
The ID of the role to log in with.
"""
return pulumi.get(self, "role_id")
@property
@pulumi.getter(name="secretId")
def secret_id(self) -> pulumi.Output[Optional[str]]:
"""
The secret ID of the role to log in with. Required
unless `bind_secret_id` is set to false on the role.
"""
return pulumi.get(self, "secret_id")
| 1.9375 | 2 |
src/cogs/arxivAPI.py | sdk2k01/S4DS-Bot | 3 | 12787890 | <gh_stars>1-10
import discord
import urllib, urllib.request
from xml.dom import minidom
from discord.ext import commands
class arxiv(commands.Cog):
def __init__(self, client):
self.client = client
    # Gets the top search result from the arXiv API and displays the paper along with related details (including summary) in an embed
    arxivshow_help = '''***Description :***
    Shows the top search result\n
***Syntax :***
`<prefix>arxivshow <keyword>`'''
@commands.command(name ="arxivshow", help = arxivshow_help)
async def arxivshow(self, ctx, *, search):
query = search.replace(" ", "+")
url = f'http://export.arxiv.org/api/query?search_query=all:{query}&start=0&max_results=1'
data = urllib.request.urlopen(url)
mytree = minidom.parseString(data.read().decode('utf-8'))
entry = mytree.getElementsByTagName('entry')
for y in entry:
published = y.getElementsByTagName('published')[0]
title = y.getElementsByTagName('title')[0]
summary = y.getElementsByTagName('summary')[0]
author = y.getElementsByTagName('author')
authors = ''
for x in author:
a_name = x.getElementsByTagName('name')[0]
authors = authors + (a_name.firstChild.data) + ', '
authors = authors[:-2]
link = y.getElementsByTagName('link')[0]
link1 = link.attributes['href'].value
link2 = y.getElementsByTagName('link')[1]
link3 = link2.attributes['href'].value
embed = discord.Embed(title = f'Title: {title.firstChild.data}', description = f'Published on: {published.firstChild.data}', color = discord.Colour.blue())
embed.set_author(name = f'{authors}')
await ctx.send(embed = embed)
embed = discord.Embed(title = 'Summary: ', description = f'{summary.firstChild.data}', color = discord.Colour.green())
embed.add_field(name = 'Link: ', value = f'{link1}', inline = False)
embed.add_field(name = 'Download link: ', value = f'{link3}', inline = False)
await ctx.send(embed = embed)
await ctx.send('.................................................................................................................................................')
    # Gets the top 5 search results (sorted by last updated date) from the arXiv API and displays the papers along with related details (excluding summary) in successive embeds
    arxivshowlud_help = '''***Description :***
    Shows the top 5 papers, sorted by last updated date\n
***Syntax :***
`<prefix>arxivshowlud <keyword>`'''
@commands.command(name ="arxivshowlud", help = arxivshowlud_help)
async def arxivshowlud(self, ctx, *, search):
query = search.replace(" ", "+")
url = f'http://export.arxiv.org/api/query?search_query=all:{query}&start=0&max_results=5&sortBy=lastUpdatedDate&sortOrder=ascending'
data = urllib.request.urlopen(url)
mytree = minidom.parseString(data.read().decode('utf-8'))
entry = mytree.getElementsByTagName('entry')
for y in entry:
published = y.getElementsByTagName('published')[0]
title = y.getElementsByTagName('title')[0]
author = y.getElementsByTagName('author')
authors = ''
for x in author:
a_name = x.getElementsByTagName('name')[0]
authors = authors + (a_name.firstChild.data) + ', '
authors = authors[:-2]
link = y.getElementsByTagName('link')[0]
link1 = link.attributes['href'].value
link2 = y.getElementsByTagName('link')[1]
link3 = link2.attributes['href'].value
embed = discord.Embed(title = f'Title: {title.firstChild.data}', description = f'Published on: {published.firstChild.data}', color = discord.Colour.blue())
embed.set_author(name = f'{authors}')
embed.add_field(name = 'Link: ', value = f'{link1}', inline = False)
embed.add_field(name = 'Download link: ', value = f'{link3}', inline = False)
await ctx.send(embed = embed)
await ctx.send('.................................................................................................................................................')
    # Gets the top 5 search results (sorted by relevance) from the arXiv API and displays the papers along with related details (excluding summary) in successive embeds
    arxivshowr_help = '''***Description :***
    Shows the top 5 papers, sorted by relevance\n
***Syntax :***
`<prefix>arxivshowr <keyword>`'''
@commands.command(name ="arxivshowr", help = arxivshowr_help)
async def arxivshowr(self, ctx, *, search):
query = search.replace(" ", "+")
url = f'http://export.arxiv.org/api/query?search_query=all:{query}&start=0&max_results=5&sortBy=relevance&sortOrder=ascending'
data = urllib.request.urlopen(url)
mytree = minidom.parseString(data.read().decode('utf-8'))
entry = mytree.getElementsByTagName('entry')
for y in entry:
published = y.getElementsByTagName('published')[0]
title = y.getElementsByTagName('title')[0]
author = y.getElementsByTagName('author')
authors = ''
for x in author:
a_name = x.getElementsByTagName('name')[0]
authors = authors+(a_name.firstChild.data) + ', '
authors = authors[:-2]
link = y.getElementsByTagName('link')[0]
link1 = link.attributes['href'].value
link2 = y.getElementsByTagName('link')[1]
link3 = link2.attributes['href'].value
embed = discord.Embed(title = f'Title: {title.firstChild.data}', description = f'Published on: {published.firstChild.data}', color = discord.Colour.blue())
embed.set_author(name = f'{authors}')
embed.add_field(name = 'Link: ', value = f'{link1}', inline = False)
embed.add_field(name = 'Download link: ', value = f'{link3}', inline = False)
await ctx.send(embed = embed)
await ctx.send('.................................................................................................................................................')
    # Gets the top 5 search results (sorted by submitted date) from the arXiv API and displays the papers along with related details (excluding summary) in successive embeds
    arxivshowsd_help = '''***Description :***
    Shows the top 5 papers, sorted by submitted date\n
***Syntax :***
`<prefix>arxivshowsd <keyword>`'''
@commands.command(name="arxivshowsd", help=arxivshowsd_help)
async def arxivshowsd(self, ctx, *, search):
query = search.replace(" ", "+")
url = f'http://export.arxiv.org/api/query?search_query=all:{query}&start=0&max_results=5&sortBy=submittedDate&sortOrder=ascending'
data = urllib.request.urlopen(url)
mytree = minidom.parseString(data.read().decode('utf-8'))
entry = mytree.getElementsByTagName('entry')
for y in entry:
published = y.getElementsByTagName('published')[0]
title = y.getElementsByTagName('title')[0]
author = y.getElementsByTagName('author')
authors = ''
for x in author:
a_name = x.getElementsByTagName('name')[0]
authors = authors + (a_name.firstChild.data) + ', '
authors = authors[:-2]
link = y.getElementsByTagName('link')[0]
link1 = link.attributes['href'].value
link2 = y.getElementsByTagName('link')[1]
link3 = link2.attributes['href'].value
embed = discord.Embed(title = f'Title: {title.firstChild.data}', description = f'Published on: {published.firstChild.data}', color = discord.Colour.blue())
embed.set_author(name = f'{authors}')
embed.add_field(name = 'Link: ', value = f'{link1}', inline = False)
embed.add_field(name = 'Download link: ', value = f'{link3}', inline = False)
await ctx.send(embed = embed)
await ctx.send('.................................................................................................................................................')
    # Gets the top 5 search results from the arXiv API and displays the papers along with related details (including summary) in successive embeds
    arxivshowsumm_help = '''***Description :***
    Shows the top 5 papers along with their summaries\n
***Syntax :***
`<prefix>arxivshowsumm <keyword>`'''
@commands.command(name ="arxivshowsumm", help = arxivshowsumm_help)
async def arxivshowsumm(self, ctx, *, search):
query = search.replace(" ", "+")
url = f'http://export.arxiv.org/api/query?search_query=all:{query}&start=0&max_results=5'
data = urllib.request.urlopen(url)
mytree = minidom.parseString(data.read().decode('utf-8'))
entry = mytree.getElementsByTagName('entry')
for y in entry:
published = y.getElementsByTagName('published')[0]
title = y.getElementsByTagName('title')[0]
author = y.getElementsByTagName('author')
authors = ''
for x in author:
a_name = x.getElementsByTagName('name')[0]
authors = authors + (a_name.firstChild.data) + ', '
authors = authors[:-2]
summary = y.getElementsByTagName('summary')[0]
link = y.getElementsByTagName('link')[0]
link1 = link.attributes['href'].value
link2 = y.getElementsByTagName('link')[1]
link3 = link2.attributes['href'].value
embed = discord.Embed(title = f'Title: {title.firstChild.data}', description = f'Published on: {published.firstChild.data}', color = discord.Colour.blue())
embed.set_author(name = f'{authors}')
await ctx.send(embed = embed)
embed = discord.Embed(title = 'Summary: ', description = f'{summary.firstChild.data}', color = discord.Colour.green())
embed.add_field(name = 'Link: ', value = f'{link1}', inline = False)
embed.add_field(name = 'Download link: ', value = f'{link3}', inline = False)
await ctx.send(embed = embed)
await ctx.send('.................................................................................................................................................')
def setup(client):
client.add_cog(arxiv(client)) | 2.875 | 3 |
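# Usage sketch (not part of the cog): loading this extension into a discord.py 1.x bot.
# The module path "cogs.arxivAPI" and the token value are assumptions for illustration.
if __name__ == "__main__":
    from discord.ext import commands
    bot = commands.Bot(command_prefix="!")
    bot.load_extension("cogs.arxivAPI")
    bot.run("YOUR_DISCORD_TOKEN")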
FATERUI/ui_settingdlg.py | LynnChan706/Fater | 4 | 12787891 | <reponame>LynnChan706/Fater
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'settingdlg.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_settingdlg(object):
def setupUi(self, settingdlg):
settingdlg.setObjectName("settingdlg")
settingdlg.resize(670, 520)
self.verticalLayout = QtWidgets.QVBoxLayout(settingdlg)
self.verticalLayout.setObjectName("verticalLayout")
self.tabWidget = QtWidgets.QTabWidget(settingdlg)
self.tabWidget.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.tabWidget.setObjectName("tabWidget")
self.camera_tab = QtWidgets.QWidget()
self.camera_tab.setObjectName("camera_tab")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.camera_tab)
self.horizontalLayout_2.setContentsMargins(1, -1, 1, 1)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.camparamgbox = QtWidgets.QGroupBox(self.camera_tab)
self.camparamgbox.setObjectName("camparamgbox")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.camparamgbox)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.label_5 = QtWidgets.QLabel(self.camparamgbox)
self.label_5.setObjectName("label_5")
self.horizontalLayout_6.addWidget(self.label_5)
self.cameranumlabel = QtWidgets.QLabel(self.camparamgbox)
self.cameranumlabel.setObjectName("cameranumlabel")
self.horizontalLayout_6.addWidget(self.cameranumlabel)
self.verticalLayout_4.addLayout(self.horizontalLayout_6)
self.horizontalLayout_8 = QtWidgets.QHBoxLayout()
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.label_8 = QtWidgets.QLabel(self.camparamgbox)
self.label_8.setObjectName("label_8")
self.horizontalLayout_8.addWidget(self.label_8)
self.cameranumsbox = QtWidgets.QSpinBox(self.camparamgbox)
self.cameranumsbox.setMinimum(1)
self.cameranumsbox.setMaximum(1)
self.cameranumsbox.setObjectName("cameranumsbox")
self.horizontalLayout_8.addWidget(self.cameranumsbox)
self.verticalLayout_4.addLayout(self.horizontalLayout_8)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.label_4 = QtWidgets.QLabel(self.camparamgbox)
self.label_4.setObjectName("label_4")
self.horizontalLayout_5.addWidget(self.label_4)
self.cameratypecbox = QtWidgets.QComboBox(self.camparamgbox)
self.cameratypecbox.setObjectName("cameratypecbox")
self.cameratypecbox.addItem("")
self.horizontalLayout_5.addWidget(self.cameratypecbox)
self.verticalLayout_4.addLayout(self.horizontalLayout_5)
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.label_6 = QtWidgets.QLabel(self.camparamgbox)
self.label_6.setObjectName("label_6")
self.horizontalLayout_7.addWidget(self.label_6)
self.cameraidcbox = QtWidgets.QComboBox(self.camparamgbox)
self.cameraidcbox.setObjectName("cameraidcbox")
self.horizontalLayout_7.addWidget(self.cameraidcbox)
self.verticalLayout_4.addLayout(self.horizontalLayout_7)
self.opencamerabtn = QtWidgets.QPushButton(self.camparamgbox)
self.opencamerabtn.setObjectName("opencamerabtn")
self.verticalLayout_4.addWidget(self.opencamerabtn)
self.balancebtn = QtWidgets.QPushButton(self.camparamgbox)
self.balancebtn.setObjectName("balancebtn")
self.verticalLayout_4.addWidget(self.balancebtn)
spacerItem = QtWidgets.QSpacerItem(20, 308, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_4.addItem(spacerItem)
self.vcameragbox = QtWidgets.QGroupBox(self.camparamgbox)
self.vcameragbox.setCheckable(True)
self.vcameragbox.setChecked(False)
self.vcameragbox.setObjectName("vcameragbox")
self.horizontalLayout_9 = QtWidgets.QHBoxLayout(self.vcameragbox)
self.horizontalLayout_9.setContentsMargins(1, 3, 1, 3)
self.horizontalLayout_9.setSpacing(3)
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.label_7 = QtWidgets.QLabel(self.vcameragbox)
self.label_7.setObjectName("label_7")
self.horizontalLayout_9.addWidget(self.label_7)
self.camfileedit = QtWidgets.QLineEdit(self.vcameragbox)
self.camfileedit.setObjectName("camfileedit")
self.horizontalLayout_9.addWidget(self.camfileedit)
self.openfilebtn = QtWidgets.QPushButton(self.vcameragbox)
self.openfilebtn.setMinimumSize(QtCore.QSize(31, 27))
self.openfilebtn.setMaximumSize(QtCore.QSize(31, 27))
self.openfilebtn.setObjectName("openfilebtn")
self.horizontalLayout_9.addWidget(self.openfilebtn)
self.verticalLayout_4.addWidget(self.vcameragbox)
self.horizontalLayout_2.addWidget(self.camparamgbox)
self.previewgbox = QtWidgets.QGroupBox(self.camera_tab)
self.previewgbox.setObjectName("previewgbox")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.previewgbox)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.previewlayout = QtWidgets.QVBoxLayout()
self.previewlayout.setObjectName("previewlayout")
self.verticalLayout_6.addLayout(self.previewlayout)
self.horizontalLayout_2.addWidget(self.previewgbox)
self.horizontalLayout_2.setStretch(0, 1)
self.horizontalLayout_2.setStretch(1, 2)
self.tabWidget.addTab(self.camera_tab, "")
self.infrared_tab = QtWidgets.QWidget()
self.infrared_tab.setObjectName("infrared_tab")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.infrared_tab)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label = QtWidgets.QLabel(self.infrared_tab)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.infraredcBox = QtWidgets.QComboBox(self.infrared_tab)
self.infraredcBox.setObjectName("infraredcBox")
self.horizontalLayout.addWidget(self.infraredcBox)
self.opendevicebtn = QtWidgets.QPushButton(self.infrared_tab)
self.opendevicebtn.setObjectName("opendevicebtn")
self.horizontalLayout.addWidget(self.opendevicebtn)
self.closeBtn = QtWidgets.QPushButton(self.infrared_tab)
self.closeBtn.setObjectName("closeBtn")
self.horizontalLayout.addWidget(self.closeBtn)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.horizontalLayout.setStretch(0, 1)
self.horizontalLayout.setStretch(1, 2)
self.horizontalLayout.setStretch(4, 4)
self.verticalLayout_3.addLayout(self.horizontalLayout)
self.messagegroupBox = QtWidgets.QGroupBox(self.infrared_tab)
self.messagegroupBox.setObjectName("messagegroupBox")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.messagegroupBox)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.messageEdit = QtWidgets.QPlainTextEdit(self.messagegroupBox)
self.messageEdit.setPlainText("")
self.messageEdit.setObjectName("messageEdit")
self.verticalLayout_2.addWidget(self.messageEdit)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.sendEdit = QtWidgets.QLineEdit(self.messagegroupBox)
self.sendEdit.setObjectName("sendEdit")
self.horizontalLayout_4.addWidget(self.sendEdit)
self.sendBtn = QtWidgets.QPushButton(self.messagegroupBox)
self.sendBtn.setObjectName("sendBtn")
self.horizontalLayout_4.addWidget(self.sendBtn)
self.clearBtn = QtWidgets.QPushButton(self.messagegroupBox)
self.clearBtn.setObjectName("clearBtn")
self.horizontalLayout_4.addWidget(self.clearBtn)
self.verticalLayout_2.addLayout(self.horizontalLayout_4)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.label_2 = QtWidgets.QLabel(self.messagegroupBox)
self.label_2.setObjectName("label_2")
self.horizontalLayout_3.addWidget(self.label_2)
self.receivecbox = QtWidgets.QComboBox(self.messagegroupBox)
self.receivecbox.setEnabled(True)
self.receivecbox.setObjectName("receivecbox")
self.horizontalLayout_3.addWidget(self.receivecbox)
self.label_3 = QtWidgets.QLabel(self.messagegroupBox)
self.label_3.setObjectName("label_3")
self.horizontalLayout_3.addWidget(self.label_3)
self.commandcbox = QtWidgets.QComboBox(self.messagegroupBox)
self.commandcbox.setEditable(True)
self.commandcbox.setObjectName("commandcbox")
self.commandcbox.addItem("")
self.commandcbox.addItem("")
self.commandcbox.addItem("")
self.commandcbox.addItem("")
self.commandcbox.addItem("")
self.commandcbox.addItem("")
self.commandcbox.addItem("")
self.commandcbox.addItem("")
self.commandcbox.addItem("")
self.commandcbox.addItem("")
self.commandcbox.addItem("")
self.horizontalLayout_3.addWidget(self.commandcbox)
self.confirmBtn = QtWidgets.QPushButton(self.messagegroupBox)
self.confirmBtn.setObjectName("confirmBtn")
self.horizontalLayout_3.addWidget(self.confirmBtn)
self.horizontalLayout_3.setStretch(1, 1)
self.horizontalLayout_3.setStretch(3, 1)
self.verticalLayout_2.addLayout(self.horizontalLayout_3)
self.verticalLayout_3.addWidget(self.messagegroupBox)
self.tabWidget.addTab(self.infrared_tab, "")
self.verticalLayout.addWidget(self.tabWidget)
self.retranslateUi(settingdlg)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(settingdlg)
def retranslateUi(self, settingdlg):
_translate = QtCore.QCoreApplication.translate
settingdlg.setWindowTitle(_translate("settingdlg", "Setting"))
self.camparamgbox.setTitle(_translate("settingdlg", "Camera param"))
self.label_5.setText(_translate("settingdlg", "camera num"))
self.cameranumlabel.setText(_translate("settingdlg", "0"))
self.label_8.setText(_translate("settingdlg", "Set Camera"))
self.label_4.setText(_translate("settingdlg", "camera type"))
self.cameratypecbox.setItemText(0, _translate("settingdlg", "mindvision"))
self.label_6.setText(_translate("settingdlg", "Camera id"))
self.opencamerabtn.setText(_translate("settingdlg", "open"))
self.balancebtn.setText(_translate("settingdlg", "white balance"))
self.vcameragbox.setTitle(_translate("settingdlg", "Virtual Camera"))
self.label_7.setText(_translate("settingdlg", "FileDir"))
self.openfilebtn.setText(_translate("settingdlg", "..."))
self.previewgbox.setTitle(_translate("settingdlg", "Preview"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.camera_tab), _translate("settingdlg", "Camera"))
self.label.setText(_translate("settingdlg", "Device Select:"))
self.opendevicebtn.setText(_translate("settingdlg", "open"))
self.closeBtn.setText(_translate("settingdlg", "close"))
self.messagegroupBox.setTitle(_translate("settingdlg", "Command"))
self.sendBtn.setText(_translate("settingdlg", "Send"))
self.clearBtn.setText(_translate("settingdlg", "Clear"))
self.label_2.setText(_translate("settingdlg", "Receive:"))
self.label_3.setText(_translate("settingdlg", "SetAS:"))
self.commandcbox.setItemText(0, _translate("settingdlg", "MENU"))
self.commandcbox.setItemText(1, _translate("settingdlg", "RESET"))
self.commandcbox.setItemText(2, _translate("settingdlg", "UP"))
self.commandcbox.setItemText(3, _translate("settingdlg", "DOWN"))
self.commandcbox.setItemText(4, _translate("settingdlg", "LEFT"))
self.commandcbox.setItemText(5, _translate("settingdlg", "RIGHT"))
self.commandcbox.setItemText(6, _translate("settingdlg", "VERSION"))
self.commandcbox.setItemText(7, _translate("settingdlg", "F1"))
self.commandcbox.setItemText(8, _translate("settingdlg", "F2"))
self.commandcbox.setItemText(9, _translate("settingdlg", "F3"))
self.commandcbox.setItemText(10, _translate("settingdlg", "F4"))
self.confirmBtn.setText(_translate("settingdlg", "Confirm"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.infrared_tab), _translate("settingdlg", "Infrared"))
| 1.84375 | 2 |
_integration/python-pymysql/test.py | jfrabaute/go-mysql-server | 114 | 12787892 | # Copyright 2020-2021 Dolthub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pymysql.cursors
class TestMySQL(unittest.TestCase):
def test_connect(self):
connection = pymysql.connect(host='127.0.0.1',
user='root',
password='',
db='',
cursorclass=pymysql.cursors.DictCursor)
try:
with connection.cursor() as cursor:
sql = "SELECT name, email FROM mytable ORDER BY name, email"
cursor.execute(sql)
rows = cursor.fetchall()
expected = [
{"name": "<NAME>", "email": "<EMAIL>"},
{"name": "<NAME>", "email": "<EMAIL>"},
{"name": "<NAME>", "email": "<EMAIL>"},
{"name": "<NAME>", "email": "<EMAIL>"}
]
self.assertEqual(expected, rows)
finally:
connection.close()
if __name__ == '__main__':
unittest.main()
| 2.65625 | 3 |
Members.py | Raffy27/DiscordTool | 17 | 12787893 | from selenium import webdriver
from time import sleep
import json
import Client
t = Client.get_servers_raw()
if t == None:
print('Failed to get the list of servers')
raise SystemExit
if len(t) == 0:
print('The list of servers is empty')
raise SystemExit
print(f'Added {len(t)} servers to the queue')
with open('res\\GetMembers.js', 'r') as f:
uscan = f.read()
users = set()
total_expected = 0
total = 0
driver = webdriver.Edge('res\\msedgedriver.exe')
driver.get('https://discord.com/login')
print('Login to continue')
while not driver.current_url == 'https://discord.com/channels/@me':
sleep(1)
print('Login successful!')
for srv in t:
print(f'Processing [{srv["id"]}] {srv["name"]}')
count = Client.get_member_count(srv['id'])
print(f'Expected member count:', count)
total_expected += count
driver.get('https://discord.com/channels/' + srv['id'])
wait = True
while wait:
sleep(0.5)
wait = False
try:
driver.find_element_by_xpath('//div[@aria-label="Members"]')
except:
wait = True
sleep(0.5)
driver.execute_script(uscan)
done = False
while not done:
done = driver.execute_script('return done;')
sleep(1)
tmp = json.loads(driver.execute_script('return JSON.stringify(users);'))
total += len(tmp)
users = users.union(tmp)
print(f'Discovered {len(tmp)} members ~{len(tmp)*100//count}%.\n')
driver.close()
with open('Users.json', 'w') as f:
json.dump(list(users), f)
print(f'Exported {total} users as Users.json')
print(f'Final discovery rate: ~{total*100//total_expected}%') | 2.84375 | 3 |
peer/entity/urls.py | enriquepablo/peer | 0 | 12787894 | # Copyright 2011 Terena. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY TERENA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL TERENA OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Terena.
from django.conf.urls import patterns, url
from peer.entity.feeds import EntitiesFeed, ChangesFeed
urlpatterns = patterns(
'peer.entity.views',
# Global views
url(r'^$', 'entities_list', name='entities_list'),
url(r'^rss$', EntitiesFeed(), name='entities_feed'),
url(r'^add$', 'entity_add', name='entity_add'),
# Search view
url(r'^search$', 'search.search_entities', name='search_entities'),
# Group Views
url(r'^group/add$',
'group.entity_group_add', name='entity_group_add'),
url(r'^group/(?P<entity_group_id>\d+)$',
'group.entity_group_view', name='entity_group_view'),
url(r'^group/(?P<entity_group_id>\d+)/edit$',
'group.entity_group_edit', name='entity_group_edit'),
url(r'^group/(?P<entity_group_id>\d+)/remove$',
'group.entity_group_remove', name='entity_group_remove'),
# Entity basic views
url(r'^(?P<entity_id>\d+)$', 'entity_view',
name='entity_view'),
url(r'^(?P<entity_id>\d+)/remove/$', 'entity_remove',
name='entity_remove'),
url(r'^(?P<domain_name>\w+)/add$', 'entity_add_with_domain',
name='entity_add_with_domain'),
# Metadata views
url(r'^(?P<entity_id>\d+)/edit_metadata/$',
'metadata.edit_metadata', name='edit_metadata'),
url(r'^(?P<entity_id>\d+)/text_edit_metadata/$',
'metadata.text_edit_metadata', name='text_edit_metadata'),
url(r'^(?P<entity_id>\d+)/file_edit_metadata/$',
'metadata.file_edit_metadata', name='file_edit_metadata'),
url(r'^(?P<entity_id>\d+)/remote_edit_metadata/$',
'metadata.remote_edit_metadata', name='remote_edit_metadata'),
# Team views
url(r'^(?P<entity_id>\d+)/sharing/$',
'teams.sharing', name='sharing'),
url(r'^(?P<entity_id>\d+)/list_delegates/$',
'teams.list_delegates', name='list_delegates'),
url(r'^(?P<entity_id>\d+)/make_owner/$',
'teams.make_owner', name='make_owner'),
url(r'^(?P<entity_id>\d+)/remove_delegate/(?P<user_id>\d+)$',
'teams.remove_delegate', name='remove_delegate'),
url(r'^(?P<entity_id>\d+)/add_delegate/(?P<username>.+)$',
'teams.add_delegate', name='add_delegate'),
# Metarefresh views
url(r'^(?P<entity_id>\d+)/edit_metarefresh/$',
'metadata_utils.metarefresh_edit', name='metarefresh_edit'),
# Monitor endpoint views
url(r'^(?P<entity_id>\d+)/monitoring_prefs/$',
'metadata_utils.monitoring_prefs', name='monitoring_prefs'),
# Metadata revision views
url(r'^(?P<entity_id>\d+)/get_diff/(?P<r1>\w+)/(?P<r2>\w+)$',
'revisions.get_diff', name='get_diff'),
url(r'^(?P<entity_id>\d+)/get_revision/(?P<rev>\w+)$',
'revisions.get_revision', name='get_revision'),
url(r'^(?P<entity_id>\d+)/latest_metadata/$',
'revisions.get_latest_metadata', name='get_latest_metadata'),
# CSS with highlight colors
url(r'^pygments.css$', 'revisions.get_pygments_css',
name='get_pygments_css'),
# Entity feed
url(r'^(?P<entity_id>\d+)/rss$', ChangesFeed(), name='changes_feed'),
)
| 1.265625 | 1 |
examples/wordcount_streaming.py | Prabhu19/pyspark-unittesting | 79 | 12787895 | <reponame>Prabhu19/pyspark-unittesting
""" wordcount example using the rdd api, we'll write a test for this """
from __future__ import print_function
from operator import add
def do_streaming_word_counts(lines):
""" count of words in a dstream of lines """
counts_stream = (lines.flatMap(lambda x: x.split())
.map(lambda x: (x, 1))
.reduceByKey(add)
)
return counts_stream
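# Usage sketch (assumptions: a local Spark installation and a text source on
# localhost:9999, e.g. started with `nc -lk 9999`); it feeds a socket DStream
# of lines into the function above and prints each batch's counts.
if __name__ == "__main__":
    from pyspark import SparkContext
    from pyspark.streaming import StreamingContext

    sc = SparkContext(appName="wordcount_streaming")
    ssc = StreamingContext(sc, 1)  # 1-second micro-batches
    counts = do_streaming_word_counts(ssc.socketTextStream("localhost", 9999))
    counts.pprint()
    ssc.start()
    ssc.awaitTermination()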
| 3.03125 | 3 |
atmPy/data_archives/__init__.py | wblumberg/atm-py | 5 | 12787896 | from . import arm | 1.101563 | 1 |
htx/__init__.py | gucharbon/pyheartex | 27 | 12787897 | from .htx import init_model_server
from .htx import _server as app
| 1.117188 | 1 |
examples/callbacks.py | craigh92/PyInquirer | 0 | 12787898 | <gh_stars>0
from PyInquirer import prompt
operations = [{
'type' : 'list',
'name' : 'operation1',
'message' : 'Choose operation',
'choices' : [
{
'name' : '<NAME>',
'callback' : lambda x: [print("Hello!")]
},
{
'name' : '<NAME>',
'callback' : lambda x: [print("World!")]
}
]
},
{
'type' : 'list',
'name' : 'operation2',
'message' : 'Choose operation',
'choices' : [
{
'name' : '<NAME>',
'callback' : lambda x: [print("Foo!")]
},
{
'name' : '<NAME>',
'callback' : lambda x: [print("Bar!")]
}
]
}]
prompt(operations) | 2.71875 | 3 |
preprocessing/pre_process_utils.py | CyrusDobbs/mining-argument-relations-using-transformers | 0 | 12787899 | import json
# From Marseille
class CDCPArgumentationDoc:
def __init__(self, file_root, merge_consecutive_spans=True):
self.doc_id = int(file_root[-5:])
self._ann_path = file_root + ".ann.json"
with open(file_root + ".txt") as f:
self.raw_text = f.read()
# annotation is always loaded
try:
with open(self._ann_path, encoding="utf8") as f:
ann = json.load(f)
self.url = {int(key): val for key, val in ann['url'].items()}
self.prop_labels = ann['prop_labels']
self.prop_offsets = [(int(a), int(b))
for a, b in ann['prop_offsets']]
self.reasons = [((int(a), int(b)), int(c), 'reason')
for (a, b), c in ann['reasons']]
self.evidences = [((int(a), int(b)), int(c), 'evidence')
for (a, b), c in ann['evidences']]
self.links = self.reasons + self.evidences
except FileNotFoundError:
raise FileNotFoundError("Annotation json not found at {}"
.format(self._ann_path))
if merge_consecutive_spans:
merge_spans(self)
self.links = _transitive(self.links)
link_dict = {a: [] for (a, b, l_type) in self.links}
for (a, b, l_type) in self.links:
link_dict[a] += [{'link': b, 'type': l_type}]
self.links_dict = {a: {'link': b, 'type': l_type} for (a, b, l_type) in self.links}
self.links_lists = {'locate': [(src, trg) for (src, trg, l_type) in self.links],
'link': [l_type for (src, trg, l_type) in self.links]}
self.reasons = [(a, b) for (a, b, l_type) in self.links if l_type == 'reason']
self.evidences = [(a, b) for (a, b, l_type) in self.links if l_type == 'evidence']
# From Marseille
def merge_spans(doc, include_nonarg=True):
"""Normalization needed for CDCP data because of multi-prop spans"""
# flatten multi-prop src spans like (3, 6) into new propositions
# as long as they never overlap with other links. This inevitably will
# drop some data but it's a very small number.
# function fails if called twice because
# precondition: doc.links = [((i, j), k)...]
# postcondition: doc.links = [(i, k)...]
new_links = []
new_props = {}
new_prop_offsets = {}
dropped = 0
for (start, end), trg, l_type in doc.links:
if start == end:
new_props[start] = (start, end)
new_prop_offsets[start] = doc.prop_offsets[start]
new_props[trg] = (trg, trg)
new_prop_offsets[trg] = doc.prop_offsets[trg]
new_links.append((start, trg, l_type))
elif start < end:
# multi-prop span. Check for problems:
problems = []
for (other_start, other_end), other_trg, other_l_type in doc.links:
if start == other_start and end == other_end:
continue
# another link coming out of a subset of our span
if start <= other_start <= other_end <= end:
problems.append(((other_start, other_end), other_trg))
# another link coming into a subset of our span
if start <= other_trg <= end:
problems.append(((other_start, other_end), other_trg))
if not len(problems):
if start in new_props:
assert (start, end) == new_props[start]
new_props[start] = (start, end)
new_prop_offsets[start] = (doc.prop_offsets[start][0],
doc.prop_offsets[end][1])
new_props[trg] = (trg, trg)
new_prop_offsets[trg] = doc.prop_offsets[trg]
new_links.append((start, trg, l_type))
else:
# Since we drop the possibly NEW span, there is no need
# to remove any negative links.
dropped += 1
if include_nonarg:
used_props = set(k for a, b in new_props.values()
for k in range(a, b + 1))
for k in range(len(doc.prop_offsets)):
if k not in used_props:
new_props[k] = (k, k)
new_prop_offsets[k] = doc.prop_offsets[k]
mapping = {key: k for k, key in enumerate(sorted(new_props))}
props = [val for _, val in sorted(new_props.items())]
doc.prop_offsets = [val for _, val in sorted(new_prop_offsets.items())]
doc.links = [(mapping[src], mapping[trg], l_type) for src, trg, l_type in new_links]
doc.prop_labels = [merge_prop_labels(doc.prop_labels[a:1 + b])
for a, b in props]
return doc
# From Marseille
def merge_prop_labels(labels):
"""After joining multiple propositions, we need to decide the new type.
Rules:
1. if the span is a single prop, keep the label
2. if the span props have the same type, use that type
3. Else, rules from Jon: policy>value>testimony>reference>fact
"""
if len(labels) == 1:
return labels[0]
labels = set(labels)
if len(labels) == 1:
return next(iter(labels))
if 'policy' in labels:
return 'policy'
elif 'value' in labels:
return 'value'
elif 'testimony' in labels:
return 'testimony'
elif 'reference' in labels:
return 'reference'
elif 'fact' in labels:
return 'fact'
else:
raise ValueError("weird labels: {}".format(" ".join(labels)))
# From Marseille
def _transitive(links):
"""perform transitive closure of links.
For input [(1, 2), (2, 3)] the output is [(1, 2), (2, 3), (1, 3)]
"""
links = set(links)
while True:
new_links = [(src_a, trg_b, l_type_a)
for src_a, trg_a, l_type_a in links
for src_b, trg_b, l_type_b in links
if trg_a == src_b
and l_type_a == l_type_b
and (src_a, trg_b, l_type_a) not in links]
if new_links:
links.update(new_links)
else:
break
return links
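# Usage sketch: the path below is hypothetical; a CDCP file root such as
# "cdcp/train/00001" must end in the 5-digit document id and have the matching
# "<root>.txt" and "<root>.ann.json" files next to it.
if __name__ == "__main__":
    doc = CDCPArgumentationDoc("cdcp/train/00001")
    print(doc.prop_labels)
    print(doc.links)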
| 2.609375 | 3 |
tests/test___main__.py | oterrier/openapi-python-client | 172 | 12787900 | def test_main(mocker):
app = mocker.patch("openapi_python_client.cli.app")
# noinspection PyUnresolvedReferences
from openapi_python_client import __main__
app.assert_called_once()
| 1.585938 | 2 |
finicityapi/models/certified_institution.py | monarchmoney/finicity-python | 0 | 12787901 | <reponame>monarchmoney/finicity-python
# -*- coding: utf-8 -*-
import finicityapi.models.child_institution
class CertifiedInstitution(object):
"""Implementation of the 'Certified Institution' model.
TODO: type model description here.
Attributes:
name (string): Institution's name
id (long|int): Institution's Id
voa (bool): VOA Certification
voi (bool): VOI Certification
state_agg (bool): State Agg Certification
ach (bool): ACH Certification
trans_agg (bool): Trans Agg Certification
aha (bool): AHA Certification
child_institutions (list of ChildInstitution): TODO: type description
here.
"""
# Create a mapping from Model property names to API property names
_names = {
"name":'name',
"id":'id',
"voa":'voa',
"voi":'voi',
"state_agg":'stateAgg',
"ach":'ach',
"trans_agg":'transAgg',
"aha":'aha',
"child_institutions":'childInstitutions'
}
def __init__(self,
name=None,
id=None,
voa=None,
voi=None,
state_agg=None,
ach=None,
trans_agg=None,
aha=None,
child_institutions=None,
additional_properties = {}):
"""Constructor for the CertifiedInstitution class"""
# Initialize members of the class
self.name = name
self.id = id
self.voa = voa
self.voi = voi
self.state_agg = state_agg
self.ach = ach
self.trans_agg = trans_agg
self.aha = aha
self.child_institutions = child_institutions
# Add additional model properties to the instance
self.additional_properties = additional_properties
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
name = dictionary.get('name')
id = dictionary.get('id')
voa = dictionary.get('voa')
voi = dictionary.get('voi')
state_agg = dictionary.get('stateAgg')
ach = dictionary.get('ach')
trans_agg = dictionary.get('transAgg')
aha = dictionary.get('aha')
child_institutions = None
if dictionary.get('childInstitutions') != None:
child_institutions = list()
for structure in dictionary.get('childInstitutions'):
child_institutions.append(finicityapi.models.child_institution.ChildInstitution.from_dictionary(structure))
# Clean out expected properties from dictionary
for key in cls._names.values():
if key in dictionary:
del dictionary[key]
# Return an object of this model
return cls(name,
id,
voa,
voi,
state_agg,
ach,
trans_agg,
aha,
child_institutions,
dictionary)
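# Usage sketch (illustrative values only; the API property names come from the
# _names mapping above):
if __name__ == "__main__":
    inst = CertifiedInstitution.from_dictionary(
        {"name": "Example Bank", "id": 101732, "voa": True, "voi": False, "stateAgg": True}
    )
    print(inst.name, inst.id, inst.voa, inst.state_agg, inst.additional_properties)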
| 2.734375 | 3 |
slp_utils/utils.py | 66chenbiao/sleepace_verification_tool | 0 | 12787902 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
# @author : <NAME>
# @Email : <EMAIL>
# @Project : Python_Files
# @File : utils.py
# @Software: PyCharm
# @Time : 2021/5/20 下午7:42
"""
import os
import struct
import sys
import time
import traceback
from datetime import datetime
from pathlib import Path
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
pd.set_option("display.max_columns", None)
# similarly, set the maximum number of rows to display
pd.set_option("display.max_rows", None)
# function: byte2int
def byte2int(data, mode="u16"):
dbyte = bytearray(data)
darray = []
i = 0
while i < len(dbyte):
if "u8" == mode:
darray.append(dbyte[i])
i = i + 1
elif "u16" == mode:
darray.append(dbyte[i] | dbyte[i + 1] << 8)
i = i + 2
return darray
# end: byte2int
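# e.g. byte2int(b"\x01\x02\x03\x04", mode="u16") -> [513, 1027]  (little-endian 16-bit words)
#      byte2int(b"\x01\x02\x03\x04", mode="u8")  -> [1, 2, 3, 4]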
# function: byte2float
def byte2float(data, mode="float"):
darray = []
i = 0
if "float" == mode:
while i < len(data):
fx = struct.unpack("f", data[i : i + 4])
darray.append(fx)
i = i + 4
elif "double" == mode:
while i < len(data):
dx = struct.unpack("d", data[i : i + 8])
darray.append(dx)
i = i + 8
return darray
# end: byte2float
def read_bytefile(path, folder, file, mode="u8"):
fname = path + folder + file
f = open(fname, "rb")
dtmp = f.read()
global rslt
if "u8" == mode:
rslt = byte2int(dtmp, mode="u8")
if "u16" == mode:
rslt = byte2int(dtmp, mode="u16")
if "float" == mode:
rslt = byte2float(dtmp, mode="float")
if "double" == mode:
rslt = byte2float(dtmp, mode="double")
return rslt
# append one row of data to the worksheet
def insertOne(value, sheet):
sheet.append(value)
def read_raw(src_dir, fname):
bcg, gain = [], []
fname = src_dir + fname
f = open(fname, "rb")
dtmp = f.read()
dbyte = bytearray(dtmp)
i = 0
while i < len(dbyte):
bcg.append(dbyte[i] | dbyte[i + 1] << 8)
gain.append(dbyte[i + 2])
i = i + 3
return bcg, gain
def read_wgt(src_dir, fname):
wgt = []
fname = src_dir + fname
f = open(fname, "rb")
dtmp = f.read()
dbyte = bytearray(dtmp)
i = 0
while i < len(dbyte):
wgt.append(dbyte[i + 1] | dbyte[i] << 8)
i = i + 2
return wgt
def time2stamp(cmnttime):  # convert a "%Y-%m-%d %H:%M:%S" string to a Unix timestamp
    # parse into a time struct
    timeArray = time.strptime(cmnttime, "%Y-%m-%d %H:%M:%S")
    # convert to seconds since the epoch
timeStamp = int(time.mktime(timeArray))
return timeStamp
def stamp2time(timeStamp):
timeArray = time.localtime(timeStamp)
otherStyleTime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
return otherStyleTime
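# Round-trip example (local-time based, so the intermediate epoch value depends on
# the machine's timezone):
#   stamp2time(time2stamp("2021-05-20 19:42:00")) == "2021-05-20 19:42:00"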
def day2stamp(cmnttime):  # convert a "%Y-%m-%d" date string to a Unix timestamp
    # parse into a time struct
    timeArray = time.strptime(cmnttime, "%Y-%m-%d")
    # convert to seconds since the epoch
timeStamp = int(time.mktime(timeArray))
return timeStamp
def stamp2day(timeStamp):
timeArray = time.localtime(timeStamp)
otherStyleTime = time.strftime("%Y-%m-%d", timeArray)
return otherStyleTime
def hour2stamp(cmnttime):  # convert a "%Y-%m-%d %H:%M" string to a Unix timestamp
    # parse into a time struct
    timeArray = time.strptime(cmnttime, "%Y-%m-%d %H:%M")
    # convert to seconds since the epoch
timeStamp = int(time.mktime(timeArray))
return timeStamp
def stamp2hour(timeStamp):
timeArray = time.localtime(timeStamp)
otherStyleTime = time.strftime("%Y-%m-%d %H:%M", timeArray)
return otherStyleTime
def time2datetime(tranTime, pList):
tdelta, startstamp = 60, int(time2stamp(tranTime))
t = [datetime.fromtimestamp(startstamp + t * tdelta) for t in range(len(pList))]
return t
def time_formattime(pList):
famTime = [datetime.fromisoformat(t) for t in pList]
return famTime
def quest_time_extract(num_spl, quest_outbed, slp_awTim):
num_slp0 = num_spl[0]
num_slp2 = num_spl[:2]
aslp_day = stamp2day(day2stamp(slp_awTim) - 86400)
awak_day = slp_awTim
if len(num_spl) == 6:
outbed_stamp = "0" + num_spl[0] + ":" + num_spl[1:3] + ":00"
if int(num_slp0) >= 19 and int(num_slp0) <= 23:
outbed_stamp = aslp_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif int(num_slp0) >= 0 and int(num_slp0) <= 8:
outbed_stamp = awak_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif len(num_spl) == 4:
outbed_stamp = num_spl[:2] + ":" + num_spl[2:] + ":00"
if int(num_slp2) >= 19 and int(num_slp2) <= 23:
outbed_stamp = aslp_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif int(num_slp2) >= 0 and int(num_slp2) <= 8:
outbed_stamp = awak_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif len(num_spl) == 3:
outbed_stamp = "0" + num_spl[0] + ":" + num_spl[1:] + ":00"
if int(num_slp0) >= 19 and int(num_slp0) <= 23:
outbed_stamp = aslp_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif int(num_slp0) >= 0 and int(num_slp0) <= 8:
outbed_stamp = awak_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif len(num_spl) == 2:
outbed_stamp = "0" + num_spl[0] + ":" + "00" + ":00"
if int(num_slp0) >= 19 and int(num_slp0) <= 23:
outbed_stamp = aslp_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif int(num_slp0) >= 0 and int(num_slp0) <= 8:
outbed_stamp = awak_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif len(num_spl) == 1:
outbed_stamp = "0" + num_spl + ":" + "00" + ":00"
if int(num_spl) >= 19 and int(num_spl) <= 23:
outbed_stamp = aslp_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif int(num_spl) >= 0 and int(num_spl) <= 8:
outbed_stamp = awak_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
def diff_acl(slpList, psgList):
fslp_diff = int(abs(time2stamp(str(psgList)) - time2stamp(str(slpList))) / 60)
return fslp_diff
def num_pop(num1: list, num2: list):
if len(num1) > len(num2):
lenDiff = len(num1) - len(num2)
for i in range(lenDiff):
num1.pop()
elif len(num2) > len(num1):
lenDiff = len(num2) - len(num1)
for i in range(lenDiff):
num2.pop()
def num3_pop(num1: list, num2: list, num3: list):
num2 = [str(i) for i in range(len(num2))]
num3 = [str(i) for i in range(len(num3))]
maxLen = max(len(num1), len(num2), len(num3))
minLen = min(len(num1), len(num2), len(num3))
plen = maxLen - minLen
new_num1, new_num2, new_num3 = 0, 0, 0
for i in range(maxLen):
if len(num1) == maxLen:
new_num1 = num1[:-plen]
elif len(num2) == maxLen:
new_num2 = num2[:-plen]
elif len(num3) == maxLen:
new_num3 = num3[:-plen]
return new_num1, new_num2, new_num3
def len_compare(pr_list: list, rr_list: list):
if len(pr_list) > len(rr_list):
return len(rr_list)
elif len(pr_list) < len(rr_list):
return len(pr_list)
def path_concat(sub_dir, pathName):
_path = str(sub_dir.joinpath(pathName)) + "/"
return _path
def is_empty_file_3(file_path: str):
assert isinstance(file_path, str), f"file_path参数类型不是字符串类型: {type(file_path)}"
p = Path(file_path)
assert p.is_file(), f"file_path不是一个文件: {file_path}"
return p.stat().st_size == 0
def dir_empty(dir_path):
try:
next(os.scandir(dir_path))
return False
except StopIteration:
return True
def select_num(df1, df2):
# num_requried = 0
hr_lower_limit = df1["hr"].map(lambda x: x != 0)
hr_upper_limit = df1["hr"].map(lambda x: x != 255)
br_lower_limit = df1["br"].map(lambda x: x != 0)
br_upper_limit = df1["br"].map(lambda x: x != 255)
pr_lower_limit = df2["pr"].map(lambda x: x != 0)
pr_upper_limit = df2["pr"].map(lambda x: x != 255)
rr_lower_limit = df2["rr"].map(lambda x: x != 0)
rr_upper_limit = df2["rr"].map(lambda x: x != 255)
df1 = df1[
(hr_lower_limit & hr_upper_limit & br_lower_limit & br_upper_limit)
& (pr_lower_limit & pr_upper_limit & rr_lower_limit & rr_upper_limit)
]
df2 = df2[
(hr_lower_limit & hr_upper_limit & br_lower_limit & br_upper_limit)
& (pr_lower_limit & pr_upper_limit & rr_lower_limit & rr_upper_limit)
]
    df1 = df1.reset_index(drop=True)  # reset the index
    df2 = df2.reset_index(drop=True)  # reset the index
return df1, df2
def minute_mean(df, cname, stime):
    # compute the per-minute mean of the given SLP column (heart or breath rate)
    hr_min_list = []
    slp_time_min_list = []
    df_min = int(len(df[cname]) / 60)  # total number of whole minutes in the data
for i in range(df_min):
hr_min_len = (i + 1) * 60
num = 0
temp = 0
slp_time_min = stime + hr_min_len
for j in df[cname][hr_min_len - 60 : hr_min_len]:
if j != 0 and j != 255:
num += 1
temp += j
if num > 0:
res = int(temp / num)
hr_min_list.append(res)
if num == 0:
hr_min_list.append(0)
slp_time_min_list.append(slp_time_min)
# rslt = {'time':slp_time_min_list,'hr':hr_min_list,'br':br_min_list}
# df_clean = pd.DataFrame(data=rslt)
return slp_time_min_list, hr_min_list
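# Illustrative sketch of minute_mean: each 60-sample block of the chosen column
# is averaged while the 0/255 sentinel values are skipped, and the per-minute
# timestamps are returned alongside the per-minute means. The column name and
# start time below are assumptions for the example, not real data:
#   >>> df = pd.DataFrame({"hr": [60] * 30 + [255] * 30 + [70] * 60})
#   >>> minute_mean(df, "hr", 1554000000)
#   ([1554000060, 1554000120], [60, 70])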
def file_exist(my_file):
txt_list = []
if Path(my_file).is_file() is False:
Path(my_file).touch()
return txt_list
def Heart_rate_accuracy_calculat(PR, HR, src_txt, fcsv):
PR = PR[PR.map(lambda x: x > 0)]
HR = HR[HR.map(lambda x: x > 0)]
    PR = PR.reset_index(drop=True)  # reset the index
    HR = HR.reset_index(drop=True)  # reset the index
diff_hr = PR - HR
diff_hr_cnt = 0
try:
diff_hr_pre = abs(diff_hr) / PR
diff_hr_pre = diff_hr_pre.dropna()
diff_hr_pre = diff_hr_pre * 100
for i, val in enumerate(diff_hr):
if i <= len(PR):
if abs(val) <= PR[i] * 0.1 or abs(val) <= 5:
diff_hr_cnt += 1
hr_mean = round(np.mean(abs(diff_hr)), 2)
hr_std = round(np.std(abs(diff_hr), ddof=1), 2)
if len(diff_hr_pre) == 0:
print(traceback.print_exc())
else:
acc_hr = diff_hr_cnt / len(diff_hr_pre)
txt_content = (
fcsv
+ " 心率准确性[%d / %d]: %.2f %%"
% (
diff_hr_cnt,
len(diff_hr_pre),
round(acc_hr * 100, 2),
)
+ " 心率误差:",
str(hr_mean) + "±" + str(hr_std),
)
f = open(src_txt + "accuracy.txt", "a")
f.write((str(txt_content) + "\r"))
return acc_hr
except Exception as exc:
print(exc)
print(traceback.print_exc())
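# The acceptance rule above counts a minute as correct when the PSG/pillow
# difference is within 10 % of the PSG value or within 5 bpm. A worked example
# with made-up numbers: PSG = 40 bpm and pillow = 45 bpm differ by 5 bpm, which
# fails the 10 % test (4 bpm) but still passes via the "<= 5 bpm" branch;
# PSG = 40 bpm and pillow = 50 bpm differ by 10 bpm and fail both branches.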
def Respiration_rate_accuracy_calculat(RR, br, src_txt, fcsv):
RR = RR[RR.map(lambda x: x > 0)]
br = br[br.map(lambda x: x > 0)]
    RR = RR.reset_index(drop=True)  # reset the index
    br = br.reset_index(drop=True)  # reset the index
    try:
        # compute respiration-rate accuracy
diff_br_pre = abs(RR - br)
diff_br_pre = diff_br_pre.dropna()
diff_br_cnt = 0
for i in diff_br_pre:
if i <= 2:
diff_br_cnt += 1
br_mean = round(np.mean(abs(diff_br_pre)), 2)
br_std = round(np.std(abs(diff_br_pre), ddof=1), 2)
if len(diff_br_pre) == 0:
print(traceback.print_exc())
else:
acc_br = diff_br_cnt / len(diff_br_pre)
txt_content = (
fcsv
+ " 呼吸率准确性[%d / %d]: %.2f %%"
% (
diff_br_cnt,
len(diff_br_pre),
round(acc_br * 100, 2),
)
+ " 呼吸率误差:",
str(br_mean) + "±" + str(br_std),
)
f = open(src_txt + "accuracy.txt", "a")
f.write((str(txt_content) + "\r"))
return acc_br
except Exception as exc:
print(exc)
print(traceback.print_exc())
def draw_PR_save(PR, slp_hr, time_offset, img_dir, fcsv, acc_flag):
    # plotting
    mpl.rcParams["font.sans-serif"] = ["SimHei"]
    mpl.rcParams["axes.unicode_minus"] = False
    # configure the x-axis date display: format and tick interval
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter("%Y%m/%d %H:%M:%S"))
plt.gca().xaxis.set_major_locator(mdates.MinuteLocator(interval=15))
if len(PR) > len(time_offset):
PR = PR[:-1]
ax1 = plt.subplot(412)
plt.plot(time_offset, PR, "r-", label="PSG")
plt.plot(time_offset, slp_hr, "b-", label="智能枕头")
plt.title("心率对比(bpm)", fontsize=9)
plt.legend(loc="upper right")
plt.setp(ax1.get_xticklabels(), visible=False, fontsize=9)
# plt.xlim(time_offset[0], time_offset[-1])
plt.ylim(40, 100)
    f = plt.gcf()  # get the current figure
    if acc_flag == 1:
        f.savefig(img_dir + "err_img/" + fcsv + ".png", bbox_inches="tight")
    elif acc_flag == 0:
        f.savefig(img_dir + "nor_img/" + fcsv + ".png", bbox_inches="tight")
    f.clear()  # release memory
def draw_PR_RR_save(PR, RR, slp_hr, slp_br, time_offset, img_dir, fcsv, acc_flag):
    # plotting
    mpl.rcParams["font.sans-serif"] = ["SimHei"]
    mpl.rcParams["axes.unicode_minus"] = False
    # fig.suptitle(fname)
    # configure the x-axis date display: format and tick interval
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter("%Y%m/%d %H:%M:%S"))
plt.gca().xaxis.set_major_locator(mdates.MinuteLocator(interval=15))
if len(PR) > len(time_offset):
PR = PR[:-1]
if len(RR) > len(time_offset):
RR = RR[:-1]
print(len(time_offset), len(PR))
print(time_offset)
ax1 = plt.subplot(412)
plt.plot(time_offset, PR, "r-", label="PSG")
plt.plot(time_offset, slp_hr, "b-", label="智能枕头")
plt.title("心率对比(bpm)", fontsize=9)
plt.legend(loc="upper right")
plt.setp(ax1.get_xticklabels(), visible=False, fontsize=9)
# plt.xlim(time_offset[0], time_offset[-1])
plt.ylim(40, 100)
ax2 = plt.subplot(413, sharex=ax1)
plt.plot(time_offset, RR, "r-", label="PSG")
plt.plot(time_offset, slp_br, "b-", label="智能枕头")
plt.title("呼吸率对比(rpm)", fontsize=9)
plt.legend(loc="upper right")
plt.setp(ax2.get_xticklabels(), visible=True, fontsize=9)
plt.xticks()
# plt.xlim(time_offset[0], time_offset[-1])
plt.ylim(5, 35)
    f = plt.gcf()  # get the current figure
    if acc_flag == 1:
        f.savefig(img_dir + "err_img/" + fcsv + ".png", bbox_inches="tight")
    elif acc_flag == 0:
        f.savefig(img_dir + "nor_img/" + fcsv + ".png", bbox_inches="tight")
    # f.figlegend()
    f.clear()  # release memory
def slp_hr_br_transfrom(cat_dir, save_dir, flag):
# slp批量仿真数据转成csv文件
flist = os.listdir(cat_dir + "hr_sec/")
for fcsv in flist[:]:
fname = fcsv.split(".")[0]
hr_list = read_bytefile(cat_dir, "hr_sec/", fcsv, mode="u8")
br_list = read_bytefile(cat_dir, "br_sec/", fcsv, mode="u8")
startstamp = int(fcsv.split("_")[-1].split(".")[0])
time_list = [startstamp + t for t in range(len(hr_list))]
if flag == 0:
rslt = {"time": time_list, "heart_rate": hr_list}
df = pd.DataFrame(data=rslt)
df.to_csv(
(save_dir + fname + ".csv"), index=False, header=["time", "heart_rate"]
)
elif flag == 1:
rslt = {"time": time_list, "breath_rate": br_list}
df = pd.DataFrame(data=rslt)
df.to_csv(
(save_dir + fname + ".csv"), index=False, header=["time", "breath_rate"]
)
elif flag == 2:
rslt = {"time": time_list, "heart_rate": hr_list, "breath_rate": br_list}
df = pd.DataFrame(data=rslt)
df.to_csv(
(save_dir + fname + ".csv"),
index=False,
header=["time", "heart_rate", "breath_rate"],
)
def psg_slp_heart_cal(src_slp, src_psg, src_txt, src_img):
"""心率准确性脚本计算"""
slp_flist = os.listdir(src_slp)
psg_flist = os.listdir(src_psg)
txt_list = []
my_file = src_txt + "setime.txt"
for i, fcsv in enumerate(slp_flist):
simg_name = fcsv.split(".")[0]
data_slp = pd.read_csv(src_slp + fcsv)
print(fcsv, psg_flist[i])
data_psg = pd.read_csv(src_psg + psg_flist[i])
data_slp.columns = ["time", "hr"]
data_psg.columns = ["time", "pr"]
time_set = [
data_slp["time"].tolist()[0],
time2stamp(data_psg["time"].tolist()[0]),
data_slp["time"].tolist()[-1],
time2stamp(data_psg["time"].tolist()[-1]),
]
start_time = time_set[0] - time_set[1]
end_time = time_set[2] - time_set[3]
if start_time < 0:
file_start = time_set[1]
else:
file_start = time_set[0]
if end_time < 0:
file_end = time_set[2]
else:
file_end = time_set[3]
data_psg["timestamp"] = time2stamp(data_psg["time"])
print(
"开始区间:", file_start, "结束区间:", file_end, "公共区间长度:", (file_end - file_start)
)
slp_sind = data_slp[data_slp["time"] == file_start].index.tolist()[0]
slp_eind = data_slp[data_slp["time"] == file_end].index.tolist()[0]
slp_clist = data_slp[slp_sind : slp_eind + 1]
psg_sind = data_psg[data_psg["timestamp"] == file_start].index.tolist()[0]
psg_eind = data_psg[data_psg["timestamp"] == file_end].index.tolist()[0]
psg_clist = data_psg[psg_sind : psg_eind + 1]
hr_time, hr_list = minute_mean(slp_clist, "hr", file_start)
pr_time, pr_list = minute_mean(psg_clist, "pr", file_start)
rslt_slp = {"time": hr_time, "hr": hr_list}
clean_slp = pd.DataFrame(data=rslt_slp)
rslt_psg = {"time": pr_time, "pr": pr_list}
clean_psg = pd.DataFrame(data=rslt_psg)
time = clean_slp["time"]
HR = clean_slp["hr"]
PR = clean_psg["pr"]
acc_hr = Heart_rate_accuracy_calculat(PR, HR, src_txt, fcsv)
time_offset = [datetime.fromtimestamp(i) for i in time]
        # prepare the raw SLP heart-rate series
slp_hr = pd.Series(list(HR), index=time_offset)
if len(time_offset) > 0:
acc_flag = 0
if acc_hr < 0.9:
acc_flag = 1
draw_PR_save(PR, slp_hr, time_offset, src_img, simg_name, acc_flag)
else:
draw_PR_save(PR, slp_hr, time_offset, src_img, simg_name, acc_flag)
if Path(my_file).is_file() is False:
Path(my_file).touch()
if Path(my_file).exists():
size = os.path.getsize(my_file)
if size > 100:
os.remove(my_file)
Path(my_file).touch()
elif size == 0:
time_diff = file_end - file_start
txt_content = (
fcsv
+ " 起始时间:"
+ str(file_start)
+ " 结束时间:"
+ str(file_end)
+ " 时间长度:"
+ str(time_diff)
)
txt_list.append(txt_content)
for i, val in enumerate(txt_list):
f = open(my_file, "a")
f.write((str(val) + "\r"))
f.close()
def psg_slp_heart_breath_cal(src_slp, src_psg, src_txt, src_img, flag):
"""心率、呼吸率准确性计算脚本"""
if flag == 0:
slp_flist = os.listdir(src_slp)
psg_flist = os.listdir(src_psg)
slp_idList = [i.split(".")[0].split("_")[0] for i in slp_flist]
psg_idList = [i.split(".")[0].split("_")[0] for i in psg_flist]
txt_list = []
my_file = src_txt + "setime.txt"
for i, fcsv in enumerate(slp_flist):
# print(slp_idList[i],psg_idList[i])
j = psg_idList.index(slp_idList[i])
simg_name = fcsv.split(".")[0]
data_slp = pd.read_csv(src_slp + fcsv)
data_psg = pd.read_csv(src_psg + psg_flist[j])
data_slp.columns = ["time", "hr", "br"]
data_psg.columns = ["time", "pr", "rr"]
time_set = [
data_slp["time"].tolist()[0],
time2stamp(data_psg["time"].tolist()[0]),
data_slp["time"].tolist()[-1],
time2stamp(data_psg["time"].tolist()[-1]),
]
start_time = time_set[0] - time_set[1]
end_time = time_set[2] - time_set[3]
if start_time < 0:
file_start = time_set[1]
else:
file_start = time_set[0]
if end_time < 0:
file_end = time_set[2]
else:
file_end = time_set[3]
data_psg["timestamp"] = time2stamp(data_psg["time"])
print(
"开始区间:",
file_start,
"结束区间:",
file_end,
"公共区间长度:",
(file_end - file_start),
)
slp_sind = data_slp[data_slp["time"] == file_start].index.tolist()[0]
slp_eind = data_slp[data_slp["time"] == file_end].index.tolist()[0]
slp_clist = data_slp[slp_sind : slp_eind + 1]
psg_sind = data_psg[data_psg["timestamp"] == file_start].index.tolist()[0]
psg_eind = data_psg[data_psg["timestamp"] == file_end].index.tolist()[0]
psg_clist = data_psg[psg_sind : psg_eind + 1]
hr_time, hr_list = minute_mean(slp_clist, "hr", file_start)
br_time, br_list = minute_mean(slp_clist, "br", file_start)
pr_time, pr_list = minute_mean(psg_clist, "pr", file_start)
rr_time, rr_list = minute_mean(psg_clist, "rr", file_start)
rslt_slp = {"time": hr_time, "hr": hr_list, "br": br_list}
clean_slp = pd.DataFrame(data=rslt_slp)
rslt_psg = {"time": pr_time, "pr": pr_list, "rr": rr_list}
clean_psg = pd.DataFrame(data=rslt_psg)
time = clean_slp["time"]
HR = clean_slp["hr"]
PR = clean_psg["pr"]
BR = clean_slp["br"]
RR = clean_psg["rr"]
acc_hr = Heart_rate_accuracy_calculat(PR, HR, src_txt, fcsv)
acc_br = Respiration_rate_accuracy_calculat(RR, BR, src_txt, fcsv)
time_offset = [datetime.fromtimestamp(i) for i in time]
            # prepare the raw SLP heart-rate / respiration series
slp_hr = pd.Series(list(HR), index=time_offset)
slp_br = pd.Series(list(BR), index=time_offset)
if len(time_offset) > 0:
acc_flag = 0
if acc_hr is not None and acc_br is not None:
if acc_hr < 0.9 or acc_br < 0.9:
acc_flag = 1
draw_PR_RR_save(
PR,
RR,
slp_hr,
slp_br,
time_offset,
src_img,
simg_name,
acc_flag,
)
else:
draw_PR_RR_save(
PR,
RR,
slp_hr,
slp_br,
time_offset,
src_img,
simg_name,
acc_flag,
)
if Path(my_file).is_file() is False:
Path(my_file).touch()
if Path(my_file).exists():
size = os.path.getsize(my_file)
if size > 100:
os.remove(my_file)
Path(my_file).touch()
elif size == 0:
time_diff = file_end - file_start
txt_content = (
fcsv
+ " 起始时间:"
+ str(file_start)
+ " 结束时间:"
+ str(file_end)
+ " 时间长度:"
+ str(time_diff)
)
txt_list.append(txt_content)
for i, val in enumerate(txt_list):
f = open(my_file, "a")
f.write((str(val) + "\r"))
f.close()
elif flag == 1:
slp_flist = os.listdir(src_slp)
psg_flist = os.listdir(src_psg)
slp_idList = [i.split(".")[0].split("_")[0] for i in slp_flist]
psg_idList = [i.split(".")[0].split("_")[0].lstrip("0") for i in psg_flist]
txt_list = []
my_file = src_txt + "setime.txt"
for i, fcsv in enumerate(slp_flist):
j = psg_idList.index(slp_idList[i])
simg_name = fcsv.split(".")[0]
data_slp = pd.read_csv(src_slp + fcsv)
data_psg = pd.read_csv(src_psg + psg_flist[j])
data_slp.columns = ["time", "hr", "br"]
data_psg.columns = ["time", "pr", "rr"]
time_set = [
data_slp["time"].tolist()[0],
hour2stamp(data_psg["time"].tolist()[0]),
data_slp["time"].tolist()[-1],
hour2stamp(data_psg["time"].tolist()[-1]),
]
start_time = time_set[0] - time_set[1]
end_time = time_set[2] - time_set[3]
if start_time < 0:
file_start = time_set[1]
else:
file_start = time_set[0]
if end_time < 0:
file_end = time_set[2]
else:
file_end = time_set[3]
print(time_set[1], time_set[0])
# data_psg["timestamp"] = data_psg["time"].apply(lambda x: hour2stamp(x))
data_psg["timestamp"] = hour2stamp(data_psg["time"])
print(
"开始区间:",
file_start,
"结束区间:",
file_end,
"公共区间长度:",
(file_end - file_start),
)
slp_sind = data_slp[data_slp["time"] == file_start].index.tolist()[0]
slp_eind = data_slp[data_slp["time"] == file_end].index.tolist()[0]
slp_clist = data_slp[slp_sind : slp_eind + 1]
psg_sind = data_psg[data_psg["timestamp"] == file_start].index.tolist()[0]
psg_eind = data_psg[data_psg["timestamp"] == file_end].index.tolist()[0]
psg_clist = data_psg[psg_sind : psg_eind + 1]
hr_time, hr_list = minute_mean(slp_clist, "hr", file_start)
br_time, br_list = minute_mean(slp_clist, "br", file_start)
pr_time, pr_list = minute_mean(psg_clist, "pr", file_start)
rr_time, rr_list = minute_mean(psg_clist, "rr", file_start)
rslt_slp = {"time": hr_time, "hr": hr_list, "br": br_list}
clean_slp = pd.DataFrame(data=rslt_slp)
rslt_psg = {"time": pr_time, "pr": pr_list, "rr": rr_list}
clean_psg = pd.DataFrame(data=rslt_psg)
time = clean_slp["time"]
HR = clean_slp["hr"]
PR = clean_psg["pr"]
BR = clean_slp["br"]
RR = clean_psg["rr"]
acc_hr = Heart_rate_accuracy_calculat(PR, HR, src_txt, fcsv)
acc_br = Respiration_rate_accuracy_calculat(RR, BR, src_txt, fcsv)
time_offset = [datetime.fromtimestamp(i) for i in time]
            # prepare the raw SLP heart-rate / respiration series
slp_hr = pd.Series(list(HR), index=time_offset)
slp_br = pd.Series(list(BR), index=time_offset)
if len(time_offset) > 0:
acc_flag = 0
if acc_hr < 0.9 or acc_br < 0.9:
acc_flag = 1
draw_PR_RR_save(
PR,
RR,
slp_hr,
slp_br,
time_offset,
src_img,
simg_name,
acc_flag,
)
else:
draw_PR_RR_save(
PR,
RR,
slp_hr,
slp_br,
time_offset,
src_img,
simg_name,
acc_flag,
)
if Path(my_file).is_file() is False:
Path(my_file).touch()
if Path(my_file).exists():
size = os.path.getsize(my_file)
if size > 100:
os.remove(my_file)
Path(my_file).touch()
elif size == 0:
time_diff = file_end - file_start
txt_content = (
fcsv
+ " 起始时间:"
+ str(file_start)
+ " 结束时间:"
+ str(file_end)
+ " 时间长度:"
+ str(time_diff)
)
txt_list.append(txt_content)
for i, val in enumerate(txt_list):
f = open(my_file, "a")
f.write((str(val) + "\r"))
f.close()
def psg_rr_transfrom(cat_dir, save_dir):
    # convert batched PSG simulation data into csv files
flist = os.listdir(cat_dir + "br_sec/")
for fcsv in flist[:]:
fname = fcsv.split(".")[0]
br_list = read_bytefile(cat_dir, "br_sec/", fcsv, mode="u8")
startstamp = int(fcsv.split("_")[-1].split(".")[0])
time_list = [startstamp + t for t in range(len(br_list))]
rslt = {"time": time_list, "breath_rate": br_list}
df = pd.DataFrame(data=rslt)
df.to_csv(
(save_dir + fname + ".csv"), index=False, header=["time", "breath_rate"]
)
def read_summary(path, folder, file):
fname = path + folder + file
f = open(fname, "rb")
dtmp = f.read()
dtmp = bytearray(dtmp)
    mean_hrate = dtmp[0] | dtmp[1] << 8  # mean heart rate
    mean_brate = dtmp[2] | dtmp[3] << 8  # mean respiration rate
    fallasleeptime = dtmp[4] | dtmp[5] << 8  # sleep-onset time
    wakeuptime = dtmp[6] | dtmp[7] << 8  # wake-up time
    offbed_cnt = dtmp[8] | dtmp[9] << 8  # number of out-of-bed events
    turnover_cnt = dtmp[10] | dtmp[11] << 8  # number of turn-overs
    bodymove_cnt = dtmp[12] | dtmp[13] << 8  # number of body movements
    heartstop_cnt = dtmp[14] | dtmp[15] << 8  # number of heart pauses
    respstop_cnt = dtmp[16] | dtmp[17] << 8  # number of apnea events
    deepsleep_per = dtmp[18] | dtmp[19] << 8  # deep-sleep percentage
    remsleep_per = dtmp[20] | dtmp[21] << 8  # mid-sleep percentage
    lightsleep_per = dtmp[22] | dtmp[23] << 8  # light-sleep percentage
    wakesleep_per = dtmp[24] | dtmp[25] << 8  # awake percentage
    wakesleep_time = dtmp[26] | dtmp[27] << 8  # awake duration
    lightsleep_time = dtmp[28] | dtmp[29] << 8  # light-sleep duration
    remsleep_time = dtmp[30] | dtmp[31] << 8  # mid-sleep duration
    deepsleep_time = dtmp[32] | dtmp[33] << 8  # deep-sleep duration
    wake_off_cnt = dtmp[34] | dtmp[35] << 8  # number of awake (incl. out-of-bed) events
    hrate_max = dtmp[36] | dtmp[37] << 8  # maximum heart rate
    brate_max = dtmp[38] | dtmp[39] << 8  # maximum respiration rate
    hrate_min = dtmp[40] | dtmp[41] << 8  # minimum heart rate
    brate_min = dtmp[42] | dtmp[43] << 8  # minimum respiration rate
    hrate_high_time = dtmp[44] | dtmp[45] << 8  # tachycardia duration
    hrate_low_time = dtmp[46] | dtmp[47] << 8  # bradycardia duration
    brate_high_time = dtmp[48] | dtmp[49] << 8  # rapid-breathing duration
    brate_low_time = dtmp[50] | dtmp[51] << 8  # slow-breathing duration
    allsleep_time = dtmp[52] | dtmp[53] << 8  # total sleep duration
    body_move = dtmp[54] | dtmp[55] << 8  # restlessness deduction
    off_bed = dtmp[56] | dtmp[57] << 8  # out-of-bed deduction
    wake_cnt = dtmp[58] | dtmp[59] << 8  # easily-awakened deduction
    start_time = dtmp[60] | dtmp[61] << 8  # went-to-bed-too-late deduction
    fall_asleep = dtmp[62] | dtmp[63] << 8  # difficulty-falling-asleep deduction
    perc_deep = dtmp[64] | dtmp[65] << 8  # insufficient-deep-sleep deduction
    sleep_long = dtmp[66] | dtmp[67] << 8  # slept-too-long deduction
    sleep_less = dtmp[68] | dtmp[69] << 8  # slept-too-short deduction
    breath_stop = dtmp[70] | dtmp[71] << 8  # apnea deduction
    heart_stop = dtmp[72] | dtmp[73] << 8  # heart-pause deduction
    hrate_low = dtmp[74] | dtmp[75] << 8  # bradycardia deduction
    hrate_high = dtmp[76] | dtmp[77] << 8  # tachycardia deduction
    brate_low = dtmp[78] | dtmp[79] << 8  # slow-breathing deduction
    brate_high = dtmp[80] | dtmp[81] << 8  # rapid-breathing deduction
    benign_sleep = dtmp[82] | dtmp[83] << 8  # benign sleep distribution deduction
    offset = dtmp[84] | dtmp[85] << 8
    data_len = dtmp[86] | dtmp[87] << 8
    start_stamp = dtmp[88] | dtmp[89] << 8 | dtmp[90] << 16 | dtmp[91] << 24
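    # Each summary field above is a little-endian 16-bit value assembled from two
    # consecutive bytes: with made-up bytes dtmp[0] = 0x3C and dtmp[1] = 0x00 the
    # mean heart rate is 0x3C | 0x00 << 8 = 60 bpm. The start timestamp is the
    # same idea extended to four bytes (32 bits).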
print(start_stamp, start_stamp + fallasleeptime * 60)
diff = (
body_move
+ off_bed
+ wake_cnt
+ start_time
+ fall_asleep
+ perc_deep
+ sleep_long
+ sleep_less
+ breath_stop
+ heart_stop
+ hrate_low
+ hrate_high
+ brate_low
+ brate_high
+ benign_sleep
)
score = 100 - diff
rslt = {"offset": offset, "len": data_len, "start_time": start_stamp}
print("-----睡眠报告-----")
print(">>> 睡眠比例")
print(
"睡眠时长:%d H %d min (入睡:%d, 清醒:%d)"
% (allsleep_time / 60, allsleep_time % 60, fallasleeptime, wakeuptime)
)
print(
"深睡时长:%d H %d min (%d%%) | 中睡时长:%d H %d min (%d%%) "
"| 浅睡时长:%d H %d min (%d%%) | 清醒时长:%d H %d min (%d%%)"
% (
deepsleep_time / 60,
deepsleep_time % 60,
deepsleep_per,
remsleep_time / 60,
remsleep_time % 60,
remsleep_per,
lightsleep_time / 60,
lightsleep_time % 60,
lightsleep_per,
wakesleep_time / 60,
wakesleep_time % 60,
wakesleep_per,
)
)
print(">>> 呼吸心率")
print("平均呼吸:%d bpm (min: %d, max: %d)" % (mean_brate, brate_min, brate_max))
print("呼吸暂停:%d 次" % respstop_cnt)
print(
"呼吸过速:%d H %d min | 呼吸过缓:%d H %d min "
% (
brate_high_time / 60,
brate_high_time % 60,
brate_low_time / 60,
brate_low_time % 60,
)
)
print("平均心率:%d bpm (min: %d, max: %d)" % (mean_hrate, hrate_min, hrate_max))
print(
"心率过速:%d H %d min | 心率过缓:%d H %d min "
% (
hrate_high_time / 60,
hrate_high_time % 60,
hrate_low_time / 60,
hrate_low_time % 60,
)
)
print("心跳暂停:%d 次" % heartstop_cnt)
print(">>> 体动翻身")
print(
"体动次数:%d | 翻身次数:%d | 离床次数:%d | 清醒次数:%d "
% (bodymove_cnt, turnover_cnt, offbed_cnt, wake_off_cnt)
)
print(">>> 睡眠分数")
print("整晚睡眠得分:", score)
print("躁动不安扣分:", body_move)
print("离床过多扣分:", off_bed)
print("睡觉易醒扣分:", wake_cnt)
print("睡觉太晚扣分:", start_time)
print("难于入睡扣分:", fall_asleep)
print("深睡不足扣分:", perc_deep)
print("睡眠过长扣分:", sleep_long)
print("睡眠过短扣分:", sleep_less)
print("呼吸暂停扣分:", breath_stop)
print("心跳暂停扣分:", heart_stop)
print("心跳过缓扣分:", hrate_low)
print("心跳过速扣分:", hrate_high)
print("呼吸过缓扣分:", brate_low)
print("呼吸过速扣分:", brate_high)
print("良性睡眠扣分:", benign_sleep)
print("----------------")
return rslt
| 2.671875 | 3 |
app/exo_currency/api/v1/views.py | jcazallasc/exo-investing | 0 | 12787903 | <filename>app/exo_currency/api/v1/views.py
from rest_framework import generics
from rest_framework.response import Response
from rest_framework.views import APIView
from exo_currency.api.v1.pagination import SmallSetPagination
from exo_currency.api.v1.serializers import (
CalculateAmountSerializer, CalculateTimeWeightedRateSerializer,
CurrencyExchangeRateSerializer)
from exo_currency.models import CurrencyExchangeRate
from exo_currency.utils.base_currency_exchanger import BaseCurrencyExchanger
class CurrencyExchangeRateListAPIView(generics.ListAPIView):
queryset = CurrencyExchangeRate.objects.all().order_by('-id')
serializer_class = CurrencyExchangeRateSerializer
pagination_class = SmallSetPagination
def get_queryset(self):
queryset = self.queryset
_from = self.request.query_params.get('from')
if _from:
queryset = queryset.filter(valuation_date__gte=_from)
_to = self.request.query_params.get('to')
if _to:
queryset = queryset.filter(valuation_date__lte=_to)
return queryset
class CalculateAmountAPIView(APIView):
def get(self, request, origin_currency, amount, target_currency):
amount_calculated = BaseCurrencyExchanger().calculate_amount(
origin_currency,
target_currency,
amount,
)
serializer = CalculateAmountSerializer({
'amount': amount_calculated,
})
return Response(serializer.data)
class CalculateTimeWeightedRateAPIView(APIView):
def get(
self,
request,
origin_currency,
amount,
target_currency,
date_invested,
):
amount_calculated = BaseCurrencyExchanger().calculate_time_weighted_rate(
origin_currency,
amount,
target_currency,
date_invested,
)
serializer = CalculateTimeWeightedRateSerializer({
'twr': amount_calculated,
})
return Response(serializer.data)
| 1.953125 | 2 |
sqla_mixins.py | bboe/sqla_mixins | 3 | 12787904 | <reponame>bboe/sqla_mixins
import sys
from passlib.hash import pbkdf2_sha512
from sqlalchemy import Column, DateTime, String, Integer, Unicode, func
from sqlalchemy.ext.declarative import declared_attr, has_inherited_table
if sys.version_info < (3, 0):
builtins = __import__('__builtin__')
else:
import builtins
__version__ = '0.6'
class BasicBase(object):
"""A base sqlalchemy class that provides `id` and `created_at` fields."""
id = Column(Integer, primary_key=True)
created_at = Column(DateTime(timezone=True), default=func.now(),
index=True, nullable=False)
@declared_attr
def __tablename__(cls):
"""Set the tablename to be the lowercase of the class name.
Reference: http://docs.sqlalchemy.org/en/rel_0_9/orm/extensions/declarative.html#controlling-table-inheritance-with-mixins # noqa
"""
if has_inherited_table(cls) and BasicBase not in cls.__bases__:
return None
return cls.__name__.lower()
@classmethod
def fetch_by(cls, **kwargs):
"""Return a single object (or None) by the named attributes."""
return cls.query_by(**kwargs).first()
@classmethod
def fetch_by_id(cls, element_id):
"""Return an object (or None) by its id."""
return cls.query_by(id=int(element_id)).first()
@classmethod
def query_by(cls, **kwargs):
"""Return a query result for the named attributes."""
if not hasattr(builtins, '_sqla_mixins_session'):
raise Exception('__builtin__._sqla_mixins_session must be set to '
'your session class')
session = builtins._sqla_mixins_session()
return session.query(cls).filter_by(**kwargs)
def clone(self, exclude=None, update=None):
"""Return a shallow-copy clone of the sqlalchemy object.
Relationship objects are not copied, however foreign key assignments
held by this object are copied shallowly.
:param exclude: If provided, should be an iterable that contains the
names attributes to exclude from the copy. The attributes
`created_at` and `id` are always excluded.
:param update: If provided, should be a mapping of attribute name, to
the value that should be set.
"""
# Prepare attribute exclusion set
if not exclude:
exclude = set()
if not isinstance(exclude, set):
exclude = set(exclude)
exclude.update(('created_at', 'id'))
# Build a mapping of attributes to values
attrs = {x: getattr(self, x) for x in self.__mapper__.columns.keys()
if x not in exclude}
if update: # Update the mapping if necessary
attrs.update(update)
# Build and return the SQLA object
return self.__class__(**attrs)
def update(self, _ignore_order=False, **kwargs):
"""Update the named attributes.
Return a list of modified attribute names, or False if not updated.
Setting _ignore_order to True indicates that attribute lists should be
sorted before being compared. This is useful when updating relationship
lists.
"""
modified = []
for attr, value in kwargs.items():
self_value = getattr(self, attr)
if _ignore_order and (isinstance(self_value, list) and
isinstance(value, list)):
if sorted(self_value) != sorted(value):
setattr(self, attr, value)
modified.append(attr)
elif getattr(self, attr) != value:
setattr(self, attr, value)
modified.append(attr)
return modified or False
class UserMixin(object):
HASH_ROUNDS = 12000
SALT_SIZE = 16
username = Column(Unicode, index=True, nullable=False, unique=True)
_password = Column(String, nullable=False)
@classmethod
def hash_password(cls, password):
return pbkdf2_sha512.encrypt(password, rounds=cls.HASH_ROUNDS,
salt_size=cls.SALT_SIZE)
def __init__(self, *args, **kwargs):
if 'password' in kwargs:
kwargs['_password'] = UserMixin.hash_password(kwargs['password'])
del kwargs['password']
super(UserMixin, self).__init__(*args, **kwargs)
def set_password(self, password):
        self._password = UserMixin.hash_password(password)
password = property(fset=set_password)
def verify_password(self, password):
return pbkdf2_sha512.verify(password, self._password)
| 2.359375 | 2 |
python/test_gilded_rose.py | agooding-netizen/GildedRose-Refactoring-Kata | 0 | 12787905 | # -*- coding: utf-8 -*-
import unittest
from gilded_rose import Item, GildedRose
class GildedRoseTest(unittest.TestCase):
def test_concert_under_5(self):
items = [Item("Backstage passes to a TAFKAL80ETC concert", 2, 30)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(33, items[0].quality)
def test_concert_under_10(self):
items = [Item("Backstage passes to a TAFKAL80ETC concert", 8, 30)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(32, items[0].quality)
def test_concert(self):
items = [Item("Backstage passes to a TAFKAL80ETC concert", 12, 30)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(31, items[0].quality)
def test_concert_expired(self):
items = [Item("Backstage passes to a TAFKAL80ETC concert", 0, 24)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(0, items[0].quality)
def test_concert_max_10(self):
items = [Item("Backstage passes to a TAFKAL80ETC concert", 9, 50)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(50, items[0].quality)
def test_concert_max_5(self):
items = [Item("Backstage passes to a TAFKAL80ETC concert", 4, 50)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(50, items[0].quality)
def test_concert_max(self):
items = [Item("Backstage passes to a TAFKAL80ETC concert", 13, 50)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(50, items[0].quality)
def test_vest(self):
items = [Item("+5 Dexterity Vest", 10, 20)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(19, items[0].quality)
def test_vest_expired(self):
items = [Item("+5 Dexterity Vest", 0, 20)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(18, items[0].quality)
def test__vest_min(self):
items = [Item("+5 Dexterity Vest", 5, 0)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(0, items[0].quality)
def test_mongoose(self):
items = [Item("Elixir of the Mongoose", 10, 20)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(19, items[0].quality)
def test_mongoose_expired(self):
items = [Item("Elixir of the Mongoose", 0, 20)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(18, items[0].quality)
def test_mongoose_min(self):
items = [Item("Elixir of the Mongoose", 5, 0)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(0, items[0].quality)
def test_sulfuras(self):
items = [Item("Sulfuras, Hand of Ragnaros", 10, 80)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(80, items[0].quality)
def test_sulfuras_expired(self):
items = [Item("Sulfuras, Hand of Ragnaros", 0, 80)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(80, items[0].quality)
def test_brie(self):
items = [Item("Aged Brie", 10, 0)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(1, items[0].quality)
def test_brie_expired(self):
items = [Item("Aged Brie", 0, 0)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(2, items[0].quality)
def test_brie_max(self):
items = [Item("Aged Brie", 12, 49)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(50, items[0].quality)
def test_conjured(self):
items = [Item("Conjured Mana Cake", 10, 20)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(18, items[0].quality)
def test_conjured_expired(self):
items = [Item("Conjured Mana Cake", 0, 20)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(16, items[0].quality)
def test_conjured_min(self):
items = [Item("Conjured Mana Cake", 5, 0)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(0, items[0].quality)
def test_concert_sell_in(self):
items = [Item("Backstage passes to a TAFKAL80ETC concert", 10, 20)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(9, items[0].sell_in)
def test_vest_sell_in(self):
items = [Item("+5 Dexterity Vest", 10, 20)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(9, items[0].sell_in)
def test_mongoose_sell_in(self):
items = [Item("Elixir of the Mongoose", 10, 20)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(9, items[0].sell_in)
def test_sulfuras_sell_in(self):
items = [Item("Sulfuras, Hand of Ragnaros", 0, 20)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(0, items[0].sell_in)
def test_brie_sell_in(self):
items = [Item("Aged Brie", 10, 20)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(9, items[0].sell_in)
def test_conjured_sell_in(self):
items = [Item("Conjured Mana Cake", 10, 20)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(9, items[0].sell_in)
if __name__ == '__main__':
unittest.main()
| 3.21875 | 3 |
roundup_docx2.py | pythonscriptkiddie/final_roundup | 0 | 12787906 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 23 13:45:44 2019
@author: thomassullivan
"""
import docx
from docx.enum.dml import MSO_THEME_COLOR_INDEX
from objects import Article
def add_hyperlink(paragraph, text, url):
# This gets access to the document.xml.rels file and gets a new relation id value
#print(paragraph)
#print(text)
#print(url)
try:
part = paragraph.part
r_id = part.relate_to(url, docx.opc.constants.RELATIONSHIP_TYPE.HYPERLINK, is_external=True)
# Create the w:hyperlink tag and add needed values
hyperlink = docx.oxml.shared.OxmlElement('w:hyperlink')
hyperlink.set(docx.oxml.shared.qn('r:id'), r_id, )
# Create a w:r element and a new w:rPr element
new_run = docx.oxml.shared.OxmlElement('w:r')
rPr = docx.oxml.shared.OxmlElement('w:rPr')
# Join all the xml elements together add add the required text to the w:r element
new_run.append(rPr)
new_run.text = text
hyperlink.append(new_run)
# Create a new Run object and add the hyperlink into it
r = paragraph.add_run ()
r._r.append (hyperlink)
# A workaround for the lack of a hyperlink style (doesn't go purple after using the link)
# Delete this if using a template that has the hyperlink style in it
r.font.color.theme_color = MSO_THEME_COLOR_INDEX.HYPERLINK
r.font.underline = True
return hyperlink
except Exception as e:
print(e)
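# Illustrative use of add_hyperlink on its own (document, paragraph, and URL
# below are made up for the example):
#   doc = docx.Document()
#   para = doc.add_paragraph('See ')
#   add_hyperlink(para, 'this page', 'https://example.com')
#   doc.save('example.docx')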
def add_article(document, article):
#print(article)
try:
new_paragraph = document.add_paragraph('') #add blank paragraph that we append the text to
add_hyperlink(paragraph=new_paragraph, text=article.name, url=article.link)
#print(Article.get_date_formatted(article))
new_paragraph.add_run(' ({0}) '.format(Article.get_date_formatted(article))) #blank space between the link and the description
new_paragraph.add_run(article.description)
except Exception as e:
print(e)
def add_section(document, section):
section_name = document.add_paragraph(section.section_name)
section.categories.sort(key=lambda x: x.name, reverse=True)
section.categories.reverse()
for category in section.categories:
add_category(document, category)
def add_category(document, category):
category_name = document.add_paragraph(category.category_name)
#category.articles = category.articles.sort()
category.articles.sort(key=lambda x: x.name, reverse=True)
category.articles.reverse()
for article in category.articles:
#print(article)
add_article(document, article)
def create_roundup2(document, roundup_title, categories):
title = document.add_paragraph(roundup_title)
for category in categories:
add_category(document, category)
def complete_roundup2(filename, roundup_title, sections):
new_document = docx.Document()
create_roundup2(new_document, roundup_title, sections)
new_document.save('{0}.docx'.format(filename))
def create_roundup_docx(document, roundup_title, categories):
title = document.add_paragraph(roundup_title)
for category in categories:
add_category(document, category)
def create_complete_roundup(filename, roundup_title, categories):
new_document = docx.Document()
create_roundup_docx(new_document, roundup_title, categories)
new_document.save('{0}.docx'.format(filename))
if __name__ == '__main__':
print('roundup_docx2 loaded')
| 2.640625 | 3 |
3stage_random_topo/test_spectre_netlist.py | jason-sjy/DVAE_mix2 | 0 | 12787907 | <filename>3stage_random_topo/test_spectre_netlist.py
import os
import sys
import argparse
import logging
import time
import math
import traceback
import numpy as np
from scipy.optimize import fmin_l_bfgs_b
from shutil import copy
import igraph
import torch
from torch import nn, optim
from util import *
from dataset import *
from opt_util import *
from read import *
import os
import re
import random
import string
from netlist_generator import *
topo_dims = 5
random_topo = np.zeros((topo_dims, 1))
for j in range(topo_dims):
if j <= 1:
random_topo[j, 0] = np.random.randint(4, 11)
elif j == 2:
random_topo[j, 0] = np.random.randint(0, 25)
else:
random_topo[j, 0] = np.random.randint(0, 5)
if random_topo[j, 0] == 4:
random_topo[j, 0] = 0
# goal, constr, design_id = evaluate_topo(random_topo, constr_num, False)
# design_id = gen_random_id()
# amp_generator(design_id, random_topo)
amp_generator(random_topo)
print(random_topo)
y, z, r, c = sizing()
# os.system('hspice64 -i ./3stage.sp -o 3stage')
print("y:", np.shape(y))
print("y:", y)
print("z:", z)
print("r:", r)
print("c:", c)
| 2.25 | 2 |
tea_main/string-tools/dos2unix/main.py | lbyoo/mytoolbox | 0 | 12787908 | <gh_stars>0
import os
import sys
import user
config = user.config()
def run(path):
if os.path.isfile(path):
if path.endswith(config['suffix']):
with open(path, "r", encoding="utf-8") as f:
tmpfile = open(path+'.tmp', 'w+b')
for line in f:
line = line.replace('\r', '')
line = line.replace('\n', '')
tmpfile.write((line+'\n').encode("utf-8"))
tmpfile.close()
os.remove(path)
os.rename(path+'.tmp', path)
if os.path.isdir(path):
for f in os.listdir(path):
run(os.path.join(path,f))
def main():
run(config["path"])
if __name__ == "__main__":
main()
| 2.609375 | 3 |
loaders/mnist.py | ddomurad/simple_mlp | 0 | 12787909 | <filename>loaders/mnist.py
import matplotlib.pyplot as plt
import numpy as np
import os
import gzip
def get_network_constrains():
return (784, 10)
def plot(data):
img_size = int(len(data)**0.5)
plt.imshow(np.matrix(data).reshape(img_size, img_size), cmap='gray')
def _normalize(data):
data = data/np.linalg.norm(data)
data -= np.mean(data)
return data
def load(path, kind='train'):
kind = 't10k' if kind == 'test' else 'train'
labels_path = os.path.join(path, '%s-labels-idx1-ubyte.gz' % kind)
images_path = os.path.join(path, '%s-images-idx3-ubyte.gz' % kind)
with gzip.open(labels_path, 'rb') as lbpath:
labels = np.frombuffer(lbpath.read(), dtype=np.uint8, offset=8)
with gzip.open(images_path, 'rb') as imgpath:
images = np.frombuffer(imgpath.read(), dtype=np.uint8, offset=16).reshape(len(labels), 784)
images = [_normalize(image) for image in images]
return images, labels | 2.703125 | 3 |
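# Example usage sketch (the data directory is an assumption; it must contain the
# gzipped MNIST/Fashion-MNIST idx files named as expected above):
#   images, labels = load('data/fashion', kind='train')
#   plot(images[0])            # show the first normalised 28x28 image
#   plt.show()
#   get_network_constrains()   # -> (784, 10)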
platypush/plugins/tcp.py | RichardChiang/platypush | 228 | 12787910 | import base64
import json
import socket
from typing import Optional, Union
from platypush.plugins import Plugin, action
class TcpPlugin(Plugin):
"""
Plugin for raw TCP communications.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._sockets = {}
def _connect(self, host: str, port: int, timeout: Optional[float] = None) -> socket.socket:
sd = self._sockets.get((host, port))
if sd:
return sd
sd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if timeout:
sd.settimeout(timeout)
sd.connect((host, port))
self._sockets[(host, port)] = sd
return sd
@action
def connect(self, host: str, port: int, timeout: Optional[float] = None):
"""
Open a TCP connection.
:param host: Host IP/name.
:param port: TCP port.
:param timeout: Connection timeout in seconds (default: None).
"""
self._connect(host, port, timeout)
@action
def close(self, host: str, port: int):
"""
Close an active TCP connection.
:param host: Host IP/name.
:param port: TCP port.
"""
sd = self._sockets.get((host, port))
if not sd:
self.logger.warning('Not connected to ({}, {})'.format(host, port))
return
sd.close()
@action
def send(self, data: Union[bytes, str], host: str, port: int, binary: bool = False,
timeout: Optional[float] = None, recv_response: bool = False, **recv_opts):
"""
Send data over a TCP connection. If the connection isn't active it will be created.
:param data: Data to be sent, as bytes or string.
:param host: Host IP/name.
:param port: TCP port.
:param binary: If set to True and ``data`` is a string then will be treated as base64-encoded binary input.
:param timeout: Connection timeout in seconds (default: None).
:param recv_response: If True then the action will wait for a response from the server before closing the
connection. Note that ``recv_opts`` must be specified in this case - at least ``length``.
"""
if isinstance(data, list) or isinstance(data, dict):
data = json.dumps(data)
if isinstance(data, str):
data = data.encode()
if binary:
data = base64.decodebytes(data)
sd = self._connect(host, port, timeout)
try:
sd.send(data)
if recv_response:
recv_opts.update({
'host': host,
'port': port,
'timeout': timeout,
'binary': binary,
})
return self.recv(**recv_opts)
finally:
self.close(host, port)
@action
def recv(self, length: int, host: str, port: int, binary: bool = False, timeout: Optional[float] = None) -> str:
"""
Receive data from a TCP connection. If the connection isn't active it will be created.
:param length: Maximum number of bytes to be received.
:param host: Host IP/name.
:param port: TCP port.
:param binary: If set to True then the output will be base64-encoded, otherwise decoded as string.
:param timeout: Connection timeout in seconds (default: None).
"""
sd = self._connect(host, port, timeout)
try:
data = sd.recv(length)
if binary:
data = base64.encodebytes(data).decode()
else:
data = data.decode()
return data
finally:
self.close(host, port)
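# Illustrative invocation sketch (host, port, and payload are made up; within
# platypush these actions are normally triggered through the message bus or
# procedures rather than by direct calls, and the @action decorator wraps the
# return value):
#   plugin = TcpPlugin()
#   response = plugin.send(data='PING\n', host='192.168.1.10', port=7000,
#                          recv_response=True, length=64)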
# vim:sw=4:ts=4:et:
| 2.75 | 3 |
tinybrain.py | StefanKopieczek/tinybrain | 0 | 12787911 | <filename>tinybrain.py<gh_stars>0
import sys;p=sys.argv[1]
def w(c):s=sys.stdout;s.write(str(c)+' ');s.flush()
i=j=0;d=[0];a=abs;b=bool
while i<len(p):
s=0;x,y=p[i],d[j]
if x=='>':d+=[0];j+=1
elif x=='<':d=[0]+d
elif x in '+-':d[j]+=(-1,1)[x=='+']
elif x=='.':w(d[j])
elif x==',':d[j]=(input('>'))
elif x in '[]':s=(1,-1)[x==']']
if s!=0 and b(s+1)!=b(y):
c=1;l,r=((']','['),('[',']'))[s==1]
while 1:
i+=s
if p[i]==l:c+=1
elif p[i]==r:
c-=1
if c==0:break
i+=1
| 2.65625 | 3 |
src/course4/week2/tests/test_tsp.py | manoldonev/algo1-assignments | 0 | 12787912 | """Week2 Test Cases Traveling Salesman Problem"""
import math
from src.course4.week2.tsp import generate_complete_euclidean_distanace_graph, traveling_salesman_problem
def test_tsp1():
points = [(1, 1), (4, 1), (1, 4), (4, 4)]
result = traveling_salesman_problem(points)
assert result == 12
def test_tsp2():
points = [(0, 0), (0, 3), (3, 3)]
result = traveling_salesman_problem(points)
assert math.floor(result) == 10
def test_tsp3():
points = [(0, 0), (4, 3), (4, 0), (0, 3)]
result = traveling_salesman_problem(points)
assert result == 14
def test_tsp4():
points = [
(1.000, 1.00),
(1.125, 1.00),
(1.250, 1.00),
(1.500, 1.00),
(1.750, 1.00),
(2.000, 1.00),
(1.000, 2.00),
(1.125, 2.00),
(1.250, 2.00),
(1.500, 2.00),
(1.750, 2.00),
(2.000, 2.00)
]
result = traveling_salesman_problem(points)
assert result == 4
def test_tsp5():
points = [
(0.549963E-07, 0.985808E-08),
(-28.8733, -0.797739E-07),
(-79.2916, -21.4033),
(-14.6577, -43.3896),
(-64.7473, 21.8982),
(-29.0585, -43.2167),
(-72.0785, 0.181581),
(-36.0366, -21.6135),
(-50.4808, 7.37447),
(-50.5859, -21.5882),
(-0.135819, -28.7293),
(-65.0866, -36.0625),
(-21.4983, 7.31942),
(-57.5687, -43.2506),
(-43.0700, 14.5548)
]
result = traveling_salesman_problem(points)
assert math.floor(result) == 284
def test_tsp6():
points = [(0, 2.05), (3.414213562373095, 3.4642135623730947),
(0.5857864376269049, 0.6357864376269047),
(0.5857864376269049, 3.4642135623730947),
(2, 0),
(4.05, 2.05),
(2, 4.10),
(3.414213562373095, 0.6357864376269047)]
result = traveling_salesman_problem(points)
assert math.floor(result) == 12
| 3.296875 | 3 |
lungmask/data_io.py | kwxu/lungmask | 0 | 12787913 | <reponame>kwxu/lungmask
import os
from utils import read_file_contents_list, convert_flat_2_3d, get_logger
import nibabel as nib
import numpy as np
import pickle
logger = get_logger('DataFolder')
class DataFolder:
def __init__(self, in_folder, data_file_list=None):
self._in_folder = in_folder
self._file_list = []
if data_file_list is None:
self._file_list = self._get_file_list_in_folder(in_folder)
else:
self._file_list = self._get_file_list(data_file_list)
self._suffix = '.nii.gz'
def get_folder(self):
return self._in_folder
def if_file_exist(self, idx):
file_path = self.get_file_path(idx)
return os.path.exists(file_path)
def get_file_name(self, idx):
return self._file_list[idx]
def get_file_path(self, idx):
return os.path.join(self._in_folder, self.get_file_name(idx))
def get_first_path(self):
return self.get_file_path(0)
def num_files(self):
return len(self._file_list)
def print_idx(self, idx):
logger.info('Process %s (%d/%d)' % (self.get_file_path(idx), idx, self.num_files()))
def get_chunks_list(self, num_pieces):
full_id_list = range(self.num_files())
return [full_id_list[i::num_pieces] for i in range(num_pieces)]
def get_chunks_list_batch_size(self, batch_size):
num_chunks = self.num_files() // batch_size
chunk_list = [range(batch_size*i, batch_size*(i+1)) for i in range(num_chunks)]
if self.num_files() > num_chunks * batch_size:
chunk_list.append(range(num_chunks * batch_size, self.num_files()))
return chunk_list
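    # Illustrative chunking behaviour (counts are made up): with 10 files,
    # get_chunks_list(3) interleaves indices as [0, 3, 6, 9], [1, 4, 7] and
    # [2, 5, 8], while get_chunks_list_batch_size(4) yields contiguous ranges
    # range(0, 4), range(4, 8) plus a tail range(8, 10).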
def get_data_file_list(self):
return self._file_list
def set_file_list(self, file_list):
self._file_list = file_list
def change_suffix(self, new_suffix):
new_file_list = [file_name.replace(self._suffix, new_suffix) for file_name in self._file_list]
self._file_list = new_file_list
self._suffix = new_suffix
@staticmethod
def _get_file_list(file_list_txt):
return read_file_contents_list(file_list_txt)
@staticmethod
def get_data_folder_obj(config, in_folder, data_list_txt=None):
in_data_list_txt = data_list_txt
# if in_data_list_txt is None:
# # in_data_list_txt = config['data_file_list']
data_folder = DataFolder(in_folder, in_data_list_txt)
return data_folder
@staticmethod
def get_data_folder_obj_with_list(in_folder, data_list):
data_folder = DataFolder(in_folder)
data_folder.set_file_list(data_list)
return data_folder
@staticmethod
def _get_file_list_in_folder(folder_path):
print(f'Reading file list from folder {folder_path}', flush=True)
return os.listdir(folder_path)
class ScanWrapper:
def __init__(self, img_path):
self._img = nib.load(img_path)
self._path = img_path
def get_path(self):
return self._path
def get_file_name(self):
return os.path.basename(self._path)
def get_header(self):
return self._img.header
def get_affine(self):
return self._img.affine
def get_shape(self):
return self.get_header().get_data_shape()
def get_number_voxel(self):
return np.prod(self.get_shape())
def get_data(self):
return self._img.get_data()
def get_center_slices(self):
im_data = self.get_data()
im_shape = im_data.shape
slice_x = im_data[int(im_shape[0] / 2) - 1, :, :]
slice_x = np.flip(slice_x, 0)
slice_x = np.rot90(slice_x)
slice_y = im_data[:, int(im_shape[0] / 2) - 1, :]
slice_y = np.flip(slice_y, 0)
slice_y = np.rot90(slice_y)
slice_z = im_data[:, :, int(im_shape[2] / 2) - 1]
slice_z = np.rot90(slice_z)
return slice_x, slice_y, slice_z
def save_scan_same_space(self, file_path, img_data):
logger.info(f'Saving image to {file_path}')
img_obj = nib.Nifti1Image(img_data,
affine=self.get_affine(),
header=self.get_header())
nib.save(img_obj, file_path)
def save_scan_flat_img(self, data_flat, out_path):
img_shape = self.get_shape()
data_3d = convert_flat_2_3d(data_flat, img_shape)
self.save_scan_same_space(out_path, data_3d)
class ScanWrapperWithMask(ScanWrapper):
def __init__(self, img_path, mask_path):
super().__init__(img_path)
self._mask = nib.load(mask_path)
self._mask_path = mask_path
def get_number_voxel(self):
return len(self.get_data_flat())
def get_data_flat(self):
img_data = self.get_data()
mask_data = self._mask.get_data()
img_data_flat = img_data[mask_data == 1]
return img_data_flat
def save_scan_flat_img(self, data_flat, out_path):
mask_data = self._mask.get_data()
img_data = np.zeros(mask_data.shape, dtype=float)
img_data[mask_data == 1] = data_flat
self.save_scan_same_space(out_path, img_data)
# def get_1D_loc_from_3D(self, idx_x, idx_y, idx_z):
def save_object(object_to_save, file_path):
with open(file_path, 'wb') as output:
print(f'Saving obj to {file_path}', flush=True)
pickle.dump(object_to_save, output, pickle.HIGHEST_PROTOCOL)
def load_object(file_path):
with open(file_path, 'rb') as input_file:
logger.info(f'Loading binary data from {file_path}')
obj = pickle.load(input_file)
return obj
| 2.40625 | 2 |
src/py/validation/validation.py | meetup/jira-onboarding | 5 | 12787914 | <gh_stars>1-10
from jira_instance.jira_instance import check_jira_server
import sys, traceback
## Run validation checks on a given configuration json object
def validate_config(config_json):
try:
assert "server" in config_json # config must have server specified
assert check_jira_server(config_json["server"]) # specified server must be valid
assert "name" in config_json # config must have a name specified
assert config_json["name"] # name must be non-empty
assert "project_key" in config_json # config must have a project key specified
assert config_json["project_key"] # project key must be non-empty
assert "epic_fields" in config_json # config must have epic field block specified
epic_json = config_json["epic_fields"]
assert "summary" in epic_json # config must have an epic summary specified
assert epic_json["summary"] # epic summary must be non-empty
assert "description" in epic_json # config must have an epic description specified
assert epic_json["description"] # epic_description must be non-empty
assert "issues_to_create" in config_json # config must have a list of issues specified
assert len(config_json["issues_to_create"]) > 0 # list of issues must be non-empty
for issue_json in config_json["issues_to_create"]:
assert "summary" in issue_json # each issue must have a summary specified
assert "description" in issue_json # each issue must have a description specified
if "assignee" in issue_json:
assert "name" in issue_json["assignee"] # if assignee is specified, must have a field called 'name' insde
except AssertionError:
print "Config did not pass validation:"
_, _, tb = sys.exc_info()
traceback.print_tb(tb) # Fixed format
tb_info = traceback.extract_tb(tb)
filename, line, func, text = tb_info[-1]
return False
return True
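# Minimal example of a config dict that satisfies the structural checks above
# (server URL and field values are placeholders; check_jira_server must also
# accept the server for validation to pass):
#   {
#     "server": "https://yourcompany.atlassian.net",
#     "name": "Onboarding",
#     "project_key": "ONB",
#     "epic_fields": {"summary": "New hire", "description": "Onboarding epic"},
#     "issues_to_create": [
#       {"summary": "Set up laptop", "description": "...", "assignee": {"name": "jdoe"}}
#     ]
#   }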
| 2.6875 | 3 |
mbuild/lib/bulk_materials/__init__.py | daico007/mbuild | 101 | 12787915 | """mBuild bulk materials library."""
from mbuild.lib.bulk_materials.amorphous_silica_bulk import AmorphousSilicaBulk
| 1.257813 | 1 |
SKALA4 in phase0.py | ska-telescope/ska-rfi-monitoring-processing | 0 | 12787916 | <reponame>ska-telescope/ska-rfi-monitoring-processing<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 14:28:07 2019
Reads a TDD data file from LFAA PHASE 0 experiment
@author: f.divruno
"""
import os, os.path
import numpy as np
import matplotlib.pyplot as plt
from math import *
import matplotlib
import rfiLib as RFI
from scipy import signal
#from rfiLib import * # RFI functions.
font = {'family' : 'DejaVu Sans','weight' : 'normal','size' : 22}
matplotlib.rc('font', **font)
##%%
#
## A class that will downsample the data and recompute when zoomed.
#class DataDisplayDownsampler(object):
# def __init__(self, xdata, ydata):
# self.origYData = ydata
# self.origXData = xdata
# self.max_points = 1000
# self.delta = xdata[-1] - xdata[0]
#
# def downsample(self, xstart, xend):
# # get the points in the view range
# mask = (self.origXData >= xstart) & (self.origXData <= xend)
# # dilate the mask by one to catch the points just outside
# # of the view range to not truncate the line
# mask = np.convolve([1, 1], mask, mode='same').astype(bool)
# # sort out how many points to drop
# ratio = max(np.sum(mask) // self.max_points, 1)
#
# # mask data
# xdata = self.origXData[mask]
# ydata = self.origYData[mask]
#
# # downsample xdata
# xdata = xdata[::ratio]
# # calculate max peak for y data
# ydata = np.reshape(ydata,[len(ydata)//ratio,ratio])
# ydata = np.max(ydata,1)
## ydata = ydata[::ratio]
#
# print("using {} of {} visible points".format(
# len(ydata), np.sum(mask)))
#
# return xdata, ydata
#
# def update(self, ax):
# # Update the line
# lims = ax.viewLim
# if np.abs(lims.width - self.delta) > 1e-8:
# self.delta = lims.width
# xstart, xend = lims.intervalx
# self.line.set_data(*self.downsample(xstart, xend))
# ax.figure.canvas.draw_idle()
#
#def plot_max_peak(xdata,ydata):
# d = DataDisplayDownsampler(xdata, ydata)
# fig, ax = plt.subplots()
#
# # Hook up the line
## d.line, = ax.plot(xdata, ydata)
# d.line = ax.plot(xdata, ydata)
# ax.set_autoscale_on(False) # Otherwise, infinite loop
#
# # Connect for changing the view limits
# ax.callbacks.connect('xlim_changed', d.update)
# ax.set_xlim(xdata.min(), xdata.max())
# plt.show()
# return d
#
#
#
#def read_phase0_data(directory,filenum):
# files = os.listdir(directory)
# # Each file is 1000 seconds, 4096 freq points, thats 16 minutes aprox
# # there are aprox 4 files per hour.
# # Data reduction:
# # Each file look for max, min, 99%, 95%, 90%, limt line for RFI.
# #
#
# header = 131072
# if filenum == 'all':
# N_files = np.size(files)
# else:
# N_files = filenum
# time_vect = np.zeros([N_files])
# data = np.zeros([N_files,header]).astype('int8')
# i = 0
# for i in range(N_files):
# f = files[i]
# fullpath = os.path.join(directory, f)
# if os.path.splitext(fullpath)[1] == '.tdd':
# with open(fullpath,'r') as f:
# header = np.fromfile(f, dtype=np.dtype('>f8') , count = 1)#dtype=np.floating point 8 bytes
# data[i,:] = np.fromfile(f, dtype=np.dtype('>i1') , count = 131072)#dtype=np.int64
# sec = os.path.splitext(fullpath)[0][-2::]
# mins = os.path.splitext(fullpath)[0][-4:-2]
# hours = os.path.splitext(fullpath)[0][-6:-4]
# time_vect[i] = int(hours)*60*60+int(mins)*60+int(sec)
# i += 1
# print(str(i))
#
# return time_vect,data
#
#
#def plot_waterfall(freq,data,title):
#
# fig = plt.figure(figsize=(20,12))
# ax = fig.add_axes((0.1, 0.1, 0.8, 0.8))
# #cbax = fig.add_axes((0.85, 0.05, 0.95, .95))
#
#
# left = freq[0]
# right = freq[-1]
# bottom = 0
# top = 2000
# data_log = (10*np.log10(data))
# cim = ax.imshow( data_log,
# origin='lower', interpolation='nearest',
# cmap= 'jet',
# extent=(left, right, bottom, top),
# )
#
# ax.set_aspect(abs(right-left) / abs(top-bottom))
# plt.xlabel('MHz')
# plt.ylabel('time')
# plt.title(title)
#
#def idle_fun(val):
# return val
#
#def power_spectrum(data):
# N = np.size(data,1)
# P = np.abs(data[:,0:int(N/2)].astype('float32'))**2
#
# return P
#
#def partition(signal,max_len, func1, func2= idle_fun):
# N_files = np.size(signal,0)
# N = np.size(signal,1)
## data = np.zeros([0,N])
# if N_files*N > max_len: # steps to calculate all the FFTs
# steps = int(N_files*N/max_len)
# N_step = int(N_files/steps)
# for i in range(steps):
# A = func2(func1(signal[int(i*N_step):int((i+1)*N_step)]))
# if i ==0:
# data = A
# else:
# data = np.concatenate((data,A))
# print(str(i+1)+' of '+str(steps))
# if N_files > (i+1)*N_step:
# A = func2(func1(signal[int((i+1)*N_step)::]))
# data = np.concatenate((data,A))
# else:
# data= func2(func1(signal))
# return data
#
#
#def fft_calc(signal, power_calc ,plot_figs):
# # if power_calc = 1: Returns the power spectrum in real frequencies and the freq vector in MHz.
# # if power_calc = 0: Returns the complex voltage spectrum and the freq vector in MHz.
#
# max_length = 8000*4096/10
# N = np.size(signal,1)
# N_files = np.size(signal,0)
#
# fs = 800e6
#
# freq = (np.fft.fftfreq(N, d=1/fs))
# if power_calc ==1:
# data_fft = partition(signal,max_length,np.fft.fft,power_spectrum)/N
# freq = freq[0:int(N/2)]
# else:
# data_fft = partition(signal,max_length,np.fft.fft)/N
#
# plot_all = 0 # this is for debugging, if set to 1 generates 1 plot for each file.
# if plot_all ==1:
## N_files = 1 #for debugging
# plt.figure()
# for i in range(N_files):
# D = abs(data_fft)**2
# plt.plot(freq/1e6,10*np.log10(D))
# plt.title('power spectrum' )
#
# if plot_figs==1:
# ave = np.average(abs(data_fft)**2,0)
# plt.figure()
# plt.plot(freq/1e6,10*np.log10(ave))
# plt.title('Average of all the captures')
# return [freq/1e6,data_fft]
#
#
#def total_power(freq,data,fo,B,Tlow,Thigh,plot_flag):
## fo = 160 #MHz
## B = 1 #MHz
## low_th = 0.4e1
## high_th = 2e17
#
# fmin = fo-B/2
# fmax = fo+B/2
#
# fstep = freq[1] - freq[0]
#
# ind1 = int((fmin-freq[0])/fstep)
# ind2 = int((fmax-freq[0])/fstep)
#
# total_power = np.sum(data[:,ind1:ind2]**2,1)
#
# mask = np.ones(len(total_power), dtype=bool)
# for i in range(np.size(total_power)):
# if (total_power[i] < Tlow or total_power[i] > Thigh):
# mask[i] = False
#
# Data2 = total_power[mask]
#
# if plot_flag ==1:
# plt.figure()
# plt.plot(10*np.log10((Data2)))
# plt.title('freq = ' + str(fo) + ' MHz, B = ' + str(B) + ' MHz')
# return Data2
#% Read only one file to debug:
#
#directory = "C:\\Users\\f.divruno\\Dropbox (SKA)\\14- RFI environment\\01- Australia\\Phase-0\\2019-03-31\\DATA\\RX-02_SKALA-4.0\\Pol-X"
##directory = "G:\\Team Drives\\LowBridging Phase-0\\TPM\\DATA\\2019-03-31 (1)\\TRIGGER\\RX-02_SKALA-4.0\\Pol-X"
#
#time,time_data = read_phase0_data(directory,filenum=1)
#[freq1,Ant1_fft] = fft_calc(time_data, 0,0)
#Power = Ant1_fft*np.conjugate(Ant1_fft)
#Autocorr = np.abs(np.fft.ifft(Power))
#
##[freq2,Ant2] = read_trigger_data(base_dir,date,antenna2,pol,1)
##[freq3,Ant3] = read_trigger_data(base_dir,date,antenna3,pol,1)
##[freq4,Ant4] = read_trigger_data(base_dir,date,antenna4,pol,1)
#
#Ant1_ave = 10*np.log10(np.average(abs(Power),0))
#
#
#plt.figure()
#plt.plot(freq1,Ant1_ave)
#plt.grid('on')
#plt.title('SPD1 plot')
#
#plot_all = 0
#if plot_all ==1:
#
# plt.figure()
# plt.plot(np.transpose(Autocorr))
# plt.grid('on')
# plt.title('Auto Correlation')
#
# plt.figure()
# plt.plot(np.transpose(time_data))
# plt.grid('on')
# plt.title('Time domain data')
#%% Read all the time files to calculate statistics in bands.
directory = "C:\\Users\\f.divruno\\Dropbox (SKA)\\14- RFI environment\\01- Australia\\Phase-0\\2019-03-31\\DATA\\RX-02_SKALA-4.0\\Pol-X"
read_data_from_files = 0
if read_data_from_files ==1:
time,td_data = RFI.read_phase0_data(directory,files_num='all')
[freq,Ant1_pow] = RFI.fft_calc(td_data,800e6, 0, 0)
#del time_data
np.savez_compressed(directory + '\\'+'Phase0_SKALA4_full_day', freq=freq, SKALA4_pow=Ant1_pow,time=time, td_data=td_data)
else:
Aux = np.load('C:\\Users\\f.divruno\\Dropbox (SKA)\\14- RFI environment\\01- Australia\\Phase-0\\Phase0_SKALA4_full_day.npz')
SKALA4_pow = Aux['SKALA4_pow'] #in V**2
SKALA4_time = Aux['time'] # in seconds
SKALA4_freq = Aux['freq'] # in MHz
load_td_data = 1
if load_td_data:
Aux = np.load(r'C:\Users\F.Divruno\Dropbox (SKA)\14- RFI environment\01- Australia\Phase-0\2019-03-31\DATA\RX-02_SKALA-4.0\Pol-X\Phase0_full_day_raw.npz')
td_data = Aux['td_data'] # time domain raw data
#%% filtering the td signal
fs = 800e6
fn = fs/2
f1 = 137.7e6/fn
f2 = 138e6/fn
ind = 49
b, a = signal.butter(3, [f1,f2],btype='bandpass',output='ba')
zi = signal.lfilter_zi(b, a)
z, _ = signal.lfilter(b, a, td_data[ind,:], zi=zi*td_data[ind,0])
#Apply the filter again, to have a result filtered at an order the same as filtfilt:
z2, _ = signal.lfilter(b, a, z, zi=zi*z[0])
#Use filtfilt to apply the filter:
y = signal.filtfilt(b, a, td_data[ind,:])
plt.figure()
plt.plot( td_data[ind,:], 'b', alpha=0.75)
plt.plot( z, 'r--', z2, 'r', y, 'k')
plt.legend(('noisy signal', 'lfilter, once', 'lfilter, twice','filtfilt'), loc='best')
[freq,P_orig] = RFI.fft_calc(td_data[ind,:],800e6, 1, 0)
[freq,P_filt] = RFI.fft_calc(y,800e6, 1, 0)
plt.figure()
plt.plot(freq,10*np.log10(P_orig),'r',freq,10*np.log10(P_filt),'b')
#%% maximum values
f= SKALA4_freq
data= SKALA4_pow
title= 'Max values - Phase0 - SKALA4.0 - 20190331'
RFI.plot_percentile(f,data,100,directory,title='Maximum values - '+title)
#(freq,data,percentile,outdir,dataunits='dBm',title=''):
#%% percentiles
perc = 100
f= SKALA4_freq
data= SKALA4_pow
title= 'Phase0 - SKALA4.0 - 20190331'
RFI.plot_percentile(f,data,perc,title)
#%% Spectrogram
Fstart = 0
Fstop = 400
time = SKALA4_time
f= SKALA4_freq
data= SKALA4_pow
title= 'Phase0 - SKALA4.0 - 20190331'
RFI.plot_spectrogram(time/3600,f,data,'Spectrogram -'+title,Fstart,Fstop,Tstart=0,Tstop=0)
#%% total power in a band
# This allows to see what is the time domain behaviour of the interference in this band.
time = SKALA4_time
f= SKALA4_freq
data= SKALA4_pow
title= 'Phase0 - SKALA4.0 - 20190331'
fo = 200 #MHz # Aeronautic Comms band
B = 300 #MHz
#fo = 128 #MHz # Aeronautic Comms band
#B = 16 #MHz
#fo = 138.5 #MHz #This is Orbcom
#B = 2 #MHz
#fo = 148.5 #MHz VHF radio?
#B = 1 #MHz
#fo = 255 #MHz Milstar satellite
#B = 30 #MHz
fo = 112.5 #MHz Milstar satellite
B = 9 #MHz
fo = 16 #MHz # low freq stuff
B = 25 #MHz
low_th = 0
high_th = 1e200
time2,Data2 = RFI.total_power(time*3600,f,data,fo,B,low_th,high_th,0)
plt.figure()
plt.plot(time2/3600,10*np.log10(Data2))
plt.grid(True,'both')
plt.title(title + ' - Fo=' + str(fo) + ' MHz - B=' + str(B) + 'MHz')
#%% Cross correlation of triggered samples
#base_dir = "G:\\Team Drives\\LowBridging Phase-0\\TPM\\DATA\\"
#date = "2019-03-15"
#
#ant1 = "RX-06_SKALA-4.1"
#ant2 = "RX-02_SKALA-4.0"
#ant3 = "RX-05_SKALA-2"
#ant4 = "RX-01_EDA-2"
#
#
#antenna1 = ant1
#antenna2 = ant2
#antenna3 = ant3
#pol1 = "Pol-X"
#pol2 = 'Pol-X'
#
#
#dir1 = base_dir + date + "\\TRIGGER\\" + antenna1 + "\\"+ pol1+"\\"
#dir2 = base_dir + date + "\\TRIGGER\\" + antenna2 + "\\"+ pol2+"\\"
#dir3 = base_dir + date + "\\TRIGGER\\" + antenna3 + "\\"+ pol1+"\\"
#
#
#filepos = 0
#time_data1 = read_phase0_data(dir1,filepos)
#time_data2 = read_phase0_data(dir2,filepos)
##time_data3 = read_phase0_data(dir3,filepos)
#[freq1,Ant1_fft] = fft_calc(time_data1, 0)
#[freq2,Ant2_fft] = fft_calc(time_data2, 0)
#Crosscorr = np.abs(np.fft.ifft(Ant1_fft*np.conjugate(Ant2_fft)))
#
#
##[freq2,Ant2] = read_trigger_data(base_dir,date,antenna2,pol,1)
##[freq3,Ant3] = read_trigger_data(base_dir,date,antenna3,pol,1)
##[freq4,Ant4] = read_trigger_data(base_dir,date,antenna4,pol,1)
#
#plt.figure()
#plt.plot(np.reshape(Crosscorr,[131072]))
#plt.grid('on')
#plt.title('Cross Correlation')
#
#plt.figure()
#plt.plot(np.reshape(time_data1,[131072]))
#plt.plot(np.reshape(time_data2,[131072]))
#plt.grid('on')
#plt.title('Time domain data')
#plt.legend([antenna1+pol1,antenna2+pol2])
#
#plt.figure()
#plt.plot(freq1/1e6,10*np.log10(np.abs(np.reshape(Ant1_fft,[131072])**2)))
#plt.plot(freq2/1e6,10*np.log10(np.abs(np.reshape(Ant2_fft,[131072])**2)))
#plt.grid('on')
#plt.title('Freq domain')
#plt.legend([antenna1+pol1,antenna2+pol2])
| 2.53125 | 3 |
cpc/asm/Instruction.py | U-Ar/Cpresto | 1 | 12787917 | from .Assembly import Assembly
class Instruction(Assembly):
def __init__(self,a1,a2=None,a3=None,a4=None,a5=False):
if isinstance(a3,list):
self.mnemonic = a1
self.suffix = a2
self.operands = a3
self.need_relocation = a4
else :
if a4 != None:
self.mnemonic = a1
self.suffix = a2
self.operands = [a3,a4]
self.need_relocation = a5
elif a3 != None:
self.mnemonic = a1
self.suffix = a2
self.operands = [a3]
self.need_relocation = a5
else :
self.mnemonic = a1
self.suffix = ""
self.operands = []
self.need_relocation = False
def build(self, mnemonic,o1,o2=None):
if o2 == None:
return Instruction(mnemonic,self.suffix,[o1],self.need_relocation)
else :
return Instruction(mnemonic,self.suffix,[o1,o2],self.need_relocation)
def is_instruction(self):
return True
    # NOTE: the instance attribute ``self.mnemonic`` assigned in __init__ shadows this
    # accessor, so ``instr.mnemonic`` returns the mnemonic string directly and this method
    # is effectively unreachable through normal attribute lookup.
    def mnemonic(self):
        return self.mnemonic
def is_jump_instruction(self):
return self.mnemonic == "jmp" or self.mnemonic == "jz" or \
self.mnemonic == "jne" or self.mnemonic == "je"
def num_operands(self):
return len(self.operands)
def operand1(self):
return self.operands[0]
def operand2(self):
return self.operands[1]
def jmp_destination(self):
ref = self.operands[0]
return ref.value()
def collect_statistics(self,stats):
stats.instrunction_used(self.mnemonic)
for i in range(len(self.operands)):
self.operands[i].collect_statistics(stats)
def to_source(self,table):
buf = "\t"
buf += self.mnemonic + self.suffix
sep = "\t"
for i in range(len(self.operands)):
buf += sep
sep = ", "
buf += self.operands[i].to_source(table)
return buf
def to_string(self):
return "#<Insn " + self.mnemonic + ">"
def dump(self):
buf = "(Instruction "
buf += TextUtils.dump_string(self.mnemonic)
buf += " "
buf += TextUtils.dump_string(self.suffix)
for oper in self.operands:
buf += " " + oper.dump()
buf += ")"
return buf
| 3.28125 | 3 |
scripts/email_data_trigger.py | EdinburghGenomics/clarity_scripts | 2 | 12787918 | #!/usr/bin/env python
import platform
from EPPs.common import SendMailEPP
class DataReleaseTrigger(SendMailEPP):
"""Notifies the bioinformatics team to release data for a project."""
def _run(self):
if len(self.projects) > 1:
raise ValueError('More than one project present in step. Only one project per step permitted')
data_download_contacts = []
# There are up to 5 contacts entered in the step.
for count in range(1, 6):
udf_name1 = 'Data Download Contact Username %s' % count
udf_name2 = 'Is Contact %s A New or Existing User?' % count
if self.process.udf.get(udf_name1):
data_download_contacts.append(
'%s (%s)' % (self.process.udf.get(udf_name1), self.process.udf.get(udf_name2))
)
msg = '''Hi Bioinformatics,
Please release the data for {sample_count} sample(s) from project {project} shown at the link below:
{link}
The data contacts are:
{data_download_contacts}
Kind regards,
ClarityX'''
msg = msg.format(
link='https://' + platform.node() + '/clarity/work-details/' + self.step_id[3:],
sample_count=len(self.samples),
project=self.projects[0].name,
data_download_contacts='\n'.join(data_download_contacts)
)
subject = ', '.join(p.name for p in self.projects) + ': Please release data'
# Send email to list of persons specified in the default section of config
self.send_mail(subject, msg, config_name='projects-bioinformatics')
if __name__ == '__main__':
DataReleaseTrigger().run()
| 2.421875 | 2 |
t_rex/loader.py | RoboCopGay/TextAnalizer | 0 | 12787919 | <filename>t_rex/loader.py
import re as REGEX
from yaml import safe_load as yaml
from .dictionary import *
from .file import *
class Config:
class Element:
named_keys = []
class Local:
def __init__ ( self, _local ):
self.keywords = {}
self.names = None
if 'keywords' in _local:
self.keywords = _local['keywords']
_local.pop('keywords')
self.functions = _local
def __str__ (self):
return str(self.functions)
def __call__ (self):
return self.functions
class Child:
def __init__ ( self, _child ):
self.items = []
if type(_child) == str:
self.items = [_child]
elif type(_child) == dict:
for key in _child:
self.items.append({'name': key, 'replace': _child[key]})
            elif type(_child) == list:
self.items = _child
def __str__ (self):
return str(self.items)
def __call__ (self):
return self.items
def __init__(self, _keys = {}, _key = None, _rep = None):
# Properties
self.functions = {}
self.locals = None
self.replace = _rep
self.pattern = _key
self.name = _key
self.childs = None
self.end = None
            # Adding data to properties
            if _key and _rep:
                _keys['fnc'] = { _key: _rep }
ptn = None
if 'p' in _keys:
ptn = 'p'
elif 'ptn' in _keys:
ptn = 'ptn'
elif 'pattern' in _keys:
ptn = 'pattern'
if (ptn == 'p') or (ptn == 'ptn') or (ptn == 'pattern'):
self.pattern = _keys[ptn]
self.name = _key
if _keys:
for key in _keys:
if (key == 'f') or (key == 'fnc') or (key == 'function'):
self.functions = _keys[key]
if (key == 'l') or (key == 'lcl') or (key == 'local'):
self.locals = self.Local( _keys[key] )
if (key == 'r') or (key == 'rpl') or (key == 'replace'):
self.replace = _keys[key]
if (key == 'n') or (key == 'nam') or (key == 'name'):
self.name = _keys[key]
if (key == 'c') or (key == 'chd') or (key == 'childs'):
self.childs = self.Child( _keys[key] )
if (key == 'e') or (key == 'end'):
self.end = _keys[key]
def __str__ (self):
out = {
'name': self.name,
'key': self.pattern,
'end': self.end,
'functions': self.functions,
'locals': self.locals() if self.locals else self.locals,
'replace': self.replace
}
return str(out)
def __call__ (self):
out = {
'name': self.name,
'key': self.pattern,
'end': self.end,
'functions': self.functions,
'locals': self.locals(),
'replace': self.replace
}
return out
def __init__ (self, _file = None, _text = None ):
self.text = _text
if not _text:
self.file = File(_file)
self.text = self.file.text
self.properties = { ':identation': { True: '', False: ''}}
self.elements = {}
# Loading data
dictionary = yaml( self.text )
self.dict = dictionary
# Saving data
for key in dictionary:
if ':' == key[0]:
self.properties[key] = dictionary[key]
else:
if type (dictionary[key]) == str:
key_tmp = self.Element(_rep = dictionary[key], _key = key)
if key_tmp.name:
self.elements[key_tmp.name] = key_tmp
else:
self.elements[key] = key_tmp
elif type (dictionary[key]) == dict:
key_tmp = self.Element(_keys = dictionary[key], _key = key )
self.elements[key_tmp.name] = key_tmp
def __str__ (self):
return str(self.dict)
| 2.71875 | 3 |
app/__init__.py | Brunoro811/api_dangels | 0 | 12787920 | <gh_stars>0
from flask import Flask
from environs import Env
from app import routes
from app.configs import database, migrations, cors
from app.default import default_types_users, default_client, default_types_sales
env = Env()
env.read_env()
def create_app():
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = env("SQLALCHEMY_DATABASE_URI_POSTGRES")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config["JSON_SORT_KEYS"] = False
cors.init_app(app)
database.init_app(app)
migrations.init_app(app)
routes.init_app(app)
with app.app_context():
default_types_users()
default_client()
default_types_sales()
return app
| 2.078125 | 2 |
pretrained_mol_sim/molecule_optimization/simulation9/grammar/run_bo.py | allisontam/graph_coattention | 0 | 12787921 | <filename>pretrained_mol_sim/molecule_optimization/simulation9/grammar/run_bo.py
import pickle
import gzip
def decode_from_latent_space(latent_points, grammar_model):
decode_attempts = 500
decoded_molecules = []
for i in range(decode_attempts):
current_decoded_molecules = grammar_model.decode(latent_points)
current_decoded_molecules = [ x if x != '' else 'Sequence too long' for x in current_decoded_molecules ]
decoded_molecules.append(current_decoded_molecules)
# We see which ones are decoded by rdkit
rdkit_molecules = []
for i in range(decode_attempts):
rdkit_molecules.append([])
for j in range(latent_points.shape[ 0 ]):
smile = np.array([ decoded_molecules[ i ][ j ] ]).astype('str')[ 0 ]
if MolFromSmiles(smile) is None:
rdkit_molecules[ i ].append(None)
else:
rdkit_molecules[ i ].append(smile)
import collections
decoded_molecules = np.array(decoded_molecules)
rdkit_molecules = np.array(rdkit_molecules)
final_smiles = []
for i in range(latent_points.shape[ 0 ]):
aux = collections.Counter(rdkit_molecules[ ~np.equal(rdkit_molecules[ :, i ], None) , i ])
if len(aux) > 0:
smile = aux.items()[ np.argmax(aux.values()) ][ 0 ]
else:
smile = None
final_smiles.append(smile)
return final_smiles
# We define the functions used to load and save objects
def save_object(obj, filename):
"""
Function that saves an object to a file using pickle
"""
result = pickle.dumps(obj)
with gzip.GzipFile(filename, 'wb') as dest: dest.write(result)
dest.close()
def load_object(filename):
"""
Function that loads an object from a file using pickle
"""
with gzip.GzipFile(filename, 'rb') as source: result = source.read()
ret = pickle.loads(result)
source.close()
return ret
from sparse_gp import SparseGP
import scipy.stats as sps
import numpy as np
random_seed = int(np.loadtxt('../random_seed.txt'))
np.random.seed(random_seed)
# We load the data
X = np.loadtxt('../../latent_features_and_targets_grammar/latent_faetures.txt')
y = -np.loadtxt('../../latent_features_and_targets_grammar/targets.txt')
y = y.reshape((-1, 1))
n = X.shape[ 0 ]
permutation = np.random.choice(n, n, replace = False)
X_train = X[ permutation, : ][ 0 : np.int(np.round(0.9 * n)), : ]
X_test = X[ permutation, : ][ np.int(np.round(0.9 * n)) :, : ]
y_train = y[ permutation ][ 0 : np.int(np.round(0.9 * n)) ]
y_test = y[ permutation ][ np.int(np.round(0.9 * n)) : ]
import os.path
np.random.seed(random_seed)
iteration = 0
while iteration < 5:
# We fit the GP
np.random.seed(iteration * random_seed)
M = 500
sgp = SparseGP(X_train, 0 * X_train, y_train, M)
sgp.train_via_ADAM(X_train, 0 * X_train, y_train, X_test, X_test * 0, \
y_test, minibatch_size = 10 * M, max_iterations = 50, learning_rate = 0.0005)
pred, uncert = sgp.predict(X_test, 0 * X_test)
error = np.sqrt(np.mean((pred - y_test)**2))
testll = np.mean(sps.norm.logpdf(pred - y_test, scale = np.sqrt(uncert)))
print 'Test RMSE: ', error
print 'Test ll: ', testll
pred, uncert = sgp.predict(X_train, 0 * X_train)
error = np.sqrt(np.mean((pred - y_train)**2))
trainll = np.mean(sps.norm.logpdf(pred - y_train, scale = np.sqrt(uncert)))
print 'Train RMSE: ', error
print 'Train ll: ', trainll
# We load the decoder to obtain the molecules
from rdkit.Chem import MolFromSmiles, MolToSmiles
from rdkit.Chem import Draw
import image
import copy
import time
import sys
sys.path.insert(0, '../../../')
import molecule_vae
grammar_weights = '../../../pretrained/zinc_vae_grammar_L56_E100_val.hdf5'
grammar_model = molecule_vae.ZincGrammarModel(grammar_weights)
# We pick the next 50 inputs
next_inputs = sgp.batched_greedy_ei(50, np.min(X_train, 0), np.max(X_train, 0))
valid_smiles_final = decode_from_latent_space(next_inputs, grammar_model)
from rdkit.Chem import Descriptors
from rdkit.Chem import MolFromSmiles, MolToSmiles
new_features = next_inputs
save_object(valid_smiles_final, "results/valid_smiles{}.dat".format(iteration))
logP_values = np.loadtxt('../../latent_features_and_targets_grammar/logP_values.txt')
SA_scores = np.loadtxt('../../latent_features_and_targets_grammar/SA_scores.txt')
cycle_scores = np.loadtxt('../../latent_features_and_targets_grammar/cycle_scores.txt')
SA_scores_normalized = (np.array(SA_scores) - np.mean(SA_scores)) / np.std(SA_scores)
logP_values_normalized = (np.array(logP_values) - np.mean(logP_values)) / np.std(logP_values)
cycle_scores_normalized = (np.array(cycle_scores) - np.mean(cycle_scores)) / np.std(cycle_scores)
targets = SA_scores_normalized + logP_values_normalized + cycle_scores_normalized
import sascorer
import networkx as nx
from rdkit.Chem import rdmolops
scores = []
for i in range(len(valid_smiles_final)):
if valid_smiles_final[ i ] is not None:
current_log_P_value = Descriptors.MolLogP(MolFromSmiles(valid_smiles_final[ i ]))
current_SA_score = -sascorer.calculateScore(MolFromSmiles(valid_smiles_final[ i ]))
cycle_list = nx.cycle_basis(nx.Graph(rdmolops.GetAdjacencyMatrix(MolFromSmiles(valid_smiles_final[ i ]))))
if len(cycle_list) == 0:
cycle_length = 0
else:
cycle_length = max([ len(j) for j in cycle_list ])
if cycle_length <= 6:
cycle_length = 0
else:
cycle_length = cycle_length - 6
current_cycle_score = -cycle_length
current_SA_score_normalized = (current_SA_score - np.mean(SA_scores)) / np.std(SA_scores)
current_log_P_value_normalized = (current_log_P_value - np.mean(logP_values)) / np.std(logP_values)
current_cycle_score_normalized = (current_cycle_score - np.mean(cycle_scores)) / np.std(cycle_scores)
score = (current_SA_score_normalized + current_log_P_value_normalized + current_cycle_score_normalized)
else:
score = -max(y)[ 0 ]
scores.append(-score)
print(i)
print(valid_smiles_final)
print(scores)
save_object(scores, "results/scores{}.dat".format(iteration))
if len(new_features) > 0:
X_train = np.concatenate([ X_train, new_features ], 0)
y_train = np.concatenate([ y_train, np.array(scores)[ :, None ] ], 0)
iteration += 1
print(iteration)
| 2.421875 | 2 |
pyFileFixity/replication_repair.py | hadi-f90/pyFileFixity | 0 | 12787922 | #!/usr/bin/env python
#
# Replication repair
# Copyright (C) 2015 <NAME>
#
# Licensed under the MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#=================================
# Replication repair
# by <NAME>
# License: MIT
# Creation date: 2015-11-16
#=================================
#
from _infos import __version__
# Include the lib folder in the python import path (so that packaged modules can be easily called, such as gooey which always call its submodules via gooey parent module)
import sys, os
thispathname = os.path.dirname(__file__)
sys.path.append(os.path.join(thispathname, 'lib'))
# Import necessary libraries
import rfigc # optional
import shutil
from lib.aux_funcs import recwalk, path2unix, fullpath, is_dir_or_file, is_dir, is_file, create_dir_if_not_exist
import lib.argparse as argparse
import datetime, time
import lib.tqdm as tqdm
import itertools
import math
#import operator # to get the max out of a dict
import csv # to process the database file from rfigc.py
import shlex # for string parsing as argv argument to main(), unnecessary otherwise
from lib.tee import Tee # Redirect print output to the terminal as well as in a log file
#import pprint # Unnecessary, used only for debugging purposes
#***********************************
# AUXILIARY FUNCTIONS
#***********************************
def relpath_posix(recwalk_result, pardir, fromwinpath=False):
''' Helper function to convert all paths to relative posix like paths (to ease comparison) '''
return recwalk_result[0], path2unix(os.path.join(os.path.relpath(recwalk_result[0], pardir),recwalk_result[1]), nojoin=True, fromwinpath=fromwinpath)
#def checkAllEqual(lst):
# return not lst or [lst[0]]*len(lst) == lst
def sort_dict_of_paths(d):
""" Sort a dict containing paths parts (ie, paths divided in parts and stored as a list). Top paths will be given precedence over deeper paths. """
# Find the path that is the deepest, and count the number of parts
max_rec = max(len(x) if x else 0 for x in d.values())
# Pad other paths with empty parts to fill in, so that all paths will have the same number of parts (necessary to compare correctly, else deeper paths may get precedence over top ones, since the folder name will be compared to filenames!)
for key in d.keys():
if d[key]:
d[key] = ['']*(max_rec-len(d[key])) + d[key]
return sorted(d.items(), key=lambda x: x[1])
def sort_group(d, return_only_first=False):
''' Sort a dictionary of relative paths and cluster equal paths together at the same time '''
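    # Hypothetical example (made-up filenames, with the default return_only_first=False):
    #   {0: ['a.txt'], 1: ['a.txt'], 2: ['b.txt']} yields
    #   [[(0, ['a.txt']), (1, ['a.txt'])], [(2, ['b.txt'])]]
    #   identical relative paths are clustered into one group, and groups come out in sorted order.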
    # First, sort the paths in order (each entry must be a pair: (parent_dir, filename), so that there's no ambiguity; otherwise a file at the root could be ordered after a folder/file, since the ordering is done alphabetically without any notion of tree structure).
d_sort = sort_dict_of_paths(d)
# Pop the first item in the ordered list
base_elt = (-1, None)
while (base_elt[1] is None and d_sort):
base_elt = d_sort.pop(0)
if base_elt[1] is None:
return None
# Init by creating the first group and pushing the first ordered filepath into the first group
lst = [[base_elt]]
if d_sort:
# For each subsequent filepath
for elt in d_sort:
# If the filepath is not empty (generator died)
if elt[1] is not None:
# If the filepath is the same to the latest grouped filepath, we add it to the same group
if elt[1] == base_elt[1]:
lst[-1].append(elt)
# Else the filepath is different: we create a new group, add the filepath to this group, and replace the latest grouped filepath
else:
if return_only_first: break # break here if we only need the first group
lst.append([elt])
base_elt = elt # replace the latest grouped filepath
return lst
def majority_vote_byte_scan(relfilepath, fileslist, outpath, blocksize=65535, default_char_null=False):
    '''Takes a list of files in string format representing the same data, and disambiguates by majority vote: for each position in the string, if the character is not the same across all entries, we keep the majority one. If there is no majority, it will be replaced by a null byte (because we can't know if any of the entries are correct about this character).
relfilepath is the filename or the relative file path relative to the parent directory (ie, this is the relative path so that we can compare the files from several directories).'''
# The idea of replication combined with ECC was a bit inspired by this paper: <NAME>, <NAME>, and <NAME>. "Combining Erasure-Code and Replication Redundancy Schemes for Increased Storage and Repair Efficiency in P2P Storage Systems.", 2013, Technion, Computer Science Department, Technical Report CS-2013-03
# But it is a very well known concept in redundancy engineering, usually called triple-modular redundancy (which is here extended to n-modular since we can supply any number of files we want, not just three).
# Preference in case of ambiguity is always given to the file of the first folder.
fileshandles = []
for filepath in fileslist:
if filepath:
# Already a file handle? Just store it in the fileshandles list
if hasattr(filepath, 'read'):
fileshandles.append(filepath)
# Else it's a string filepath, open the file
else:
fileshandles.append(open(filepath, 'rb'))
# Create and open output (merged) file, except if we were already given a file handle
if hasattr(outpath, 'write'):
outfile = outpath
else:
outpathfull = os.path.join(outpath, relfilepath)
pardir = os.path.dirname(outpathfull)
if not os.path.exists(pardir):
os.makedirs(pardir)
outfile = open(outpathfull, 'wb')
# Cannot vote if there's not at least 3 files!
# In this case, just copy the file from the first folder, verbatim
if len(fileshandles) < 3:
# If there's at least one input file, then copy it verbatim to the output folder
if fileshandles:
            # outpathfull is only defined when outpath is a path, not an already-open file handle
            if not hasattr(outpath, 'write'):
                create_dir_if_not_exist(os.path.dirname(outpathfull))
buf = 1
while (buf):
buf = fileshandles[0].read()
outfile.write(buf)
outfile.flush()
return (1, "Error with file %s: only %i copies available, cannot vote (need at least 3)! Copied the first file from the first folder, verbatim." % (relfilepath, len(fileshandles)))
errors = []
    entries = [1]*len(fileshandles) # init with a non-empty placeholder so the while loop runs at least once
while (entries.count('') < len(fileshandles)):
final_entry = []
# Read a block from all input files into memory
for i in xrange(len(fileshandles)):
entries[i] = fileshandles[i].read(blocksize)
# End of file for all files, we exit
if entries.count('') == len(fileshandles):
break
# Else if there's only one file, just copy the file's content over
elif len(entries) == 1:
final_entry = entries[0]
# Else, do the majority vote
else:
# Walk along each column (imagine the strings being rows in a matrix, then we pick one column at each iteration = all characters at position i of each string), so that we can compare these characters easily
for i in xrange(max(len(entry) for entry in entries)):
hist = {} # kind of histogram, we just memorize how many times a character is presented at the position i in each string TODO: use collections.Counter instead of dict()?
# Extract the character at position i of each string and compute the histogram at the same time (number of time this character appear among all strings at this position i)
for entry in entries:
# Check if we are not beyond the current entry's length
if i < len(entry): # TODO: check this line, this should allow the vote to continue even if some files are shorter than others
# Extract the character and use it to contribute to the histogram
# TODO: add warning message when one file is not of the same size as the others
key = str(ord(entry[i])) # convert to the ascii value to avoid any funky problem with encoding in dict keys
hist[key] = hist.get(key, 0) + 1 # increment histogram for this value. If it does not exists, use 0. (essentially equivalent to hist[key] += 1 but with exception management if key did not already exists)
# If there's only one character (it's the same accross all strings at position i), then it's an exact match, we just save the character and we can skip to the next iteration
if len(hist) == 1:
final_entry.append(chr(int(hist.iterkeys().next())))
continue
# Else, the character is different among different entries, we will pick the major one (mode)
elif len(hist) > 1:
# Sort the dict by value (and reverse because we want the most frequent first)
skeys = sorted(hist, key=hist.get, reverse=True)
# Ambiguity! If each entries present a different character (thus the major has only an occurrence of 1), then it's too ambiguous and we just set a null byte to signal that
if hist[skeys[0]] == 1:
if default_char_null:
if default_char_null is True:
final_entry.append("\x00")
else:
final_entry.append(default_char_null)
else:
# Use the entry of the first file that is still open
first_char = ''
for entry in entries:
# Found the first file that has a character at this position: store it and break loop
if i < len(entry):
first_char = entry[i]
break
# Use this character in spite of ambiguity
final_entry.append(first_char)
errors.append(outfile.tell() + i) # Print an error indicating the characters that failed
# Else if there is a tie (at least two characters appear with the same frequency), then we just pick one of them
elif hist[skeys[0]] == hist[skeys[1]]:
final_entry.append(chr(int(skeys[0]))) # TODO: find a way to account for both characters. Maybe return two different strings that will both have to be tested? (eg: maybe one has a tampered hash, both will be tested and if one correction pass the hash then it's ok we found the correct one)
# Else we have a clear major character that appear in more entries than any other character, then we keep this one
else:
final_entry.append(chr(int(skeys[0]))) # alternative one-liner: max(hist.iteritems(), key=operator.itemgetter(1))[0]
continue
        # Concatenate to a string (this is faster than using a string from the start and concatenating at each iteration, because Python strings are immutable so Python has to copy over the whole string, which is O(n^2)).
final_entry = ''.join(final_entry)
# Commit to output file
outfile.write(final_entry)
outfile.flush()
# Errors signaling
if errors:
error_msg = "Unrecoverable corruptions (because of ambiguity) in file %s on characters: %s." % (relfilepath, [hex(int(x)) for x in errors]) # Signal to user that this file has unrecoverable corruptions (he may try to fix the bits manually or with his own script)
return (1, error_msg) # return an error
# Close all input files
for fh in fileshandles:
fh.close()
# Close output file
if outfile != outpath: # close only if we were not given a file handle in the first place
outfile.flush()
outfile.close()
return (0, None)
def synchronize_files(inputpaths, outpath, database=None, tqdm_bar=None, report_file=None, ptee=None, verbose=False):
''' Main function to synchronize files contents by majority vote
The main job of this function is to walk through the input folders and align the files, so that we can compare every files across every folders, one by one.
The whole trick here is to align files, so that we don't need to memorize all the files in memory and we compare all equivalent files together: to do that, we ensure that we walk through the input directories in alphabetical order, and we pick the relative filepath at the top of the alphabetical order, this ensures the alignment of files between different folders, without memorizing the whole trees structures.
'''
# (Generator) Files Synchronization Algorithm:
# Needs a function stable_dir_walking, which will walk through directories recursively but in always the same order on all platforms (same order for files but also for folders), whatever order it is, as long as it is stable.
# Until there's no file in any of the input folders to be processed:
# - curfiles <- load first file for each folder by using stable_dir_walking on each input folder.
# - curfiles_grouped <- group curfiles_ordered:
# * curfiles_ordered <- order curfiles alphabetically (need to separate the relative parent directory and the filename, to account for both without ambiguity)
# * curfiles_grouped <- empty list
# * curfiles_grouped[0] = add first element in curfiles_ordered
# * last_group = 0
# * for every subsequent element nextelt in curfiles_ordered:
# . if nextelt == curfiles_grouped[last_group][0]: add nextelt into curfiles_grouped[last_group] (the latest group in curfiles_grouped)
# . else: create a new group in curfiles_grouped (last_group += 1) and add nextelt into curfiles_grouped[last_group]
# At this stage, curfiles_grouped[0] should contain a group of files with the same relative filepath from different input folders, and since we used stable_dir_walking, we are guaranteed that this file is the next to be processed in alphabetical order.
# - Majority vote byte-by-byte for each of curfiles_grouped[0], and output winning byte to the output file.
# - Update files list alignment: we will now ditch files in curfiles_grouped[0] from curfiles, and replace by the next files respectively from each respective folder. Since we processed in alphabetical (or whatever) order, the next loaded files will match the files in other curfiles_grouped groups that we could not process before.
# At this point (after the loop), all input files have been processed in order, without maintaining the whole files list in memory, just one file per input folder.
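    # Hypothetical walk-through of the alignment (folders and filenames made up):
    #   dir1: a.txt, b.txt   dir2: a.txt, c.txt   dir3: a.txt, b.txt
    #   curfiles starts as {0: a.txt, 1: a.txt, 2: a.txt}; the first group is a.txt from all
    #   three folders, which gets voted/merged and written. Each of those slots then loads its
    #   next file ({0: b.txt, 1: c.txt, 2: b.txt}), the next first group is b.txt from dir1 and
    #   dir3, and so on until every folder's generator is exhausted.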
# Init files walking generator for each inputpaths
recgen = [recwalk(path, sorting=True) for path in inputpaths]
curfiles = {}
recgen_exhausted = {}
recgen_exhausted_count = 0
nbpaths = len(inputpaths)
retcode = 0
if not ptee: ptee = sys.stdout
# Open report file and write header
if report_file is not None:
rfile = open(report_file, 'wb')
r_writer = csv.writer(rfile, delimiter='|', lineterminator='\n', quotechar='"')
r_header = ["filepath"] + ["dir%i" % (i+1) for i in xrange(nbpaths)] + ["hash-correct", "error_code", "errors"]
r_length = len(r_header)
r_writer.writerow(r_header)
# Initialization: load the first batch of files, one for each folder
for i in xrange(len(recgen)):
recgen_exhausted[i] = False
try:
if curfiles.get(i, None) is None:
curfiles[i] = relpath_posix(recgen[i].next(), inputpaths[i])[1]
except StopIteration:
recgen_exhausted[i] = True
recgen_exhausted_count += 1
# Files lists alignment loop
while recgen_exhausted_count < nbpaths:
errcode = 0
errmsg = None
# Init a new report's row
if report_file: r_row = ["-"] * r_length
# -- Group equivalent relative filepaths together
#print curfiles # debug
curfiles_grouped = sort_group(curfiles, True)
# -- Extract first group of equivalent filepaths (this allows us to process with the same alphabetical order on all platforms)
# Note that the remaining files in other groups will be processed later, because their alphabetical order is higher to the first group, this means that the first group is to be processed now
to_process = curfiles_grouped[0]
#print to_process # debug
# -- Byte-by-byte majority vote on the first group of files
# Need the relative filepath also (note that there's only one since it's a group of equivalent relative filepaths, only the absolute path is different between files of a same group)
relfilepath = path2unix(os.path.join(*to_process[0][1]))
if report_file: r_row[0] = relfilepath
if verbose: ptee.write("- Processing file %s." % relfilepath)
# Generate output path
outpathfull = os.path.join(outpath, relfilepath)
create_dir_if_not_exist(os.path.dirname(outpathfull))
# Initialize the list of absolute filepaths
fileslist = []
for elt in to_process:
i = elt[0]
fileslist.append(os.path.join(inputpaths[i], os.path.join(*elt[1])))
if report_file: r_row[i+1] = 'X' # put an X in the report file below each folder that contains this file
# If there's only one file, just copy it over
if len(to_process) == 1:
shutil.copyfile(fileslist[0], outpathfull)
id = to_process[0][0]
if report_file: r_row[id+1] = 'O'
# Else, merge by majority vote
else:
# Before-merge check using rfigc database, if provided
# If one of the files in the input folders is already correct, just copy it over
correct_file = None
if database:
for id, filepath in enumerate(fileslist):
if rfigc.main("-i \"%s\" -d \"%s\" -m --silent" % (filepath, database)) == 0:
correct_file = filepath
correct_id = to_process[id][0]
break
# If one correct file was found, copy it over
if correct_file:
create_dir_if_not_exist(os.path.dirname(outpathfull))
shutil.copyfile(correct_file, outpathfull)
if report_file:
r_row[correct_id+1] = "O"
r_row[-3] = "OK"
# Else, we need to do the majority vote merge
else:
# Do the majority vote merge
errcode, errmsg = majority_vote_byte_scan(relfilepath, fileslist, outpath)
# After-merge/move check using rfigc database, if provided
if database:
if rfigc.main("-i \"%s\" -d \"%s\" -m --silent" % (outpathfull, database)) == 1:
errcode = 1
r_row[-3] = "KO"
if not errmsg: errmsg = ''
errmsg += " File could not be totally repaired according to rfigc database."
else:
if report_file:
r_row[-3] = "OK"
if errmsg: errmsg += " But merged file is correct according to rfigc database."
# Display errors if any
if errcode:
if report_file:
r_row[-2] = "KO"
r_row[-1] = errmsg
ptee.write(errmsg)
retcode = 1
else:
if report_file: r_row[-2] = "OK"
# Save current report's row
if report_file:
r_writer.writerow(r_row)
# -- Update files lists alignment (ie, retrieve new files but while trying to keep the alignment)
for elt in to_process: # for files of the first group (the ones we processed)
i = elt[0]
# Walk their respective folders and load up the next file
try:
if not recgen_exhausted.get(i, False):
curfiles[i] = relpath_posix(recgen[i].next(), inputpaths[i])[1]
# If there's no file left in this folder, mark this input folder as exhausted and continue with the others
except StopIteration:
curfiles[i] = None
recgen_exhausted[i] = True
recgen_exhausted_count += 1
if tqdm_bar: tqdm_bar.update()
if tqdm_bar: tqdm_bar.close()
# Closing report file
if report_file:
# Write list of directories and legend
rfile.write("\n=> Input directories:")
for id, ipath in enumerate(inputpaths):
rfile.write("\n\t- dir%i = %s" % ((id+1), ipath))
rfile.write("\n=> Output directory: %s" % outpath)
rfile.write("\n=> Legend: X=existing/selected for majority vote, O=only used this file, - = not existing, OK = check correct, KO = check incorrect (file was not recovered)\n")
# Close the report file handle
rfile.close()
return retcode
#***********************************
# GUI AUX FUNCTIONS
#***********************************
# Try to import Gooey for GUI display, but manage exception so that we replace the Gooey decorator by a dummy function that will just return the main function as-is, thus keeping the compatibility with command-line usage
try: # pragma: no cover
import lib.gooey as gooey
except ImportError as exc:
# Define a dummy replacement function for Gooey to stay compatible with command-line usage
class gooey(object): # pragma: no cover
def Gooey(func):
return func
# If --gui was specified, then there's a problem
if len(sys.argv) > 1 and sys.argv[1] == '--gui': # pragma: no cover
print('ERROR: --gui specified but an error happened with lib/gooey, cannot load the GUI (however you can still use this script in commandline). Check that lib/gooey exists and that you have wxpython installed. Here is the error: ')
raise(exc)
def conditional_decorator(flag, dec): # pragma: no cover
def decorate(fn):
if flag:
return dec(fn)
else:
return fn
return decorate
def check_gui_arg(): # pragma: no cover
'''Check that the --gui argument was passed, and if true, we remove the --gui option and replace by --gui_launched so that Gooey does not loop infinitely'''
if len(sys.argv) > 1 and sys.argv[1] == '--gui':
# DEPRECATED since Gooey automatically supply a --ignore-gooey argument when calling back the script for processing
#sys.argv[1] = '--gui_launched' # CRITICAL: need to remove/replace the --gui argument, else it will stay in memory and when Gooey will call the script again, it will be stuck in an infinite loop calling back and forth between this script and Gooey. Thus, we need to remove this argument, but we also need to be aware that Gooey was called so that we can call gooey.GooeyParser() instead of argparse.ArgumentParser() (for better fields management like checkboxes for boolean arguments). To solve both issues, we replace the argument --gui by another internal argument --gui_launched.
return True
else:
return False
def AutoGooey(fn): # pragma: no cover
'''Automatically show a Gooey GUI if --gui is passed as the first argument, else it will just run the function as normal'''
if check_gui_arg():
return gooey.Gooey(fn)
else:
return fn
#***********************************
# MAIN
#***********************************
#@conditional_decorator(check_gui_arg(), gooey.Gooey) # alternative to AutoGooey which also correctly works
@AutoGooey
def main(argv=None):
if argv is None: # if argv is empty, fetch from the commandline
argv = sys.argv[1:]
elif isinstance(argv, basestring): # else if argv is supplied but it's a simple string, we need to parse it to a list of arguments before handing to argparse or any other argument parser
argv = shlex.split(argv) # Parse string just like argv using shlex
#==== COMMANDLINE PARSER ====
#== Commandline description
desc = '''Replication Repair
Description: Given a set of directories (or files), try to repair your files by scanning each byte, cast a majority vote among all copies, and then output the winning byte. This process is usually called triple-modular redundancy (but here it should be called n-modular redundancy since you can use as many copies as you have).
It is recommended for long term storage to store several copies of your files on different storage media. Everything's fine until all your copies are partially corrupted. In this case, this script can help you, by taking advantage of your multiple copies, without requiring a pregenerated ecc file. Just specify the path to every copy, and the script will try to recover them.
Replication can repair exactly r-2 errors using majority vote (you need at least 2 blocks for majority vote to work), where r is the number of replications: if r=3, you get a redundancy rate of 1/3, if r=4, rate is 2/4, etc.
This script can also take advantage of a database generated by rfigc.py to make sure that the recovered files are correct, or to select files that are already correct.
Note: in case the end result is not what you expected, you can try a different order of input directories: in case of ambiguity, the first input folder has precedence over subsequent folders.
Note2: in case some files with the same names are of different length, the merging will continue until the longest file is exhausted.
Note3: last modification date is not (yet) accounted for.
'''
ep = '''Use --gui as the first argument to use with a GUI (via Gooey).
'''
#== Commandline arguments
#-- Constructing the parser
# Use GooeyParser if we want the GUI because it will provide better widgets
if len(argv) > 0 and (argv[0] == '--gui' and not '--ignore-gooey' in argv): # pragma: no cover
# Initialize the Gooey parser
main_parser = gooey.GooeyParser(add_help=True, description=desc, epilog=ep, formatter_class=argparse.RawTextHelpFormatter)
# Define Gooey widget types explicitly (because type auto-detection doesn't work quite well)
widget_dir = {"widget": "DirChooser"}
widget_filesave = {"widget": "FileSaver"}
widget_file = {"widget": "FileChooser"}
widget_text = {"widget": "TextField"}
widget_multidir = {"widget": "MultiDirChooser"}
else: # Else in command-line usage, use the standard argparse
# Delete the special argument to avoid unrecognized argument error in argparse
if '--ignore-gooey' in argv[0]: argv.remove('--ignore-gooey') # this argument is automatically fed by Gooey when the user clicks on Start
# Initialize the normal argparse parser
main_parser = argparse.ArgumentParser(add_help=True, description=desc, epilog=ep, formatter_class=argparse.RawTextHelpFormatter)
        # Define dummy dict to keep compatible with command-line usage
widget_dir = {}
widget_filesave = {}
widget_file = {}
widget_text = {}
widget_multidir = {}
# Required arguments
main_parser.add_argument('-i', '--input', metavar='"/path/to/copy1/" "/path/to/copy2/" "etc."', type=is_dir_or_file, nargs='+', required=True,
                        help='Specify the paths to every copy you have (minimum 3 copies, else it won\'t work!). Can be folders or files (if you want to repair only one file). Order matters: in case of ambiguity, the first folder where the file exists will be chosen.', **widget_multidir)
    main_parser.add_argument('-o', '--output', metavar='/output/folder/', nargs=1, required=True,
help='Where the recovered files will be stored.', **widget_dir)
# Optional general arguments
main_parser.add_argument('-d', '--database', metavar='database.csv', type=is_file, required=False,
help='Path to a previously generated rfigc.py database. If provided, this will be used to check that the repaired files are correct (and also to find already correct files in copies).', **widget_file)
main_parser.add_argument('-r', '--report', metavar='/some/folder/report.csv', type=str, required=False,
help='Save all results of the repair process in a report file, with detailed descriptions of ambiguous repairs (ie, when majority vote came to a draw).', **widget_filesave)
main_parser.add_argument('-l', '--log', metavar='/some/folder/filename.log', type=str, nargs=1, required=False,
help='Path to the log file. (Output will be piped to both the stdout and the log file)', **widget_filesave)
main_parser.add_argument('-f', '--force', action='store_true', required=False, default=False,
help='Force overwriting the output folder even if it already exists.')
main_parser.add_argument('-v', '--verbose', action='store_true', required=False, default=False,
help='Verbose mode (show more output).')
main_parser.add_argument('--silent', action='store_true', required=False, default=False,
help='No console output (but if --log specified, the log will still be saved in the specified file).')
#== Parsing the arguments
args = main_parser.parse_args(argv) # Storing all arguments to args
#-- Set variables from arguments
inputpaths = [fullpath(x) for x in args.input] # path to the files to repair (ie, paths to all the different copies the user has)
outputpath = fullpath(args.output[0])
force = args.force
verbose = args.verbose
silent = args.silent
if len(inputpaths) < 3:
raise Exception('Need at least 3 copies to do a replication repair/majority vote!')
#if os.path.isfile(inputpath): # if inputpath is a single file (instead of a folder), then define the rootfolderpath as the parent directory (for correct relative path generation, else it will also truncate the filename!)
#rootfolderpath = os.path.dirname(inputpath)
report_file = None
if args.report: report_file = os.path.basename(fullpath(args.report))
database = None
if args.database: database = args.database
# -- Checking arguments
if os.path.exists(outputpath) and not force:
raise NameError('Specified output path %s already exists! Use --force if you want to overwrite.' % outputpath)
if database and not os.path.isfile(database):
raise NameError('Specified rfigc database file %s does not exist!' % database)
# -- Configure the log file if enabled (ptee.write() will write to both stdout/console and to the log file)
if args.log:
ptee = Tee(args.log[0], 'a', nostdout=silent)
#sys.stdout = Tee(args.log[0], 'a')
sys.stderr = Tee(args.log[0], 'a', nostdout=silent)
else:
ptee = Tee(nostdout=silent)
# == PROCESSING BRANCHING == #
# == Precomputation of ecc file size
# Precomputing is important so that the user can know what size to expect before starting (and how much time it will take...).
filescount = 0
sizetotal = 0
sizeheaders = 0
visitedfiles = {}
ptee.write("Precomputing list of files and predicted statistics...")
prebar = tqdm.tqdm(file=ptee, disable=silent)
for inputpath in inputpaths:
for (dirpath, filename) in recwalk(inputpath):
# Get full absolute filepath
filepath = os.path.join(dirpath, filename)
relfilepath = path2unix(os.path.relpath(filepath, inputpath)) # File relative path from the root (we truncate the rootfolderpath so that we can easily check the files later even if the absolute path is different)
# Only increase the files count if we didn't see this file before
if not visitedfiles.get(relfilepath, None):
# Counting the total number of files we will process (so that we can show a progress bar with ETA)
filescount = filescount + 1
# Add the file to the list of already visited files
visitedfiles[relfilepath] = True
# Get the current file's size
size = os.stat(filepath).st_size
# Compute total size of all files
sizetotal = sizetotal + size
prebar.update()
prebar.close()
ptee.write("Precomputing done.")
# == Majority vote repair
# For each folder, align the files lists and then majority vote over each byte to repair
ptee.write("====================================")
ptee.write("Replication repair, started on %s" % datetime.datetime.now().isoformat())
ptee.write("====================================")
# Prepare progress bar if necessary
if silent:
tqdm_bar = None
else:
tqdm_bar = tqdm.tqdm(total=filescount, file=ptee, leave=True, unit="files")
# Call the main function to synchronize files using majority vote
errcode = synchronize_files(inputpaths, outputpath, database=database, tqdm_bar=tqdm_bar, report_file=report_file, ptee=ptee, verbose=verbose)
#ptee.write("All done! Stats:\n- Total files processed: %i\n- Total files corrupted: %i\n- Total files repaired completely: %i\n- Total files repaired partially: %i\n- Total files corrupted but not repaired at all: %i\n- Total files skipped: %i" % (files_count, files_corrupted, files_repaired_completely, files_repaired_partially, files_corrupted - (files_repaired_partially + files_repaired_completely), files_skipped) )
if tqdm_bar: tqdm_bar.close()
ptee.write("All done!")
if report_file: ptee.write("Saved replication repair results in report file: %s" % report_file)
del ptee
return errcode
# Calling main function if the script is directly called (not imported as a library in another program)
if __name__ == "__main__": # pragma: no cover
sys.exit(main())
| 1.59375 | 2 |
application.py | archman/unicorn-webapp | 1 | 12787923 | # -*- coding: utf-8 -*-
from app import app as application
if __name__ == '__main__':
application.run(host="0.0.0.0", debug=True)
| 1.382813 | 1 |
src/pykemon_battle/pokemon.py | icydux/pykemon_battle | 0 | 12787924 | from .utils.utilities import (
get_pokemon_info,
choose_best_moveset,
randomly_choose_moveset,
manually_choose_moveset,
choose_first_four_moves_for_now,
)
from .move import Move
class Pokemon:
"""
A pokemon is a class that represents a pokemon.
"""
def __init__(self, poke_id):
self.json = get_pokemon_info(poke_id=poke_id)
self.name = self.json["name"]
self.type = list(slot["type"]["name"] for slot in self.json["types"])
self.heal()
self.reset()
self.moveset = None
# self.get_moves()
def reset(self):
"""
        Resets the pokemon's battle stats (attack, defense, special attack, special defense, speed) to their base values.
"""
attack = self.json["stats"][1]["base_stat"]
defense = self.json["stats"][2]["base_stat"]
special_attack = self.json["stats"][3]["base_stat"]
special_defense = self.json["stats"][4]["base_stat"]
speed = self.json["stats"][5]["base_stat"]
self.stats = {
"attack": attack,
"defense": defense,
"special_attack": special_attack,
"special_defense": special_defense,
"speed": speed,
}
def heal(self):
"""
        Restores the pokemon's health points to its base HP stat and sets its status back to active.
"""
self.health_points = self.json["stats"][0]["base_stat"]
self.status = "active"
def get_moves(self, move_selection="Random"):
"""
        Selects four moves for the pokemon (automatically, manually, or randomly) and stores them in self.moveset.
"""
all_possible_moves = self.json["moves"]
if move_selection == "1" or move_selection.lower() == "automatic":
selected_moves = choose_best_moveset(all_possible_moves)
elif move_selection == "2" or move_selection.lower() == "manual":
selected_moves = manually_choose_moveset(all_possible_moves)
elif move_selection == "3" or move_selection.lower() == "random":
selected_moves = randomly_choose_moveset(all_possible_moves)
else:
selected_moves = choose_first_four_moves_for_now(all_possible_moves)
self.moveset = (
Move(selected_moves[0]),
Move(selected_moves[1]),
Move(selected_moves[2]),
Move(selected_moves[3]),
)
def __repr__(self):
return f"{self.name.capitalize()}"
# demo_poke = Pokemon("Silcoon")
# demo_poke.get_moves()
| 3.03125 | 3 |
bf_bid.py | dssg/mlpolicylab_fall20_bills1_public | 0 | 12787925 | import pandas as pd
import psycopg2 as pg2
import yaml
import io
import ohio.ext.pandas
from sqlalchemy import create_engine
def open_db_connection(secrets_file="secrets.yaml", verbose=True):
"""
Opens connection to psql db
:return:
connection object
"""
try:
with open(secrets_file, 'r') as f:
# loads contents of secrets.yaml into a python dictionary
secret_config = yaml.safe_load(f.read())
db_params = secret_config['db']
except FileNotFoundError:
print("Cannot establish connection to database. Please provide db_params in secrets.yaml file.")
exit(1)
conn = pg2.connect(
host=db_params['host'],
port=db_params['port'],
dbname=db_params['dbname'],
user=db_params['user'],
password=db_params['password']
)
if verbose:
print(f"Connection opened to database {db_params['dbname']}")
return conn
connection = open_db_connection()
def write_df_in_table(conn, df, schema_name, table_name):
"""write pandas dataframe in table
Args:
conn: a pSQL databse connection object
df: a pandas dataframe to write to the database
schema_name: name of the schema for the table
table_name: name of the table
"""
# write df to memory buffer
SEP = "~"
buffer = io.StringIO()
df.to_csv(buffer, index_label='id', header=False, sep=SEP)
buffer.seek(0)
type_mapping = {'int64': 'integer', 'float64': 'double precision', 'object': 'varchar'}
cur = conn.cursor()
cur.execute(f"DROP TABLE IF EXISTS {schema_name}.{table_name};")
cur.execute(f"CREATE TABLE {schema_name}.{table_name} (id integer PRIMARY KEY);")
# cur.execute(f"GRANT ALL PRIVILEGES ON {schema_name}.{table_name} TO bills1;")
cur.execute(f"ALTER TABLE {schema_name}.{table_name} OWNER TO bills1;")
# create table column
for col_name, col_type in zip(df.columns, df.dtypes):
print(col_name)
col_type = type_mapping[str(col_type)]
cur.execute(f"ALTER table {schema_name}.{table_name} ADD COLUMN {col_name} {col_type};")
# hard-coded for now, may be made dynamic later
# TODO: need to figure out how to change NULL values to date as well
#if col_name == "introduced_date":
# cur.execute(f"""ALTER table {schema_name}.{table_name} ALTER COLUMN {col_name}
# TYPE date using to_date({col_name}, 'YYYY-MM-DD');""")
# copy data from buffer to table
cur.copy_from(buffer, f'{schema_name}.{table_name}', sep=SEP)
conn.commit()
cur.close()
# If you need to recreate the SQL tables for whatever reason
object = pd.read_pickle(r'/data/groups/bills1/mlpolicylab_fall20_bills1/bid_groups.pkl')
white_df = pd.DataFrame(object['white'], columns=['bill_id'])
write_df_in_table(conn=connection, df=white_df, schema_name="sketch", table_name="reference_bills_w")
"""
black_df = pd.DataFrame(object['black'], columns=['bill_id'])
asian_df = pd.DataFrame(object['asian'], columns=['bill_id'])
write_df_in_table(conn=connection, df= black_df, schema_name="sketch", table_name="protected_bills_b")
write_df_in_table(conn=connection, df= asian_df, schema_name="sketch", table_name="protected_bills_a")
"""
| 3.203125 | 3 |
xhale/_version.py | jupyter-xeus/xhale | 2 | 12787926 | <reponame>jupyter-xeus/xhale<gh_stars>1-10
#############################################################################
# Copyright (c) 2018, <NAME>, <NAME> and <NAME> #
# #
# Distributed under the terms of the BSD 3-Clause License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
#############################################################################
version_info = (0, 0, 1)
__version__ = '.'.join(map(str, version_info))
| 1.601563 | 2 |
recipes/Python/577236_ur1ca_commandline_client/recipe-577236.py | tdiprima/code | 2,023 | 12787927 | #!/usr/bin/env python
"""ur1.py -- command-line ur1.ca client.
ur1.ca is the URL shortening services provided by status.net. This script
makes it possible to access the service from the command line. This is done
by scraping the returned page and look for the shortened URL.
USAGE:
ur1.py LONGURL
RETURN STATUS:
If the URL is succesfully shortened by ur1.ca, it is written
to the standard output, and the program exits with status 0.
If ur1.ca fails to shorten the long URL, the error message
provided by ur1.ca is written to the standard error, and the
program exits with status 1.
If the input URL is malformed, no attempts of contacting the
server is made, and the program exits with status 2.
"""
import sys
import urllib
import urlparse
import re
UR1CA = "http://ur1.ca/"
ESUCCESS = 0
EFAIL = 1
EBADARG = 2
RE_GOOD = re.compile(r'<p class="success">Your ur1 is: <a href="(?P<shorturl>.+)">(?P=shorturl)</a></p>')
RE_BAD = re.compile(r'<p class="error">(?P<errormsg>.+)</p>')
def isgoodarg(url):
"""Check if the input URL makes "sense".
A URL does not make sense if the scheme is neither http or https,
or the host part is missing.
url: input URL
Returns boolean indicating whether the URL makes sense.
"""
parse_result = urlparse.urlparse(url)
#pylint: disable-msg=E1101
isgoodscheme = (parse_result.scheme == "http" or
parse_result.scheme == "https")
isgoodhost = parse_result.hostname
return isgoodscheme and isgoodhost
def parameterize(url):
"""Encode input URL as POST parameter.
url: a string which is the URL to be passed to ur1.ca service.
Returns the POST parameter constructed from the URL.
"""
return urllib.urlencode({"longurl": url})
def request(parameter):
"""Send POST request to ur1.ca using the parameter.
parameter: the parameter to the POST request, as returned by
parameterize().
Returns the file-like object as returned by urllib.urlopen.
"""
return urllib.urlopen(UR1CA, parameter)
def retrievedoc(response):
"""Retrieve the HTML text from the ur1.ca response.
response: the file-like HTTP response file returned by ur1.ca.
Returns the text as a string.
"""
#XXX: ensure all bytes are read
res_info = response.info()
clength = int(res_info["content-length"])
return response.read(clength)
def scrape(document):
"""Scrape the HTML document returned from ur1.ca for the answer.
document: HTML document returned from ur1.ca
Returns a 2-tuple (success, answer) where --
success: boolean value indicating whether the service returned
some meaningful result
answer: if success, this is the shortened URL, otherwise a string
indicating the possible problem
"""
goodguess = RE_GOOD.search(document)
if goodguess:
matchdict = goodguess.groupdict()
return (True, matchdict["shorturl"])
badguess = RE_BAD.search(document)
if badguess:
matchdict = badguess.groupdict()
return (False, matchdict["errormsg"])
else:
return (False, "Unknown local error.")
def __do_main():
"""Do everything."""
try:
arg = sys.argv[1]
except IndexError:
sys.exit(EBADARG)
if not isgoodarg(arg):
sys.exit(EBADARG)
post_param = parameterize(arg)
answerfile = request(post_param)
doc = retrievedoc(answerfile)
answerfile.close()
status, msg = scrape(doc)
if status:
print msg
sys.exit(ESUCCESS)
else:
print >> sys.stderr, msg
sys.exit(EFAIL)
if __name__ == "__main__":
__do_main()
| 3.609375 | 4 |
app/config.py | okosioc/flask-seed | 3 | 12787928 | <reponame>okosioc/flask-seed
DOMAIN = 'flask-seed.com'
ENV = 'production'
DEBUG = False
SECRET_KEY = '<FIXME>'
CACHE_TYPE = "SimpleCache"
CACHE_DEFAULT_TIMEOUT = 300
CACHE_THRESHOLD = 10240
ACCEPT_LANGUAGES = ['en', 'zh']
BABEL_DEFAULT_LOCALE = 'en'
BABEL_DEFAULT_TIMEZONE = 'UTC'
DEBUG_LOG = 'logs/debug.log'
ERROR_LOG = 'logs/error.log'
ADMINS = ['<FIXME>']
MAIL_SERVER = 'smtp.mxhichina.com'
MAIL_PORT = 465
MAIL_USE_TLS = False
MAIL_USE_SSL = True
MAIL_USERNAME = '<FIXME>'
MAIL_PASSWORD = '<<PASSWORD>>'
MAIL_DEFAULT_SENDER = '<FIXME>'
MONGODB_URI = 'mongodb://localhost:27017/flask-seed'
MONGODB_URI_PYTEST = 'mongodb://localhost:27017/pytest'
# Upload to Storage Service
UPLOAD_ENDPOINT = '//upload.qiniup.com/'
UPLOAD_BASE = '//cdn.flask-seed.com'
UPLOAD_BUCKET = 'flask-seed'
UPLOAD_AK = '<FIXME>'
UPLOAD_SK = '<FIXME>'
UPLOAD_MIMES = ['image/jpeg', 'image/png', 'image/gif',
'video/quicktime', 'video/mp4', 'video/mpeg', 'video/webm',
'audio/mpeg', 'audio/x-wav', 'audio/webm']
UPLOAD_MAX = 50
UPLOAD_IMAGE_PREVIEW_SM = '?imageMogr2/thumbnail/x200'
UPLOAD_IMAGE_PREVIEW_MD = '?imageMogr2/thumbnail/600x'
UPLOAD_VIDEO_POSTER_SM = '?vframe/jpg/offset/1/h/200'
# Upload to Local
# UPLOAD_ENDPOINT = '/upload'
# UPLOAD_FOLDER = 'uploads'
# UPLOAD_MIMES = ['image/jpeg', 'image/png']
# UPLOAD_MAX = 10
# UPLOAD_IMAGE_PREVIEW_SM = ''
# UPLOAD_IMAGE_PREVIEW_MD = ''
# UPLOAD_VIDEO_COVER_SM = ''
| 1.578125 | 2 |
examples/using_parameters.py | danielk333/pyant | 1 | 12787929 | <gh_stars>1-10
'''
Using parameters
=================
'''
import matplotlib.pyplot as plt
import numpy as np
import pyant
beam = pyant.Airy(
azimuth=[0, 45.0, 0],
elevation=[89.0, 80.0, 60.0],
frequency=[930e6, 230e6],
I0=10**4.81,
radius=np.linspace(10,23.0,num=20),
)
k = np.array([0,0,1.0]).T
#This is the shape and names of the parameters
print(f'beam.shape = {beam.shape}')
print(f'beam.parameters = {beam.parameters}')
#this means their values can be found trough the corresponding attributes
print(f'beam.radius = {beam.radius}')
#One needs to choose values for all parameters
#Either trough direct input into beam.gain
print(f'G = {beam.gain(k, pointing=k, frequency=314e6, radius=20.0)} ')
#pointing is the only parameter that also supports input by azimuth and elevation
print(f'G = {beam.gain(k, azimuth=20.2, elevation=89.1, frequency=314e6, radius=20.0)} ')
#Or trough indexing of the currently entered parameters
print(f'G = {beam.gain(k, ind=(0,1,10))} ')
#(indexing can also be done as a dict for more readability)
print(f'G = {beam.gain(k, ind=dict(pointing=0,frequency=1,radius=10))} ')
#Or a combination of both
print(f'G = {beam.gain(k, ind=(0,None,10), frequency=333e6)} ')
print('-- exceptions --')
#Inconsistencies raise value and type errors
#like supplying both a index and a value
try:
print(f'G = {beam.gain(k, ind=(0,1,10), frequency=333e6)} ')
except Exception as e:
print(f'Exception: "{e}"')
#or not giving values for parameters at all
try:
print(f'G = {beam.gain(k)} ')
except Exception as e:
print(f'Exception: "{e}"')
#or not giving enough values
try:
print(f'G = {beam.gain(k, ind=(0,1))} ')
except Exception as e:
print(f'Exception: "{e}"')
#or trying to index scalar parameters
beam.frequency = 930e6
#now the size will be None for this parameter
print(f'beam.shape = {beam.shape}')
#so indexing it will raise an error
try:
print(f'G = {beam.gain(k, ind=(0,1,10))} ')
except Exception as e:
print(f'Exception: "{e}"')
print('-- exceptions end --')
#while setting it to None will just use the parameter value
print(f'G = {beam.gain(k, ind=(0,None,10))} ')
#if you have all scalar parameters, no index needs to be supplied
beam.radius = 23
beam.pointing = k.copy()
print(f'G = {beam.gain(k)} ')
#this also works with size=1 parameters
beam.radius = [23]
print(f'G = {beam.gain(k)} ') | 3.265625 | 3 |
Other/Python/DetectCapitals.py | siddharth952/Interview-Prep | 0 | 12787930 | <filename>Other/Python/DetectCapitals.py<gh_stars>0
def detectCapitals(word:str):
# Flag to keep track of the cases
c = 0
for i in word:
if i == i.upper():
c += 1
    # All letters are caps || only the first letter is a cap || no caps at all
return c == len(word) or (c == 1 and word[0] == word[0].upper()) or c == 0
print(detectCapitals("Thisshouldreturntrue"))
print(detectCapitals("ReturnFaLSe")) | 3.671875 | 4 |
venv/venv/Scripts/django-admin.py | RawFlash/BabyShoesShop | 0 | 12787931 | <reponame>RawFlash/BabyShoesShop<gh_stars>0
#!c:\users\30026663\desktop\learn\c9b1~1\sayt1\sayt1\venv\venv\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| 1.148438 | 1 |
recursividade_soma.py | fabiobarreto-data-science/Data-Science---Let-s-Code | 0 | 12787932 | <reponame>fabiobarreto-data-science/Data-Science---Let-s-Code
"""
Develop a script that computes the sum of the first N natural numbers. Use recursion to obtain the result.
"""
def soma(n):
if n == 1:
return 1
else:
return n + soma(n - 1)
print(soma(3)) | 3.421875 | 3 |
data/mask_dataset.py | xiaonanQua/experiment | 0 | 12787933 | <gh_stars>0
import xml.etree.ElementTree as ET
import numpy as np
from PIL import Image
import glob
class MaskDataset:
def __init__(self):
        # Root directory and data paths
self.root = '/home/team/xiaonan/dataset/mask/'
self.data_path = {
'sample': self.root,
'train': self.root,
'test': self.root
}
        # Get the image and annotation file paths
self.image_path = glob.glob(self.data_path['sample'] + '*.jpg')
self.ann_path = glob.glob(self.data_path['sample'] + '*.xml')
        # Label names
self.label_dict = {
'mask': 0,
'head': 1,
'back': 2,
'mid_mask': 3
}
self.label_names = ['mask', 'head', 'back', 'mid_mask']
        # Build a dict mapping image file names to their paths, i.e. {'*.jpg': '/**/**/*.jpg'}
self.image_path_dict = self.data_dict(self.image_path)
        # Whether to use the 'difficult' flag
self.use_difficult = True
        # Dataset size
self.data_size = len(self.ann_path)
        # Bounding box coordinate tag names
self.bbox_name = ['ymin', 'xmin', 'ymax', 'xmax']
def get_example(self, index):
        # Parse a single annotation file
anno = ET.parse(self.ann_path[index])
        # Lists for bounding boxes, labels and 'difficult' flags
bbox_list = list()
label_list = list()
difficult_list = list()
        # Iterate over the 'object' tags
for attr in anno.findall('object'):
            # When the difficult split is not used and the sample is difficult, skip it
if not self.use_difficult and int(attr.find('difficult').text) == 1:
print('1')
continue
            # Get the label name (strip whitespace, convert to lowercase)
label_ = attr.find('name').text.lower().strip()
label_list.append(self.label_dict[label_])
            # Get the bounding box; subtract 1 so pixel indices start at 0
bbox_ = attr.find('bndbox')
bbox_list.append([int(bbox_.find(bbox_tag).text) - 1
for bbox_tag in self.bbox_name])
            # Get the 'difficult' value
difficult_list.append(int(attr.find('difficult').text))
        # Stack the label, bounding box and difficult lists into numpy arrays
label = np.stack(label_list).astype(np.int32)
bbox = np.stack(bbox_list).astype(np.float32)
        # When 'use_difficult == False', all elements of 'difficult' are False.
difficult = np.array(difficult_list, dtype=np.bool).astype(np.uint8)
        # Load the image data
image_path = self.image_path_dict[anno.find('filename').text.lower().strip()]
image = self.read_image(image_path)
return image, bbox, label, difficult
def __len__(self):
return self.data_size
__getitem__ = get_example
def read_image(self, image_file):
"""
        Read image data.
Args:
            image_file: path to the image file
Returns:
"""
        # Read the image
image = Image.open(image_file)
        # Convert the image to RGB mode and a numpy array
try:
img = image.convert('RGB')
img = np.asarray(img, dtype=np.float32)
finally:
if hasattr(image, 'close'):
image.close()
if img.ndim == 2:
# reshape (H, W) -> (1, H, W)
return img[np.newaxis]
else:
# transpose (H, W, C) -> (C, H, W)
return img.transpose((2, 0, 1))
def data_dict(self, data):
"""
        Build a data dict, e.g. for a list of image paths, mapping each image file name to its path.
Args:
            data: list of data paths
        Returns: the data dict
"""
data_dic = dict()
for idx, path in enumerate(data):
data_name = str(path.split('/')[-1].lower())
data_dic[data_name] = path
            print('\r Building data dict: [{}|{}]'.format(idx+1, len(data)), end=' ')
return data_dic
if __name__ == '__main__':
dataset = MaskDataset()
| 2.421875 | 2 |
reports/__init__.py | shawnclq/Adversarial-Threat-Detector | 29 | 12787934 | <filename>reports/__init__.py
# Module providing evasion attacks.
from reports.report_utility import ReportUtility
from reports.report_html import HtmlReport
from reports.report_ipynb import IpynbReport
| 1.382813 | 1 |
sebastian/create_toks_conll.py | Emrys-Hong/fastai_sequence_tagging | 23 | 12787935 | from fastai.text import *
import fire
BOS = 'xbos' # beginning-of-sentence tag
FLD = 'xfld' # data field tag
BOS_LABEL = '_bos_'
PAD = '_pad_'
re1 = re.compile(r' +')
def read_file(filepath):
assert os.path.exists(filepath)
sentences = []
labels = []
with open(filepath, encoding='utf-8') as f:
sentence = [BOS]
sentence_labels = [BOS_LABEL]
for line in f:
if line == '\n':
sentences.append(sentence)
labels.append(sentence_labels)
sentence = [BOS] # use xbos as the start of sentence token
sentence_labels = [BOS_LABEL]
else:
sentence.append(line.split()[0].lower())
# label is generally in the last column
sentence_labels.append(line.split()[-1])
if sentence: # some files, e.g. NER end on an empty line
sentences.append(sentence)
labels.append(sentence_labels)
return sentences, labels
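# Illustrative input for read_file() (added note; a sketch of the CoNLL-style format this
# parser expects: one token per line with its label in the last column, sentences separated
# by blank lines; the tokens and tags below are made up):
#
#   West NNP B-NP B-MISC
#   Indian NNP I-NP I-MISC
#   all-rounder NN I-NP O
#
#   Phil NNP B-NP B-PER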
def create_toks(prefix, max_vocab=30000, min_freq=1):
PATH = f'data/nlp_seq/{prefix}/'
names = {}
if prefix == 'ner':
names['train'] = 'train.txt'
names['val'] = 'valid.txt'
names['test'] = 'test.txt'
else:
raise ValueError(f'Filenames for {prefix} have to be added first.')
paths = {}
for split in ['train', 'val', 'test']:
paths[split] = f'{PATH}{names[split]}'
print(f'prefix {prefix} max_vocab {max_vocab} min_freq {min_freq}')
os.makedirs(f'{PATH}tmp', exist_ok=True)
trn_tok, trn_labels = read_file(paths['train'])
val_tok, val_labels = read_file(paths['val'])
test_tok, test_labels = read_file(paths['test'])
for trn_t, trn_l in zip(trn_tok[:5], trn_labels[:5]):
print('Sentence:', trn_t, 'labels:', trn_l)
print(f'# of train: {len(trn_tok)}, # of val: {len(val_tok)},'
f'# of test: {len(test_tok)}')
freq = Counter(p for o in trn_tok for p in o)
print(freq.most_common(25))
itos = [o for o, c in freq.most_common(max_vocab) if c > min_freq]
itos.insert(0, PAD)
itos.insert(0, '_unk_')
stoi = collections.defaultdict(lambda: 0,
{v: k for k, v in enumerate(itos)})
print(len(itos))
trn_ids = np.array([[stoi[o] for o in p] for p in trn_tok])
val_ids = np.array([[stoi[o] for o in p] for p in val_tok])
test_ids = np.array([[stoi[o] for o in p] for p in test_tok])
# map the labels to ids
freq = Counter(p for o in trn_labels for p in o)
print(freq)
itol = [l for l, c in freq.most_common()]
itol.insert(1, PAD) # insert padding label at index 1
print(itol)
ltoi = {l: i for i, l in enumerate(itol)}
trn_lbl_ids = np.array([[ltoi[o] for o in p] for p in trn_labels])
val_lbl_ids = np.array([[ltoi[o] for o in p] for p in val_labels])
test_lbl_ids = np.array([[ltoi[o] for o in p] for p in test_labels])
ids_joined = np.array([[stoi[o] for o in p] for p in trn_tok + val_tok + test_tok])
val_ids_joined = ids_joined[int(len(ids_joined)*0.9):]
ids_joined = ids_joined[:int(len(ids_joined)*0.9)]
np.save(f'{PATH}tmp/trn_ids.npy', trn_ids)
np.save(f'{PATH}tmp/val_ids.npy', val_ids)
np.save(f'{PATH}tmp/test_ids.npy', test_ids)
np.save(f'{PATH}tmp/lbl_trn.npy', trn_lbl_ids)
np.save(f'{PATH}tmp/lbl_val.npy', val_lbl_ids)
np.save(f'{PATH}tmp/lbl_test.npy', test_lbl_ids)
pickle.dump(itos, open(f'{PATH}tmp/itos.pkl', 'wb'))
pickle.dump(itol, open(f'{PATH}tmp/itol.pkl', 'wb'))
np.save(f'{PATH}tmp/trn_lm_ids.npy', ids_joined)
np.save(f'{PATH}tmp/val_lm_ids.npy', val_ids_joined)
if __name__ == '__main__': fire.Fire(create_toks)
| 2.828125 | 3 |
codecamp/products/models.py | karuvally/django_brushup | 0 | 12787936 | from django.db import models
from django.urls import reverse
# Create your models here.
class Product(models.Model):
title = models.CharField(max_length=120)
description = models.TextField(null=True)
price = models.DecimalField(decimal_places=2, max_digits=100)
summary = models.TextField(blank=True)
featured = models.BooleanField(default=False)
def get_absolute_url(self):
return reverse("products:product_detail", kwargs={"id_lookup": self.id})
| 2.484375 | 2 |
scratch/shawn/trianglewave.py | sasgc6/pysmurf | 3 | 12787937 | import pysmurf
#S = pysmurf.SmurfControl(make_logfile=False,setup=False,epics_root='test_epics',cfg_file='/usr/local/controls/Applications/smurf/pysmurf/pysmurf/cfg_files/experiment_fp28_smurfsrv04.cfg')
import numpy as np
import time
Vrange=np.linspace(0,0.195/6.,100)+S.get_tes_bias_bipolar(3)
Vrange=[Vrange,Vrange[::-1]]
Vrange=np.array(Vrange).flatten()
while True:
for Vtes in Vrange:
S.set_tes_bias_bipolar(7,Vtes)
time.sleep(0.005)
| 2.21875 | 2 |
main.py | stevillis/ParkingSpaceDetection | 1 | 12787938 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 03 11:19:25 2018
@author: Stévillis and Valdinilson
title: Smart Parking System
"""
import cv2
import numpy as np
import yaml
import time
import markpolygons
def draw_masks(parking_data):
"""
Draw masks in parking_data points.
:param parking_data: points of parking spaces
:return: None
"""
if parking_data is not None: # If there are points of parking spaces in the parkinglot.yml file
for park in parking_data:
points = np.array(park['points']) # Convert the parkinglot.yml data to a numpy array
rect = cv2.boundingRect(points) # Return the points of the rectangle around the parking space (x, y, w, h)
points_shifted = points.copy() # Just a faster copy of points (better than slices)
# Shift contour to region of interest
# Subtract x (from x, y, w, h) value from the original x points
points_shifted[:, 0] = points[:, 0] - rect[0]
# Subtract y (from x, y, w, h) value from the original y points
points_shifted[:, 1] = points[:, 1] - rect[1]
parking_bounding_rects.append(rect) # Store the region of each parking space for analysis
"""
            Parameters of drawContours:
image - An array of zeros with dimensions h and w
contours - All the input contours
            contourIdx - Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
            color - Color of the contours. color=255 means color=(255,255,255)
thickness - Thickness of lines the contours are drawn with. If it is negative , the contour interiors
are drawn.
"""
mask = cv2.drawContours(np.zeros((rect[3], rect[2]), dtype=np.uint8), [points_shifted],
contourIdx=-1, color=255, thickness=-1, lineType=cv2.LINE_8)
print(mask)
mask = mask == 255 # Compare all the mask points. Zero becomes False and 255 becomes True
parking_mask.append(mask) # Store the region of each parking space drawn for analysis
else: # Initialize the parking spaces marking
markpolygons.start(cap)
def detect_vacant_spaces(frame_blur):
"""
Detect cars and vacant spaces in parking.
:param frame_blur: frame_blur
:return: None
"""
parking_dict = {} # Store the status of each parking space
# Detecting vacant spaces
for ind, park in enumerate(parking_data):
points = np.array(park['points'])
rect = parking_bounding_rects[ind]
# roi_gray = frame_gray[rect[1]:(rect[1] + rect[3]),
# rect[0]:(rect[0] + rect[2])] # crop roi for faster calculation
roi_gray = frame_blur[rect[1]:(rect[1] + rect[3]),
rect[0]:(rect[0] + rect[2])] # Crop roi for faster calculation
laplacian = cv2.Laplacian(roi_gray, cv2.CV_64F) # Apply Laplacian filter to detect edges
points[:, 0] = points[:, 0] - rect[0] # Shift contour to roi
points[:, 1] = points[:, 1] - rect[1]
# Compute the arithmetic mean along the specified axis and returns a new array containing the mean values
delta = np.mean(np.abs(laplacian * parking_mask[ind]))
status = delta < config['park_laplacian_th']
# While parking spaces isn't in the end, add parking index and status to the parking_dict
if ind < len(parking_data):
parking_dict[str(ind + 1)] = parking_status[ind] # ind starts in 0
if ind == len(parking_data) - 1: # When all the parking spaces were done
# Write the parking_dict in a temp_file to be read after
f = open('file_temp.txt', 'w')
f.write(str(parking_dict))
f.close()
parking_dict.clear() # Clear the dict to restart the process
if parking_status[ind]:
            print('Space {} is empty!'.format(int(park['id']) + 1))
else:
            print('Space {} is occupied!'.format(int(park['id']) + 1))
# If detected a change in parking status, save the current time
if status != parking_status[ind] and parking_buffer[ind] is None:
parking_buffer[ind] = time.time() - time_video
# If status is still different than the one saved and counter is open
elif status != parking_status[ind] and parking_buffer[ind] is not None:
if time_video - parking_buffer[ind] > config['park_sec_to_wait']:
parking_status[ind] = status
parking_buffer[ind] = None
# If status is still same and counter is open
elif status == parking_status[ind] and parking_buffer[ind] is not None:
parking_buffer[ind] = None
def print_parkIDs(park, coor_points, frame):
"""
Print Park IDs in parking spaces.
:param park: Each parking space
:param coor_points: Coordinates of parking space
:param frame: Frame to put the text indice
:return: None
"""
moments = cv2.moments(coor_points) # Calculate the center of mass of the object
centroid = (int(moments['m10'] / moments['m00']) - 3, int(moments['m01'] / moments['m00']) + 3)
cv2.putText(frame, str(int(park['id']) + 1), centroid, cv2.FONT_HERSHEY_SIMPLEX, 0.5,
(255, 0, 0), 1, cv2.LINE_AA)
if __name__ == '__main__':
# Path references
fn_yaml = r'datasets/parkinglot.yml'
config = {'park_laplacian_th': 2.1,
              'park_sec_to_wait': 2000  # wait time before changing the status of a region
}
# Set capture device
cap = cv2.VideoCapture(0)
# Read YAML data (parking space polygons)
with open(fn_yaml, 'r') as stream:
parking_data = yaml.load(stream)
parking_bounding_rects = [] # Points of parking spaces
parking_mask = [] # bool points of parking spaces
# Draw parking masks
draw_masks(parking_data)
if parking_data is not None: # If there are points of parking spaces in the parkinglot.yml file
parking_status = [False] * len(parking_data) # A list of len(parking_data) False items
parking_buffer = [None] * len(parking_data) # # A list of len(parking_data) None items
# While takes about 0.037 seconds for each loop
# 1 second ~ 27 while iterations
while_executions_counter = 0
while cap.isOpened():
# Read frame-by-frame
ret, frame = cap.read()
# Counting time of video in seconds
time_video = time.time()
if not ret: # Camera is not running
print('Capture Error')
break
# Background Subtraction
# frame_blur = cv2.GaussianBlur(frame.copy(), (5, 5), 3)
# frame_gray = cv2.cvtColor(frame_blur, cv2.COLOR_BGR2GRAY)
# detect_vacant_spaces(frame_gray)
# detect_vacant_spaces(frame_blur)
if while_executions_counter == 27: # After 1 second, check if the parking spaces status has changed
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Base class for Contrast Limited Adaptive Histogram Equalization
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
# Create implementation for CLAHE: Equalizes the histogram of a grayscale image using Contrast Limited
# Adaptive Histogram Equalization.
frame_his = clahe.apply(frame_gray)
__, frame_thr = cv2.threshold(frame_his, 40, 255, cv2.THRESH_BINARY) # Get a binary image
frame_blur = cv2.GaussianBlur(frame_thr, (5, 5), 3) # Apply a GaussianBlur filter to reduce noise
detect_vacant_spaces(frame_blur) # Call the function to detect vacant spaces
while_executions_counter = 0
        # Change the colour based on the status changes detected above and put numbers on the areas
frame_out = frame.copy()
for ind, park in enumerate(parking_data):
points = np.array(park['points']) # Points of parking spaces
if parking_status[ind]:
color = (0, 255, 0)
else:
color = (0, 0, 255)
cv2.drawContours(frame_out, [points], contourIdx=-1, color=color, thickness=2, lineType=cv2.LINE_8)
print_parkIDs(park, points, frame_out) # Put a number on each parking space
# Display video
cv2.imshow('frame', frame_out)
k = cv2.waitKey(1)
if k == 27:
break
elif k & 0xFF == ord('d'): # Delete all the parking spaces points
with open(fn_yaml, 'w') as stream:
pass
break
while_executions_counter += 1
cap.release() # Close capturing device
cv2.destroyAllWindows()
| 3.03125 | 3 |
create-results/utils.py | jphire/iothub-testbed | 0 | 12787939 | <filename>create-results/utils.py
import os
import numpy as np
import scipy as sp
import scipy.stats
import json
tags_map = {
'feed_fetched':'Fetching-feed',
'after_data_fetch':'Fetching-data',
'execution_end':'Executing-code',
'after_data_map':'Mapping-data',
'piece_response_latency':'Hub-latency',
'dist_response_latency':'Gathering-all-responses',
'after_reducer':'Reducing-data',
'before_sending_response':'Formatting-response',
'after_response':'Response-in-flight'
}
def run(filename, nodes, size):
profile = {}
means = {}
cpuData = []
memData = []
latencyData = []
content_length = []
profile['after_response'] = []
with open(filename) as file:
for line in file:
data = json.loads(line)['profiler']['data']
latency = json.loads(line)['profiler']['latency']
for key, val in data.items():
usage = val[0]['usage']
if is_number(usage['cpu']):
cpuData.append(usage['cpu'])
memData.append(usage['mem'])
latencyData.append(latency)
# payload data
if ('contentLength' in val[0]):
content_length.append(val[0]['contentLength'])
# profiling data
if not key in profile:
profile[key] = []
for value in val:
profile[key].append(value['time'])
profile['after_response'].append(int(latency))
for tag, val in profile.items():
means[tag] = mean_confidence_interval(val)
mem = mean_confidence_interval(memData)
cpu = mean_confidence_interval(cpuData)
latency = mean_confidence_interval(latencyData)
if len(content_length) > 0:
payload = mean_confidence_interval(map(int, content_length))
else:
payload = []
return {'nodes':nodes, 'size':size, 'cpu':cpu, 'mem':mem, 'latency':latency, 'payload':payload, 'profile':means}
def mean_confidence_interval(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * sp.stats.t._ppf((1+confidence)/2., n-1)
return m, m-h, m+h
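# Illustrative example (added; values are approximate): the helper returns the sample mean
# together with the lower and upper bounds of the confidence interval, e.g.
#   mean_confidence_interval([1.0, 2.0, 3.0])  ->  (2.0, ~-0.48, ~4.48)   # 95% CI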
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
except:
return False
def prettify(tag):
return tags_map[tag]
def getTotalNodeCount(nesting_level, node):
total_node_count = node
if (nesting_level == '2' or nesting_level == '3'):
total_node_count = int(node) * 4 if nesting_level == '2' else int(node) * 8
return str(total_node_count)
def getLatestTimestamp():
latest = 0
# Get latest log files' timestamped path
for dirname, dirnames, filenames in os.walk('../logs/solmuhub'):
for subdirname in dirnames:
try:
tmp = int(subdirname)
latest = max(latest, tmp)
latest_path = os.path.join(dirname, str(latest))
except ValueError:
continue
return latest, latest_path
def configureTest(path):
sizes = []
nodes = []
depths = []
# Get node count and sizes from the logs
for dirname, dirnames, filenames in os.walk(path):
for name in filenames:
try:
nodeCount = name.split('-')[0]
size = name.split('-')[3]
depth = name.split('-')[5]
if size not in sizes:
sizes.append(size)
if nodeCount not in nodes:
print nodeCount
nodes.append(nodeCount)
if depth not in depths:
depths.append(depth)
except IndexError:
continue
except ValueError:
continue
# Sort sizes so that results are in correct format for plotting
sizes.sort(key=int)
depths.sort(key=int)
nodes.sort(key=int)
return sizes, nodes, depths
| 2.3125 | 2 |
database/olympics.py | rossgk2/cs257 | 0 | 12787940 | <reponame>rossgk2/cs257
# Written by <NAME> and <NAME>
import psycopg2
import argparse
from config import password
from config import database
from config import user
(connection, cursor) = (None, None)
def main():
arguments = get_parsed_arguments()
# Update the connection and cursor.
(_connection, _cursor) = database_init()
global connection
connection = _connection
global cursor
cursor = _cursor
# Do the SQL queries depending on what was passed in from the command line.
if arguments.athletesfrom != "":
noc = arguments.athletesfrom
query1(noc)
if arguments.nocbymedals:
query2()
if arguments.events:
query3()
# Get stuff from command line.
def get_parsed_arguments():
    parser = argparse.ArgumentParser(
        description='Query the olympics database for athletes, NOCs and events',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog='python3 olympics.py\n'
               '--athletesfrom (-a) takes a NOC and returns every athlete from the region (takes one argument)\n'
               '--nocbymedals (-n) lists the NOCs by gold medals (takes one argument to run, although its value does not matter)\n'
               '--events (-e) lists every olympian to compete in a certain event (one argument)\n'
               '--help (-h) gives you the list of commands if needed')
parser.add_argument('--athletesfrom', '-a', help='lists every athlete from a given NOC')
    parser.add_argument('--nocbymedals', '-n', help='lists every NOC by total gold medals (decreasing order); needs an argument to run, although its value does not matter')
parser.add_argument('--events', '-e', help='lists every athlete who has competed in the listed event')
parsed_arguments = parser.parse_args()
return parsed_arguments
def database_init():
# Connect to the database.
try:
connection = psycopg2.connect(database=database, user=user, password=password)
except Exception as e:
print(e)
exit()
# Set up cursor.
try:
cursor = connection.cursor()
except Exception as e:
print(e)
exit()
return (connection, cursor)
# SQL command to get the names of all the athletes from a specified NOC
def get_query1(noc):
return "SELECT DISTINCT athletes.name, athletes.team_id, teams.id from athletes, teams WHERE teams.noc = {}" \
" and athletes.team_id = teams.id".format(noc)
# SQL command to list all the NOCs and the number of gold medals they have won, in decreasing order of the number of gold medals.
def get_query2(noc, medal):
return "SELECT athletes.name, teams.id FROM athletes, teams WHERE teams.noc = {}" \
" AND athletes.medal = '{}' AND athletes.team_id = teams.id".format(noc, medal)
# SQL command to list the names of all the athletes that competed in wrestling
query3 = "SELECT DISTINCT athletes.name, athletes.event FROM athletes WHERE event = 'Wrestling' and athletes.event = event"
def query1(noc):
# Query the database, leaving you with a "cursor"--an object you can
# use to iterate over the rows generated by your query.
try:
cursor.execute(get_query1("%s"), (noc, ))
except Exception as e:
print(e)
exit()
print(1)
# We have a cursor now. Iterate over its rows to print the results.
for row in cursor:
print(row[0], row[1])
print()
def query2():
# Get list of NOCs.
try:
cursor.execute("SELECT teams.noc FROM teams")
except Exception as e:
print(e)
exit()
noc_list = [n[0] for n in cursor]
def get_medal_count(noc, medal):
# Do query2.
try:
cursor.execute(get_query2("%s", medal), (noc, ))
except Exception as e:
print(e)
exit()
n = 0
for i in cursor: # dumb but works
n += 1
return n
def order_nocs_by_medal_count():
result = []
for noc in noc_list:
team_medals = []
team_medals.append(noc)
team_medals.append(get_medal_count(noc, "Gold"))
if len(result) > 0:
index = 0
for medals in result:
if medals[1] < team_medals[1]:
result.insert(index, team_medals)
break
elif team_medals[1] == 0:
result.append(team_medals)
break
else:
index += 1
else:
result.append(team_medals)
return result
for (noc, count) in order_nocs_by_medal_count():
print("Number of gold medals won by {} is {}.".format(noc, count))
def query3():
# Now do query3.
try:
        cursor.execute(query3_sql)
except Exception as e:
print(e)
exit()
main() | 3.25 | 3 |
agent/setup.py | kuralabs/coral-dashboard | 0 | 12787941 | <reponame>kuralabs/coral-dashboard
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 KuraLabs S.R.L
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Installation script for the Coral Agent.
"""
from pathlib import Path
from setuptools import setup, find_packages
def check_cwd():
"""
You must always change directory to the parent of this file before
executing the setup.py script. If not setuptools will fail reading files,
including and excluding files from the MANIFEST.in, defining the library
path, etc.
"""
from os import chdir
here = Path(__file__).resolve().parent
if Path().cwd().resolve() != here:
print('Changing directory to {}'.format(here))
chdir(str(here))
check_cwd()
def read(filename):
"""
Read the content of a file.
:param str filename: The file to read.
:return: The content of the file.
:rtype: str
"""
return Path(filename).read_text(encoding='utf-8')
def find_version(filename):
"""
Find version of a package.
This will read and parse a Python module that has defined a __version__
variable. This function does not import the file.
::
setup(
...
version=find_version('lib/package/__init__.py'),
...
)
:param str filename: Path to a Python module with a __version__ variable.
:return: The version of the package.
:rtype: str
"""
import re
content = read(filename)
version_match = re.search(
r"^__version__ = ['\"]([^'\"]*)['\"]", content, re.M
)
if not version_match:
raise RuntimeError('Unable to find version string.')
version = version_match.group(1)
print('Version found:')
print(' {}'.format(version))
print('--')
return version
def find_requirements(filename):
"""
Finds PyPI compatible requirements in a pip requirements.txt file.
In this way requirements needs to be specified only in one, centralized
place:
::
setup(
...
install_requires=find_requirements('requirements.txt'),
...
)
Supports comments and non PyPI requirements (which are ignored).
:param str filename: Path to a requirements.txt file.
:return: List of requirements with version.
:rtype: list
"""
import string
content = read(filename)
requirements = []
ignored = []
for line in (
line.strip() for line in content.splitlines()
):
# Comments
if line.startswith('#') or not line:
continue
if line[:1] not in string.ascii_letters:
ignored.append(line)
continue
requirements.append(line)
print('Requirements found:')
for requirement in requirements:
print(' {}'.format(requirement))
print('--')
print('Requirements ignored:')
for requirement in ignored:
print(' {}'.format(requirement))
print('--')
return requirements
setup(
name='coral_agent',
version=find_version('coral_agent/__init__.py'),
packages=find_packages(),
# Dependencies
install_requires=find_requirements('requirements.txt'),
# Metadata
author='<NAME>',
author_email='<EMAIL>',
description=(
'Agent (data collector) for the Coral Project (Gaming PC)'
),
long_description=read('README.rst'),
url='https://github.com/kuralabs/coral-dashboard',
keywords='coral_agent',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
],
python_requires='>=3.5.0',
entry_points={
'console_scripts': [
'coral_agent = coral_agent.__main__:main',
]
},
)
| 1.726563 | 2 |
api_yamdb/reviews/management/commands/upload.py | LHLHLHE/api_yamdb | 0 | 12787942 | <reponame>LHLHLHE/api_yamdb
import csv
import os
from django.core.management.base import BaseCommand
from django.conf import settings
from django.contrib.auth import get_user_model
from reviews.models import (Category,
Genre,
GenreTitle,
Title,
Comment,
Review)
User = get_user_model()
def category_create(row):
Category.objects.get_or_create(
id=row[0],
name=row[1],
slug=row[2],
)
def genre_create(row):
Genre.objects.get_or_create(
id=row[0],
name=row[1],
slug=row[2],
)
def titles_create(row):
Title.objects.get_or_create(
id=row[0],
name=row[1],
year=row[2],
category_id=row[3],
)
def genre_title_create(row):
GenreTitle.objects.get_or_create(
id=row[0],
genre_id=row[2],
title_id=row[1],
)
def users_create(row):
User.objects.get_or_create(
id=row[0],
username=row[1],
email=row[2],
role=row[3],
bio=row[4],
first_name=row[5],
last_name=row[6],
)
def review_create(row):
Review.objects.get_or_create(
id=row[0],
title_id=row[1],
text=row[2],
author_id=row[3],
score=row[4],
pub_date=row[5]
)
def comment_create(row):
Comment.objects.get_or_create(
id=row[0],
review_id=row[1],
text=row[2],
author_id=row[3],
pub_date=row[4],
)
action = {
'category.csv': category_create,
'genre.csv': genre_create,
'titles.csv': titles_create,
'genre_title.csv': genre_title_create,
'users.csv': users_create,
'review.csv': review_create,
'comments.csv': comment_create,
}
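# Example invocation (a sketch, added for clarity; assumes the CSV files listed above live
# in <BASE_DIR>/static/data/ as this command expects):
#
#   python manage.py upload users.csv category.csv genre.csv titles.csv \
#       genre_title.csv review.csv comments.csv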
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'filename',
nargs='+',
type=str
)
def handle(self, *args, **options):
for filename in options['filename']:
path = os.path.join(settings.BASE_DIR, "static/data/") + filename
with open(path, 'r', encoding='utf-8') as file:
reader = csv.reader(file)
next(reader)
for row in reader:
action[filename](row)
| 2.140625 | 2 |
petridish/app/directory.py | Bhaskers-Blu-Org2/petridishnn | 121 | 12787943 | <reponame>Bhaskers-Blu-Org2/petridishnn
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import glob
import re
from petridish.philly.container import is_philly
from petridish.app.multi_proc import has_stopped
"""
Dir structures
"""
def _updir(d, n=1):
for _ in range(n):
d = os.path.dirname(d)
return d
"""
Philly specific dir structures regarding multiple trials of the same experiment
"""
def previous_trial_log_root(log_root):
if not is_philly():
return None
# e.g., xx/application_xx-xx/logs/2/petridish_main
log_root = os.path.normpath(log_root)
triali = int(os.path.basename(_updir(log_root, 1)))
if triali == 1:
return None
return os.path.join(_updir(log_root, 2), str(triali - 1), os.path.basename(log_root))
def previous_trial_model_root(model_root):
if not is_philly():
return None
# e.g., xxx/application_xx-xx/models
return os.path.normpath(model_root)
#model_root = os.path.normpath(model_root)
#triali = int(os.path.basename(model_root))
#if triali == 1:
# return None
#return os.path.join(_updir(model_root, 1), str(triali - 1))
"""
Helper functions to create names for communication over file-system.
Direct connections are not available.
"""
def _auto_script_fn(i, prefix=None):
if prefix is not None:
return '{}_{}.sh'.format(prefix, i)
return '{}.sh'.format(i)
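# Illustrative behaviour (added comment, not in the original module):
#   _auto_script_fn(3)               -> '3.sh'
#   _auto_script_fn(3, prefix='job') -> 'job_3.sh'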
def _auto_script_dir(log_dir, is_critic, is_log_dir_root=False):
n_updir = 1 + int(bool(is_critic)) - int(bool(is_log_dir_root)) #+ 2 * is_philly()
return os.path.join(_updir(log_dir, n_updir), 'auto_scripts')
def _all_mi(dir_root):
all_mi = []
for dn in os.listdir(dir_root):
try:
mi = int(os.path.basename(dn.strip()))
all_mi.append(mi)
except:
continue
return all_mi
def _dn_to_mi(dn):
try:
mi = int(os.path.basename(os.path.normpath(dn)))
return mi
except:
return None
def _mi_to_dn(dir_root, model_iter):
return os.path.join(dir_root, str(model_iter))
def _dn_to_ci(dn):
try:
ci = int(os.path.basename(os.path.normpath(dn)))
return ci
except:
return None
def _ci_to_dn(dir_root, critic_iter, queue_name):
if critic_iter is None:
return os.path.join(dir_root, queue_name)
return os.path.join(dir_root, queue_name, str(critic_iter))
def _all_critic_dn(dir_root, queue_name):
return glob.glob(os.path.join(dir_root, queue_name, '*'))
def _latest_ci(log_dir_root, model_dir_root, queue_name):
l_dns = _all_critic_dn(log_dir_root, queue_name)
max_ci = None
for dn in l_dns:
dn = os.path.normpath(dn.strip())
try:
# make sure the dirname is an int so it is actually a dir for critic
ci = int(os.path.basename(dn))
except:
continue
if not has_stopped(dn):
# make sure model is mark finished.
continue
if not os.path.exists(_ci_to_dn(model_dir_root, ci, queue_name)):
# make sure model exists
continue
if max_ci is None or max_ci < ci:
max_ci = ci
return max_ci
def _mi_info_save_fn(log_dir_root):
return os.path.join(log_dir_root, 'mi_info.npz') | 1.84375 | 2 |
idioms_examples/zip/zip.py | jjmerchante/Pythonic-webserver | 3 | 12787944 | <reponame>jjmerchante/Pythonic-webserver<filename>idioms_examples/zip/zip.py
names = ["John", "Alexander", "Bob"]
marks = [5.5, 7, 10]
for name, mark in zip(names, marks):
print(name, mark)
# John 5.5
# Alexander 7
# Bob 10
| 2.984375 | 3 |
MyFirstApps/Calculator/BlaCal-V-3-0-0/BlaCal-3-0-0.py | EnzoH8321/Minis | 0 | 12787945 | <reponame>EnzoH8321/Minis<gh_stars>0
# BlaCal Version 3.0.0
# Programmer BlackIQ
# Library
import math
from termcolor import colored
# Colorising with termcolor
welcome = colored('welcome to BlaCal calculator !', 'blue')
version = colored('verson 3.0.0', 'blue')
jobs = colored('these are the jobs that this program can do !', 'red')
jobs_list = colored('sum , min , mul , div , sqrt , sin , cos , tan', 'yellow')
end_1 = colored("The End", 'red')
end_2 = colored("Programmer : BlackIQ", 'red')
# Functions {
# Describtion function
def start():
print(welcome)
print(version)
print(jobs)
print(jobs_list)
# End function
def end():
print(end_1)
print(end_2)
# Functions Doing
def sum():
sum_1 = float(input("what is first number ? "))
sum_2 = float(input("what is second number ? "))
o_sum = (sum_1+sum_2)
sum_cololed = colored(o_sum, 'green')
print(sum_cololed)
def min():
min_1 = float(input("what is first number ? "))
min_2 = float(input("what is second number ? "))
o_min = (min_1-min_2)
min_cololed = colored(o_min, 'green')
print(min_cololed)
def mul():
mul_1 = float(input("what is first number ? "))
mul_2 = float(input("what is second number ? "))
o_mul = (mul_1*mul_2)
mul_cololed = colored(o_mul, 'green')
print(mul_cololed)
def div():
div_1 = float(input("what is first number ? "))
div_2 = float(input("what is second number ? "))
o_div = (div_1/div_2)
div_cololed = colored(o_div, 'green')
print(div_cololed)
def sqrt():
sqrt_number = float(input("witch number ? "))
o_sqrt = math.sqrt(sqrt_number)
sqrt_colored = colored(o_sqrt, 'green')
print(sqrt_colored)
def sin():
sin_number = float(input("witch number ? "))
o_sin = math.sin(sin_number)
sin_colored = colored(o_sin, 'green')
print(sin_colored)
def cos():
cos_number = float(input("witch number ? "))
o_cos = math.cos(cos_number)
cos_colored = colored(o_cos, 'green')
print(cos_colored)
def tan():
tan_number = float(input("witch number ? "))
o_tan = math.tan(tan_number)
tan_colored = colored(o_tan, 'green')
print(tan_colored)
# Conditional
def doing_me():
if doing == "sum":
sum()
if doing == "min":
min()
if doing == "mul":
mul()
if doing == "div":
div()
if doing == "sqrt":
sqrt()
if doing == "sin":
sin()
if doing == "cos":
cos()
if doing == "tan":
tan()
# Start
start()
# Input
doing = str(input("what do you want to do ? "))
# Main Code
doing_me()
# End
end() | 3.4375 | 3 |
dash_mdc_neptune/themes/utils.py | noosenergy/dash-mdc-neptune | 0 | 12787946 | import base64
import pathlib
from typing import List, Tuple
STATIC_DIR = pathlib.Path(__file__).parent / "static"
def _get_img_uri(img_file: str) -> str:
img_path = STATIC_DIR / img_file
with img_path.open(mode="rb") as f:
img_data = f.read()
encoded = base64.b64encode(img_data).decode("ascii")
return f"data:image/png;base64,{encoded}"
def _to_colourscale(colour_sequence: List[str]) -> List[Tuple[float, str]]:
length = len(colour_sequence)
return [(i / (length - 1), colour) for i, colour in enumerate(colour_sequence)]
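# Illustrative example (added; the colours are arbitrary): the helper spaces the sequence
# evenly over [0, 1]:
#   _to_colourscale(["#000000", "#808080", "#ffffff"])
#   -> [(0.0, "#000000"), (0.5, "#808080"), (1.0, "#ffffff")]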
| 2.875 | 3 |
plugins/fitter/minimizer.py | xraypy/_xraylarch_attic | 1 | 12787947 | <filename>plugins/fitter/minimizer.py
"""
minimizer for Larch, similar to lmfit-py.
Minimizer is a wrapper around scipy.leastsq, allowing a user to build
a fitting model as a function of general purpose fit parameters which
can be fixed or floated, bounded, or written as larch expressions.
The user sets up a model with a Group which contains all the fitting
parameters, and writes a larch procedure to calculate the residual to
be minimized in terms of the parameters of this Group.
The procedure to calculate the residual will take the parameter Group
as the first argument, and can take additional optional arguments.
params = Group()
params.slope = Param(0, vary=True, min=0)
params.offset = Param(10, vary=True)
def residual(pgroup, xdata=None, ydata=None):
line = pgroup.offset + xdata * pgroup.slope
pgroup.this_line = line
return (ydata - line)
end def
minimize(residual, params, kws={'xdata': x, 'ydata': y})
After this, each of the parameters in the params group will contain
best fit values, uncertainties and correlations, and the params group
will contain fit statistics chisquare, etc.
"""
from numpy import sqrt
from scipy.optimize import leastsq
import re
from larch.utils import OrderedDict
from larch.larchlib import Procedure, DefinedVariable
from larch.symboltable import isgroup
class Parameter(object):
"""A Parameter is the basic Parameter going
into Fit Model. The Parameter holds many attributes:
value, vary, max_value, min_value.
Note that constraints are set elsewhere (with Larch DefinedVariables)
The value and min/max values will be set to floats.
"""
def __init__(self, name=None, value=None, vary=True,
min=None, max=None, expr=None, _larch=None, **kws):
self.name = name
self.value = value
self.init_value = value
self.min = min
self.max = max
self.vary = vary
self.expr = expr
self.stderr = None
self.correl = None
self.defvar = None
if self.expr is not None and _larch is not None:
self.defvar = DefinedVariable(self.expr, _larch=_larch)
self.vary = False
self.value = self.defvar.evaluate()
def __repr__(self):
s = []
if self.name is not None:
s.append("'%s'" % self.name)
val = repr(self.value)
if self.vary and self.stderr is not None:
val = "value=%s +/- %.3g" % (repr(self.value), self.stderr)
elif not self.vary:
val = "value=%s (fixed)" % (repr(self.value))
s.append(val)
s.append("bounds=[%s:%s]" % (repr(self.min), repr(self.max)))
if self.expr is not None:
s.append("expr='%s'" % (self.expr))
return "<Parameter %s>" % ', '.join(s)
class MinimizerException(Exception):
"""General Purpose Exception"""
def __init__(self, msg):
Exception.__init__(self)
self.msg = msg
def __str__(self):
return "\n%s" % (self.msg)
class Minimizer(object):
"""general minimizer"""
err_nonparam = "params must be a minimizer.Parameters() instance or list of Parameters()"
err_maxfev = """Too many function calls (max set to %i)! Use:
minimize(func, params, ...., maxfev=NNN)
or set leastsq_kws['maxfev'] to increase this maximum."""
def __init__(self, fcn, params, fcn_args=None, fcn_kws=None,
iter_cb=None, scale_covar=True,
_larch=None, jacfcn=None, **kws):
self.userfcn = fcn
self.paramgroup = params
self.userargs = fcn_args
if self.userargs is None:
self.userargs = []
self.userkws = fcn_kws
if self.userkws is None:
self.userkws = {}
self._larch = _larch
self.iter_cb = iter_cb
self.scale_covar = scale_covar
self.kws = kws
self.nfev_calls = 0
self.jacfcn = jacfcn
self.__prepared = False
def __update_params(self, fvars):
"""
set parameter values from values of fitted variables
"""
if not self.__prepared:
print 'fit not prepared!'
group = self.paramgroup
for name, val in zip(self.var_names, fvars):
par = getattr(group, name)
if par.min is not None: val = max(val, par.min)
if par.max is not None: val = min(val, par.max)
par.value = val
for name in self.defvars:
par = getattr(group, name)
par.value = par.defvar.evaluate()
def __residual(self, fvars):
"""
residual function used for least-squares fit.
With the new, candidate values of fvars (the fitting variables),
this evaluates all parameters, including setting bounds and
evaluating constraints, and then passes those to the
user-supplied function to calculate the residual.
"""
self.nfev_calls = self.nfev_calls + 1
self.__update_params(fvars)
out = self.userfcn(self.paramgroup, *self.userargs, **self.userkws)
if hasattr(self.iter_cb, '__call__'):
self.iter_cb(self.params, self.nfev_calls, out,
*self.userargs, **self.userkws)
return out
def __jacobian(self, fvars):
"""
analytical jacobian to be used with the Levenberg-Marquardt
"""
# computing the jacobian
self.__update_params(fvars)
return self.jacfcn(self.paramgroup, *self.userargs, **self.userkws)
def prepare_fit(self):
"""prepare parameters for fit
determine which parameters are actually variables
and which are defined expressions.
"""
if self.__prepared:
return
if not isgroup(self.paramgroup):
return 'param group is not a Larch Group'
self.nfev_calls = 0
self.var_names = []
self.defvars = []
self.vars = []
for name in dir(self.paramgroup):
par = getattr(self.paramgroup, name)
if not isinstance(par, Parameter):
continue
if par.expr is not None:
par.defvar = DefinedVariable(par.expr, _larch=self._larch)
par.vary = False
self.defvars.append(name)
elif par.vary:
self.var_names.append(name)
self.vars.append(par.value)
if par.name is None:
par.name = name
self.nvarys = len(self.vars)
# now evaluate make sure initial values are set
# are used to set values of the defined expressions.
# this also acts as a check of expression syntax.
self.__prepared = True
def leastsq(self, scale_covar=True, **kws):
"""
use Levenberg-Marquardt minimization to perform fit.
This assumes that ModelParameters have been stored,
and a function to minimize has been properly set up.
        This wraps scipy.optimize.leastsq, and keyword arguments are passed
directly as options to scipy.optimize.leastsq
When possible, this calculates the estimated uncertainties and
variable correlations from the covariance matrix.
writes outputs to many internal attributes, and
returns True if fit was successful, False if not.
"""
self.prepare_fit()
lskws = dict(full_output=1, xtol=1.e-7, ftol=1.e-7,
gtol=1.e-7, maxfev=1000*(self.nvarys+1), Dfun=None)
lskws.update(self.kws)
lskws.update(kws)
if lskws['Dfun'] is not None:
self.jacfcn = lskws['Dfun']
lskws['Dfun'] = self.__jacobian
lsout = leastsq(self.__residual, self.vars, **lskws)
vbest, cov, infodict, errmsg, ier = lsout
resid = infodict['fvec']
group = self.paramgroup
        lmdif_message = errmsg
success = ier in [1, 2, 3, 4]
message = 'Fit succeeded.'
if ier == 0:
message = 'Invalid Input Parameters.'
elif ier == 5:
message = self.err_maxfev % lskws['maxfev']
else:
            message = 'Fit tolerance may be too small.'
if cov is None:
message = '%s Could not estimate error-bars' % message
lmdif_out = dict(message=message, lmdif_message=errmsg, ier=ier, success=success)
lmdif_out.update(infodict)
ndata = len(resid)
chisqr = (resid**2).sum()
nfree = (ndata - self.nvarys)
redchi = chisqr / nfree
for name in self.var_names:
par = getattr(group, name)
par.stderr = 0
par.correl = None
if cov is not None:
errorbars = True
covar = cov
if self.scale_covar:
cov = cov * chisqr / nfree
for ivar, name in enumerate(self.var_names):
par = getattr(group, name)
par.stderr = sqrt(cov[ivar, ivar])
par.correl = {}
for jvar, name2 in enumerate(self.var_names):
if jvar != ivar:
par.correl[name2] = (cov[ivar, jvar]/
(par.stderr * sqrt(cov[jvar, jvar])))
setattr(group, 'errorbars', errorbars)
setattr(group, 'covar_vars', self.var_names)
setattr(group, 'covar', cov)
setattr(group, 'lmdif_status', ier)
setattr(group, 'nfcn_calls', infodict['nfev'])
setattr(group, 'residual', resid)
setattr(group, 'message', message)
setattr(group, 'chi_square', chisqr)
setattr(group, 'chi_reduced', redchi)
setattr(group, 'nfree', nfree)
print infodict.keys()
return success
def minimize(fcn, group, args=None, kws=None,
scale_covar=True, iter_cb=None, _larch=None, **fit_kws):
"""simple minimization function,
finding the values for the params which give the
minimal sum-of-squares of the array return by fcn
"""
if not isgroup(group):
return 'param group is not a Larch Group'
fitter = Minimizer(fcn, group, fcn_args=args, fcn_kws=kws,
iter_cb=iter_cb, scale_covar=scale_covar,
_larch=_larch, **fit_kws)
return fitter.leastsq()
def parameter(**kws):
"create a fitting Parameter as a Variable"
return Parameter(**kws)
def guess(value, min=None, max=None, _larch=None, **kws):
"""create a fitting Parameter as a Variable.
A minimum or maximum value for the variable value can be given:
x = guess(10, min=0)
y = guess(1.2, min=1, max=2)
"""
return Parameter(value=value, min=min, max=max, vary=True,
_larch=_larch, expr=None)
def registerLarchPlugin():
return ('_math', {'minimize': minimize,
'param': parameter,
'guess': guess,
})
| 3.125 | 3 |
app/core/migrations/0019_beneficiaire_date_creation.py | rach4you/secteur-app-api | 0 | 12787948 | # Generated by Django 3.0.7 on 2020-06-11 11:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0018_auto_20200609_1810'),
]
operations = [
migrations.AddField(
model_name='beneficiaire',
name='date_creation',
field=models.DateTimeField(auto_now_add=True, null=True),
),
]
| 1.539063 | 2 |
sites/build_index.py | diegosouza/hurl-dev | 0 | 12787949 | #!/usr/bin/env python3
import sys
import json
import re
from pathlib import Path
from typing import List, Optional, Any, Dict
from bs4 import BeautifulSoup, Tag
class Hit:
search: str
title: str
anchor: str
content: str
start: int
def __init__(self,
search: str,
page_url: str,
page_title: str,
anchor_url: str,
anchor_title: str,
content: str,
start: int) -> None:
self.search = search
self.page_url = page_url
self.page_title = page_title
self.anchor_url = anchor_url
self.anchor_title = anchor_title
self.content = content
self.start = start
def __str__(self) -> str:
return f"Hit(search={self.search} anchor_url={self.anchor_url} anchor_title={self.anchor_title}"
def build_file_index(f: Path) -> List[Hit]:
"""Construct an index from html file."""
soup = BeautifulSoup(f.read_text(), "html.parser")
try:
title = soup.title.text
except AttributeError:
sys.stderr.write(f"Error in path {f}\n")
raise
relative_path = f.relative_to("hurl.dev/_site/")
url: str
if str(relative_path) == "index.html":
url = "/"
else:
url = f"/{relative_path}"
    # Build a textual representation of the page by aggregating
    # all the tags that contain "significant" text
all_hits: List[Hit] = []
all_tags: List[Tag] = []
root = soup.find("div", class_=re.compile("indexed"))
if not root:
sys.stderr.write(f"No indexed content in path {f}\n")
return []
all_tags.extend(root.find_all("p"))
all_tags.extend(root.find_all("ul"))
all_tags.extend(root.find_all("h2"))
all_tags.extend(root.find_all("h3"))
all_tags.extend(root.find_all("h4"))
for tag in all_tags:
hits = build_tag_index(url=url, title=title, soup=soup, tag=tag)
all_hits.extend(hits)
return all_hits
non_significant_words = [
"all",
"also",
"and",
"any",
"are",
"both",
"but",
"can",
"doc",
"does",
"etc",
"for",
"from",
"has",
"have",
"into",
"one",
"only",
"let",
"may",
"say",
"see",
"set",
"the",
"this",
"than",
"that",
"use",
"yet",
"you",
"very",
"when",
"will",
"with",
]
def build_tag_index(url: str, title: str, soup: BeautifulSoup, tag: Tag) -> List[Hit]:
"""Build serach hit from a p tag."""
anchor_tag = find_anchor(tag)
anchor_url: str
anchor_title: str
if anchor_tag:
anchor_id = anchor_tag["id"]
anchor_url = f"{url}#{anchor_id}"
anchor_title = anchor_tag.text
else:
anchor_url = url
anchor_title = title
# Iterate over each word and construct indices
text = tag.text
text = text.replace(" \n", " ")
text = text.replace("\n", " ")
span = 120
hits: List[Hit] = []
for res in re.finditer(r"\w+", text):
match = res[0]
if len(match) < 3 or match.lower() in non_significant_words:
continue
#if len(match) == 4:
# sys.stderr.write(f"-> {match}\n")
start = res.start()
end = res.end()
if start < span:
content_before = text[:start]
else:
content_before = "..." + text[start-span:start]
if (len(text) - end) < span:
content_after = text[end:]
else:
content_after = text[end:end+span] + "..."
content = content_before + match + content_after
hit = Hit(
search=match.lower(),
page_url=url,
page_title=title,
anchor_url=anchor_url,
anchor_title=anchor_title,
content=content,
start=len(content_before)
)
hits.append(hit)
return hits
def find_anchor(tag: Optional[Any]) -> Optional[Tag]:
if isinstance(tag, Tag) and tag.get("id"):
return tag
else:
if tag.previous_sibling:
return find_anchor(tag.previous_sibling)
elif tag.parent:
return find_anchor(tag.parent)
else:
return None
def split(word: str, start: int):
return [word[:end] for end in range(start, len(word)+1)]
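# Illustrative example (added): split() yields every prefix of the word from `start`
# characters up to the full word, e.g.
#   split("hurl", 3)  ->  ['hur', 'hurl']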
def serialize_hits(hits: List[Hit]) -> str:
hits_refs: Dict[str, List[int]] = {}
    # For each hit, map every prefix (of length >= 3) of its search term to the hit's index
for i in range(0, len(hits)):
h = hits[i]
words = split(h.search, 3)
for w in words:
hr = hits_refs.get(w)
if hr:
hr.append(i)
else:
hr = [i]
hits_refs[w] = hr
d = {"hits": hits_refs, "refs": hits}
return json.dumps(d, default=lambda o: o.__dict__, sort_keys=True)
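# Sketch of the serialized structure (added for clarity; the field values are hypothetical):
#   {"hits": {"hur": [0, 7], "hurl": [0, 7], ...},   # search prefix -> indices into "refs"
#    "refs": [{"search": "hurl", "page_url": "/", ...}, ...]}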
def main():
sys.stderr.write("Building search index...\n")
site = Path("hurl.dev/_site")
files = list(site.glob("**/*.html"))
all_hits: List[Hit] = []
for f in files:
hits = build_file_index(f)
all_hits.extend(hits)
index = serialize_hits(all_hits)
print(index)
if __name__ == "__main__":
main()
| 2.890625 | 3 |
LintCode/BFS/20191212_course_schedule_II.py | Fabriceli/MachingLearning | 0 | 12787950 | <reponame>Fabriceli/MachingLearning<filename>LintCode/BFS/20191212_course_schedule_II.py
# -*-coding:utf-8 -*-
# Reference:**********************************************
# @Time : 2019-12-14 13:54
# @Author : <NAME>
# @File : 20191212_course_schedule_II.py
# @User : liyihao
# @Software : PyCharm
# @Description: There are a total of n courses you have to take, labeled from 0 to n-1.
#
# Some courses may have prerequisites, for example to take course 0 you
# have to first take course 1, which is expressed as a pair: [0,1]
#
# Given the total number of courses and a list of prerequisite pairs,
# return the ordering of courses you should take to finish all courses.
#
# There may be multiple correct orders, you just need to return one of them.
# If it is impossible to finish all courses, return an empty array.
# Reference:**********************************************
"""
Input: 2, [[1,0]]
Output: [0,1]
Explanation: There are a total of 2 courses to take. To take course 1 you should have finished
course 0. So the correct course order is [0,1] .
Input: 4, [[1,0],[2,0],[3,1],[3,2]]
Output: [0,1,2,3] or [0,2,1,3]
Explanation: There are a total of 4 courses to take. To take course 3 you should have finished both
courses 1 and 2. Both courses 1 and 2 should be taken after you finished course 0.
So one correct course order is [0,1,2,3]. Another correct ordering is [0,2,1,3]
Note:
The input prerequisites is a graph represented by a list of edges, not adjacency matrices.
Read more about how a graph is represented.
You may assume that there are no duplicate edges in the input prerequisites.
"""
import collections
from typing import List
class Solution:
def findOrder(self, numCourses: int, prerequisites: List[List[int]]) -> List[int]:
in_degree, neighbors = self.get_in_degree(numCourses, prerequisites)
result = []
queue = collections.deque(n for n in range(numCourses) if in_degree[n] == 0)
while queue:
n = queue.popleft()
result.append(n)
for x in neighbors[n]:
in_degree[x] -= 1
if in_degree[x] == 0:
queue.append(x)
if len(result) == numCourses:
return result[::-1]
else:
return []
def get_in_degree(self, numCourses, prerequisites):
result = {}
neighbors = {}
for i in range(numCourses):
result[i] = 0
neighbors[i] = []
for i, j in prerequisites:
result[j] += 1
neighbors[i].append(j)
return result, neighbors
if __name__ == '__main__':
s = Solution()
numCourses = 3
pre = [[0, 2], [1, 2], [2, 0]]
print(s.findOrder(numCourses, pre))
| 3.28125 | 3 |