max_stars_repo_path (string, length 3-269) | max_stars_repo_name (string, length 4-119) | max_stars_count (int64, 0-191k) | id (string, length 1-7) | content (string, length 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---
build_fake_image__build_exe/__injected_code.py | DazEB2/SimplePyScripts | 117 | 12788651 | <reponame>DazEB2/SimplePyScripts
import os.path
from pathlib import Path
file_name = Path(os.path.expanduser("~/Desktop")).resolve() / "README_YOU_WERE_HACKED.txt"
file_name.touch(exist_ok=True)
| 2.265625 | 2 |
tests/fixtures/simple_app.py | ravenac95/flask-command | 4 | 12788652 | <gh_stars>1-10
from flaskcommand import flask_command
from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
return "hello, there"
main = flask_command(app)
if __name__ == '__main__':
main()
| 2.3125 | 2 |
server/utils/read_data.py | RaulRomani/Interactive-Data-Projection | 1 | 12788653 | import pandas as pd
import numpy as np
dataset_name = "Caltech"
relative = "../../../"
df = pd.read_csv(relative + "datasets/" + dataset_name + '/'+ dataset_name + '.csv', sep=";", header=None)
df = df.drop(columns=0)  # drop column 0; positional-axis df.drop(0, 1) was deprecated and removed in pandas 2.x
print(df.describe())
print(df.nunique())
print(df.head())
print(df.shape)
df[11] = pd.Categorical(df[11])
df[11] = df[11].cat.codes
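# e.g. a label column like ['cat', 'dog', 'cat'] becomes integer codes [0, 1, 0]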
num_cols = df.shape[1]-1
np.savetxt(relative + "datasets/" + dataset_name + '/' + dataset_name + "_prep_encoding2.csv", df.values[:,:num_cols], delimiter=",")
np.savetxt(relative + "datasets/" + dataset_name + '/' + dataset_name + "_labels.csv", df.values[:,num_cols], delimiter=",")
import umap
X_embedded = umap.UMAP().fit_transform(df.values[:,:num_cols])
import matplotlib.pyplot as plt
plt.scatter(X_embedded[:,0], X_embedded[:,1], c = df.values[:,num_cols])
plt.show()
| 3.03125 | 3 |
sso/user/migrations/0033_serviceemailaddress.py | uktrade/staff-sso | 7 | 12788654 | # Generated by Django 2.2.13 on 2020-07-07 17:18
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
(
"samlidp",
"0003_samlapplication_allow_access_by_email_suffix_squashed_0004_auto_20200420_1246",
),
("user", "0032_user_last_modified"),
]
operations = [
migrations.CreateModel(
name="ServiceEmailAddress",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
(
"email",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="service_emails",
to="user.EmailAddress",
),
),
(
"saml_application",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="samlidp.SamlApplication"
),
),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="service_emails",
to="user.User",
),
),
],
options={
"unique_together": {("user", "saml_application", "email")},
},
),
]
| 1.78125 | 2 |
CalibTracker/SiStripCommon/python/ShallowGainCalibration_cfi.py | ckamtsikis/cmssw | 852 | 12788655 | <reponame>ckamtsikis/cmssw<filename>CalibTracker/SiStripCommon/python/ShallowGainCalibration_cfi.py
import FWCore.ParameterSet.Config as cms
shallowGainCalibration = cms.EDProducer("ShallowGainCalibration",
Tracks=cms.InputTag("generalTracks",""),
Prefix=cms.string("GainCalibration"),
Suffix=cms.string(""))
| 1.125 | 1 |
ipyannotations/images/canvases/image_utils.py | janfreyberg/ipyannotate | 19 | 12788656 | <filename>ipyannotations/images/canvases/image_utils.py
import io
import pathlib
import re
import typing
from dataclasses import dataclass
from functools import singledispatch, wraps
from typing import Any, Callable, Optional, Sequence, Tuple
import ipywidgets as widgets
import numpy as np
from ipycanvas import Canvas
from PIL import Image, ImageEnhance
URL_REGEX = re.compile(
r"^(http:\/\/www\.|https:\/\/www\.|http:\/\/|https:\/\/)?"
+ r"[a-z0-9]+([\-\.]{1}[a-z0-9]+)*\.[a-z]{2,5}(:[0-9]{1,5})"
+ r"?(\/.*)?$"
)
@dataclass
class URL:
value: str
def __bool__(self):
return bool(URL_REGEX.match(self.value))
def adjust(
img: widgets.Image, contrast_factor: float, brightness_factor: float
) -> widgets.Image:
"""Adjust an image.
Parameters
----------
img : widgets.Image
contrast_factor : float
How much to multiply the contrast by.
brightness_factor : float
How much to multiply the brightness by.
Returns
-------
widgets.Image
"""
# turn widgets.Image into Pillow Image
pil_image = Image.open(io.BytesIO(img.value))
# apply adjustments
pil_image = ImageEnhance.Contrast(pil_image).enhance(contrast_factor)
pil_image = ImageEnhance.Brightness(pil_image).enhance(brightness_factor)
# turn back into a widget
buffer = io.BytesIO()
pil_image.save(buffer, "JPEG")
buffer.seek(0)
return widgets.Image(value=buffer.read(), format="jpg")
@singledispatch
def load_img(img: typing.Any):
"""
Load an image, whether it's from a URL, a file, an array, or an already
in-memory image.
Parameters
----------
img : widgets.Image
"""
raise ValueError(f"Can not load object of type {type(img)} as image.")
@load_img.register(widgets.Image)
def _img_already_widget(img: widgets.Image):
return img
@load_img.register(bytes)
def _img_already_loaded(img: bytes):
return widgets.Image(value=img)
@load_img.register(pathlib.Path)
def _load_img_path(img: pathlib.Path):
"""Read image from file"""
return load_img(img.read_bytes())
@load_img.register(str)
def _load_img_string(img: str):
"""Read image from file or from URL"""
img_path = pathlib.Path(img)
if img_path.is_file():
return load_img(img_path)
img_url = URL(img)
if img_url:
return load_img(img_url)
raise ValueError(f"{img} is neither an existing path nor a valid URL.")
@load_img.register(URL)
def _load_img_url(img: URL):
import requests # noqa: F401
response = requests.get(img.value)
response.raise_for_status()
return load_img(response.content)
@load_img.register(np.ndarray)
def _load_img_ndarray(img: np.ndarray):
"""create image from array"""
img = Image.fromarray(img.astype(np.uint8))
return load_img(img)
@load_img.register(Image.Image)
def _load_img_pillow(img: Image.Image):
"""Encode image as bytes"""
image_io = io.BytesIO()
img.save(image_io, "JPEG")
return load_img(image_io.getvalue())
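# Usage sketch (the path, URL, and array below are hypothetical):
#   load_img(pathlib.Path("photo.jpg"))        # dispatches to _load_img_path
#   load_img("https://example.com/photo.jpg")  # str -> URL -> _load_img_url
#   load_img(np.zeros((32, 32, 3)))            # dispatches to _load_img_ndarray
# Each call ultimately returns a widgets.Image.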
def fit_image(
img: widgets.Image, canvas: Canvas
) -> Tuple[int, int, int, int, int, int]:
"""Fit an image inside a canvas.
Parameters
----------
img : widgets.Image
canvas : Canvas
Returns
-------
Tuple[int, int, int, int, int, int]
The x and y offset; width and height on the canvas; and original image
width and height.
"""
img_width, img_height = Image.open(io.BytesIO(img.value)).size
canvas_width, canvas_height = canvas.size
height_ratio, width_ratio = (
img_height / canvas_height,
img_width / canvas_width,
)
if height_ratio <= 1 and width_ratio <= 1:
# we can fill and center the whole image
width, height = img_width, img_height
elif height_ratio >= width_ratio:
# height is the limiting factor:
height = int(img_height / height_ratio)
width = int(img_width / height_ratio)
elif height_ratio <= width_ratio:
# width is the limiting factor:
height = int(img_height / width_ratio)
width = int(img_width / width_ratio)
x, y = (canvas_width // 2 - width // 2, canvas_height // 2 - height // 2)
return x, y, width, height, img_width, img_height
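# Worked example (hypothetical sizes): a 400x300 image on a 200x200 canvas gives
# height_ratio = 1.5 and width_ratio = 2.0, so width is the limiting factor; the
# image is drawn at 200x150 with offsets (x, y) = (0, 25) to center it vertically.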
def dist(q: Sequence[float], p: Sequence[float]) -> float:
"""Euclidian distance between two points.
Parameters
----------
q : Sequence[float]
Point q
p : Sequence[float]
Point p
Returns
-------
float
The distance between point q and p.
"""
return (sum((px - qx) ** 2.0 for px, qx in zip(p, q))) ** 0.5
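# e.g. dist((0, 0), (3, 4)) == 5.0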
def trigger_redraw(fn: Callable) -> Callable:
"""Method decorator for functions that need to trigger a re-draw.
Parameters
----------
fn : Callable
The function that needs to trigger a re-draw, e.g. because it changes
the appearance of the canvas.
Returns
-------
Callable
A wrapped function that, when called, calls the input function and then
calls the re-draw method on the class.
"""
@wraps(fn)
def wrapped_fn(self, *args, **kwargs):
outp = fn(self, *args, **kwargs)
self.re_draw()
return outp
return wrapped_fn
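# Usage sketch (hypothetical class): decorate any state-mutating method so the
# canvas refreshes automatically:
#   class PointCanvas:
#       @trigger_redraw
#       def add_point(self, x, y):
#           self.points.append((x, y))  # self.re_draw() runs after this returns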
def only_inside_image(
fn: Callable[[Any, float, float], Optional[Any]]
) -> Callable:
"""Method decorator for function that needs to only work inside the image.
The input should be a method that accepts x and y.
Parameters
----------
fn : Callable
The method that accepts self, x and y.
Returns
-------
Callable
A wrapped function that, when called, returns None if x and y are not
inside the image (indicated by self.image_extent)
"""
@wraps(fn)
def wrapped_fn(self, x, y):
if not self.image_extent[0] <= x <= self.image_extent[2]:
return
if not self.image_extent[1] <= y <= self.image_extent[3]:
return
x, y = self.canvas_to_image_coordinates((x, y))
return fn(self, x, y)
return wrapped_fn
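# Note: the decorated method's class is expected to provide `image_extent`
# (x0, y0, x1, y1 of the drawn image) and `canvas_to_image_coordinates`;
# events that fall outside the image extent are silently dropped.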
| 2.765625 | 3 |
Python/Algorithm/5.Tree.py | LilyYC/legendary-train | 0 | 12788657 | """Tree Practice
=== Module description ===
- Task 1, which contains one Tree method to implement.
- Task 2, which asks you to implement two operations that allow you
to convert between trees and nested lists.
- Task 3, which asks you to learn about and use a more restricted form of
trees known as *binary trees*.
"""
from typing import Optional, List, Union
class Tree:
"""A recursive tree data structure.
Note the relationship between this class and LinkedListRec
from Lab 7; the only major difference is that _rest
has been replaced by _subtrees to handle multiple
recursive sub-parts.
"""
# === Private Attributes ===
# The item stored at this tree's root, or None if the tree is empty.
_root: Optional[object]
# The list of all subtrees of this tree.
_subtrees: List['Tree']
# === Representation Invariants ===
# - If self._root is None then self._subtrees is an empty list.
# This setting of attributes represents an empty Tree.
# - self._subtrees may be empty when self._root is not None.
# This setting of attributes represents a tree consisting of just one
# node.
# === Methods ===
def __init__(self, root: object, subtrees: List['Tree']) -> None:
"""Initialize a new Tree with the given root value and subtrees.
If <root> is None, the tree is empty.
Precondition: if <root> is None, then <subtrees> is empty.
"""
self._root = root
self._subtrees = subtrees
def is_empty(self) -> bool:
"""Return True if this tree is empty.
>>> t1 = Tree(None, [])
>>> t1.is_empty()
True
>>> t2 = Tree(3, [])
>>> t2.is_empty()
False
"""
return self._root is None
##############################################################################
# Task 1: Another tree method
##############################################################################
def __eq__(self, other: 'Tree') -> bool:
"""Return whether <self> and <other> are equal.
Hint: you can use the standard structure for recursive functions on
trees, except that you'll want to loop using an index:
`for i in range(len(self._subtrees))`.
This way, you can access the corresponding subtree in `other`.
"""
if self.is_empty():
    return other.is_empty()
elif len(self._subtrees) == 0:
    return self._root == other._root and len(other._subtrees) == 0
elif len(self._subtrees) == len(other._subtrees):
    if self._root != other._root:
        return False
    for subtree_index in range(len(self._subtrees)):
        if self._subtrees[subtree_index] != \
                other._subtrees[subtree_index]:
            return False
    return True
else:
    # Subtree counts differ, so the trees cannot be equal.
    return False
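# e.g. Tree(1, [Tree(2, [])]) == Tree(1, [Tree(2, [])])  -> True
#      Tree(1, []) == Tree(1, [Tree(2, [])])             -> False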
##############################################################################
# Task 2: Trees and nested lists
##############################################################################
def to_nested_list(self) -> list:
"""Return the nested list representation of this tree.
"""
nested_list = []
if self.is_empty():
return nested_list
elif len(self._subtrees) == 0:
nested_list.append(self._root)
return nested_list
else:
nested_list.append(self._root)
sub_list = []
for subtree_index in range(len(self._subtrees)):
sub_list.append(self._subtrees[subtree_index].to_nested_list())
nested_list.extend(sub_list)
return nested_list
def to_tree(obj: Union[int, List]) -> 'Tree':
"""Return the Tree which <obj> represents.
You may not access Tree attributes directly. This function can be
implemented only using the Tree initializer.

>>> tree3 = Tree(3, [])
>>> tree2 = Tree(2, [tree3])
>>> tree1 = Tree(1, [tree2])
>>> nested_tree = tree1.to_nested_list()  # [1, [2, [3]]]
>>> isinstance(to_tree(nested_tree), Tree)
True
>>> to_tree(nested_tree)._root
1
>>> len(to_tree(nested_tree)._subtrees)
1
>>> tree3 = Tree(3, [])
>>> tree2 = Tree(2, [tree3])
>>> tree1 = Tree(1, [tree2])
>>> tree1.to_nested_list()
[1, [2, [3]]]
"""
subtree = []
if obj == []:
return Tree(None, subtree)
elif len(obj) == 1:
root = obj[0]
return Tree(root, subtree)
else:
root = obj[0]
# tree = Tree(obj[0], subtree) # obj is a List of int and list
for item in range(1, len(obj)):
subtree.append(to_tree(obj[item]))
return Tree(root, subtree)
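# Round-trip sketch: to_tree(t.to_nested_list()) rebuilds a tree equal to t
# (as judged by __eq__ above), e.g.
#   t = Tree(1, [Tree(2, [Tree(3, [])])])
#   to_tree(t.to_nested_list()) == t  # True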
##############################################################################
# Task 3: Binary trees
##############################################################################
class BinaryTree:
"""A class representing a binary tree.
A binary tree is either empty, or a root connected to
a *left* binary tree and a *right* binary tree (which could be empty).
"""
# === Private Attributes ===
_root: Optional[object]
_left: Optional['BinaryTree']
_right: Optional['BinaryTree']
# === Representation Invariants ===
# _root, _left, _right are either ALL None, or none of them are None.
# If they are all None, this represents an empty BinaryTree.
def __init__(self, root: Optional[object],
left: Optional['BinaryTree'],
right: Optional['BinaryTree']) -> None:
"""Initialise a new binary tree with the given values.
If <root> is None, this represents an empty BinaryTree
(<left> and <right> are ignored in this case).
Precondition: if <root> is not None, then neither <left> nor <right>
are None.
"""
if root is None:
# store an empty BinaryTree
self._root = None
self._left = None
self._right = None
else:
self._root = root
self._left = left
self._right = right
def is_empty(self) -> bool:
"""Return True if this binary tree is empty.
Note that only empty binary trees can have left and right
attributes set to None.
"""
return self._root is None
def preorder(self) -> list:
"""Return a list of this tree's items using a *preorder* traversal.
"""
result = []
if self.is_empty():
return result
else:
result.append(self._root)
result += self._left.preorder()
result += self._right.preorder()
return result
def inorder(self) -> list:
"""Return a list of this tree's items using an *inorder* traversal.
"""
result = []
if self.is_empty():
return result
result += self._left.inorder()
result.append(self._root)
result += self._right.inorder()
return result
def postorder(self) -> list:
"""Return a list of this tree's items using a *postorder* traversal.
"""
result = []
if self.is_empty():
return result
result += self._left.postorder()
result += self._right.postorder()
result.append(self._root)
return result
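# Traversal sketch: with empty = BinaryTree(None, None, None) and
# t = BinaryTree(1, BinaryTree(2, empty, empty), BinaryTree(3, empty, empty)):
#   t.preorder()  == [1, 2, 3]
#   t.inorder()   == [2, 1, 3]
#   t.postorder() == [2, 3, 1]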
if __name__ == '__main__':
import python_ta
python_ta.check_all()
| 4.28125 | 4 |
workstation-backend/account/urls.py | cindy21td/WorkStation | 0 | 12788658 | from django.urls import path
from .views import RegisterView
urlpatterns = [
path("register", RegisterView.as_view(), name="account-register"),
]
| 1.492188 | 1 |
Week 1 Exercises/vara_varb.py | parkerbxyz/MITx-6.00.1x-2T2019a | 1 | 12788659 | <reponame>parkerbxyz/MITx-6.00.1x-2T2019a
if type(varA) is str or type(varB) is str:
print('string involved')
elif varA == varB:
print('equal')
elif varA > varB:
print('bigger')
elif varA < varB:
print('smaller')
| 3.609375 | 4 |
ufits.py | zhongmicai/ITS_clustering | 1 | 12788660 | #!/usr/bin/env python
#Wrapper script for UFITS package.
import sys, os, subprocess, inspect, tarfile, shutil, urllib2, urlparse
script_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.insert(0,script_path)
import lib.ufitslib as ufitslib
URL = { 'ITS': 'https://www.dropbox.com/s/3eofu8rjgr242jk/ITS.ufits.tar.gz?dl=1',
'16S': 'https://www.dropbox.com/s/dqbrr9wsqnki2di/16S.ufits.tar.gz?dl=1',
'LSU': 'https://www.dropbox.com/s/xqrted7sts48hfl/LSU.ufits.tar.gz?dl=1',
'COI': 'https://www.dropbox.com/s/dm10eqsmf01q51c/COI.ufits.tar.gz?dl=1' }
def flatten(l):
flatList = []
for elem in l:
# if an element of a list is a list
# iterate over this list and add elements to flatList
if type(elem) == list:
for e in elem:
flatList.append(e)
else:
flatList.append(elem)
return flatList
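# e.g. flatten([1, [2, 3], 4]) -> [1, 2, 3, 4]  (flattens exactly one level)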
def fmtcols(mylist, cols):
justify = []
for i in range(0,cols):
length = max(map(lambda x: len(x), mylist[i::cols]))
length += 2
ljust = map(lambda x: x.ljust(length), mylist[i::cols])
justify.append(ljust)
justify = flatten(justify)
num_lines = len(mylist) / cols
lines = (' '.join(justify[i::num_lines])
for i in range(0,num_lines))
return "\n".join(lines)
def download(url, name):
file_name = name
u = urllib2.urlopen(url)
f = open(file_name, 'wb')
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
print("Downloading: {0} Bytes: {1}".format(url, file_size))
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
p = float(file_size_dl) / file_size
status = r"{0} [{1:.2%}]".format(file_size_dl, p)
status = status + chr(8)*(len(status)+1)
sys.stdout.write(status)
f.close()
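# Usage sketch (hypothetical URL): download('https://example.com/ITS.tar.gz',
# 'ITS.tar.gz') streams the file in 8 KB blocks while printing a progress counter.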
version = '0.7.2'
default_help = """
Usage: ufits <command> <arguments>
version: %s
Description: UFITS is a package of scripts to process NGS amplicon data.
Dependencies: USEARCH v9.1.13 and VSEARCH v2.2.0
Process: ion pre-process Ion Torrent data (find barcodes, remove primers, trim/pad)
illumina pre-process folder of de-multiplexed Illumina data (gunzip, merge PE, remove primers, trim/pad)
illumina2 pre-process Illumina data from a single file (read structure: <barcode><f_primer>READ<r_primer>)
454 pre-process Roche 454 (pyrosequencing) data (find barcodes, remove primers, trim/pad)
show show number or reads per barcode from de-multiplexed data
select select reads (samples) from de-multiplexed data
remove remove reads (samples) from de-multiplexed data
sample sub-sample (rarefy) de-multiplexed reads per sample
Clustering: cluster cluster OTUs (using UPARSE algorithm)
dada2 run dada2 denoising algorithm, produces "inferred sequences" (requires R, dada2, ShortRead)
unoise2 run UNOISE2 denoising algorithm
cluster_ref closed/open reference based clustering (EXPERIMENTAL)
Utilities: filter OTU table filtering
taxonomy Assign taxonomy to OTUs
summarize Summarize Taxonomy (create OTU-like tables and/or stacked bar graphs for each level of taxonomy)
funguild Run FUNGuild (annotate OTUs with ecological information)
meta pivot OTU table and append to meta data
heatmap Create heatmap from OTU table
SRA De-multiplex data and create meta data for NCBI SRA submission
Setup: install Download/install pre-formatted taxonomy DB (UNITE DB formatted for UFITS). Only need to run once.
database Format Reference Databases for Taxonomy
primers List primers hard-coded in UFITS. Can use in pre-processing steps.
Written by <NAME> (2015) <EMAIL>
""" % version
if len(sys.argv) > 1:
if sys.argv[1] == 'ion':
help = """
Usage: ufits %s <arguments>
version: %s
Description: Script processes Ion Torrent PGM data for UFITS clustering. The input to this script should be a
FASTQ file obtained from the Torrent Server analyzed with the `--disable-all-filters` flag to the
BaseCaller. This script does the following: 1) finds Ion barcode sequences, 2) relabels headers with
appropriate barcode names, 3) removes primer sequences, 4) trims/pads reads to a set length.
Arguments: -i, --fastq,--bam Input BAM or FASTQ file (Required)
-o, --out Output base name. Default: out
-m, --mapping_file QIIME-like mapping file
-f, --fwd_primer Forward primer sequence. Default: fITS7
-r, --rev_primer Reverse primer sequence Default: ITS4
-b, --barcodes Barcodes used (list, e.g: 1,3,4,5,20). Default: all
-n, --name_prefix Prefix for re-naming reads. Default: R_
-l, --trim_len Length to trim/pad reads. Default: 250
--min_len Minimum length read to keep. Default: 50
--full_length Keep only full length sequences.
--barcode_fasta FASTA file containing barcodes. Default: pgm_barcodes.fa
--barcode_mismatch Number of mismatches in barcodes to allow. Default: 0
--primer_mismatch Number of mismatches in primers to allow. Default: 2
--cpus Number of CPUs to use. Default: all
--mult_samples Combine multiple chip runs, name prefix for chip
""" % (sys.argv[1], version)
arguments = sys.argv[2:]
if len(arguments) > 1:
cmd = os.path.join(script_path, 'bin', 'ufits-process_ion.py')
arguments.insert(0, cmd)
arguments.append('--ion')
exe = sys.executable
arguments.insert(0, exe)
subprocess.call(arguments)
else:
print help
sys.exit(1)
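    # Example invocation (hypothetical file name; flags as documented above):
    #   ufits ion -i run1.fastq -o run1 -f fITS7 -r ITS4 -l 250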
elif sys.argv[1] == 'illumina2':
help = """
Usage: ufits %s <arguments>
version: %s
Description: Script takes Illumina MiSeq data that is not de-multiplexed and has read structure similar to Ion/454
such that the reads are <barcode><fwd_primer>Read<rev_primer> for clustering using UFITS. The default
behavior is to: 1) merge the PE reads using USEARCH, 2) find barcodes, 3) find and trim primers,
4) rename reads according to sample name, 5) trim/pad reads to a set length. This script can also handle
dual barcodes (3' barcodes using the --reverse_barcode option).
Arguments: -i, --fastq Input FASTQ file (Required)
--reverse Illumina PE reverse reads.
-o, --out Output base name. Default: out
-m, --mapping_file QIIME-like mapping file
-f, --fwd_primer Forward primer sequence. Default: fITS7
-r, --rev_primer Reverse primer sequence Default: ITS4
-n, --name_prefix Prefix for re-naming reads. Default: R_
-l, --trim_len Length to trim/pad reads. Default: 250
--min_len Minimum length read to keep. Default: 50
--barcode_fasta FASTA file containing barcodes. Default: pgm_barcodes.fa
--reverse_barcode FASTA file containing 3' barcodes. Default: none
--full_length Keep only full length sequences.
--primer_mismatch Number of mismatches in primers to allow. Default: 2
--barcode_mismatch Number of mismatches in barcodes to allow. Default: 0
--cpus Number of CPUs to use. Default: all
-u, --usearch USEARCH executable. Default: usearch9
""" % (sys.argv[1], version)
arguments = sys.argv[2:]
if len(arguments) > 1:
cmd = os.path.join(script_path, 'bin', 'ufits-process_ion.py')
arguments.insert(0, cmd)
arguments.append('--illumina')
exe = sys.executable
arguments.insert(0, exe)
subprocess.call(arguments)
else:
print help
sys.exit(1)
elif sys.argv[1] == 'illumina':
help = """
Usage: ufits %s <arguments>
version: %s
Description: Script takes a folder of Illumina MiSeq data that is already de-multiplexed and processes it for
clustering using UFITS. The default behavior is to: 1) merge the PE reads using USEARCH, 2) find and
trim away primers, 3) rename reads according to sample name, 4) trim/pad reads to a set length.
Arguments: -i, --fastq Input folder of FASTQ files (Required)
-o, --out Output folder name. Default: ufits-data
-m, --mapping_file QIIME-like mapping file
-f, --fwd_primer Forward primer sequence. Default: fITS7
-r, --rev_primer Reverse primer sequence Default: ITS4
-n, --name_prefix Prefix for re-naming reads. Default: R_
-l, --trim_len Length to trim/pad reads. Default: 250
--min_len Minimum length read to keep. Default: 50
--full_length Keep only full length sequences.
--reads Paired-end or forward reads. Default: paired [paired, forward]
--read_length Illumina Read length (250 if 2 x 250 bp run). Default: 300
--rescue_forward Rescue Forward Reads if PE do not merge, e.g. long amplicons. Default: on [on,off]
--require_primer Require the Forward primer to be present. Default: on [on,off]
--primer_mismatch Number of mismatches in primers to allow. Default: 2
--cpus Number of CPUs to use. Default: all
--cleanup Remove intermediate files.
-u, --usearch USEARCH executable. Default: usearch9
""" % (sys.argv[1], version)
arguments = sys.argv[2:]
if len(arguments) > 1:
cmd = os.path.join(script_path, 'bin', 'ufits-process_illumina_folder.py')
arguments.insert(0, cmd)
exe = sys.executable
arguments.insert(0, exe)
subprocess.call(arguments)
else:
print help
sys.exit(1)
elif sys.argv[1] == '454':
help = """
Usage: ufits %s <arguments>
version: %s
Description: Script processes Roche 454 data for UFITS clustering. The input to this script should be either a
SFF file, FASTA+QUAL files, or FASTQ file. This script does the following: 1) finds barcode sequences,
2) relabels headers with appropriate barcode names, 3) removes primer sequences, 4) trims/pads reads to a set length.
Arguments: -i, --sff, --fasta Input file (SFF, FASTA, or FASTQ) (Required)
-q, --qual QUAL file (Required if -i is FASTA).
-o, --out Output base name. Default: out
-m, --mapping_file QIIME-like mapping file
-f, --fwd_primer Forward primer sequence. Default: fITS7
-r, --rev_primer Reverse primer sequence Default: ITS4
-n, --name_prefix Prefix for re-naming reads. Default: R_
-l, --trim_len Length to trim/pad reads. Default: 250
--min_len Minimum length read to keep. Default: 50
--barcode_fasta FASTA file containing barcodes. (Required)
--reverse_barcode FASTA file containing 3' barcodes. Default: none
--primer_mismatch Number of mismatches in primers to allow. Default: 2
--barcode_mismatch Number of mismatches in barcodes to allow. Default: 0
--cpus Number of CPUs to use. Default: all
""" % (sys.argv[1], version)
arguments = sys.argv[2:]
if len(arguments) > 1:
cmd = os.path.join(script_path, 'bin', 'ufits-process_ion.py')
arguments.insert(0, cmd)
arguments.append('--454')
exe = sys.executable
arguments.insert(0, exe)
subprocess.call(arguments)
else:
print help
sys.exit(1)
elif sys.argv[1] == 'cluster':
help = """
Usage: ufits %s <arguments>
version: %s
Description: Script is a "wrapper" for the UPARSE algorithm. FASTQ quality trimming via expected
errors and Dereplication are run in vsearch if installed otherwise defaults to Python
which allows for the use of datasets larger than 4GB.
Chimera filtering and UNOISE are also options.
Arguments: -i, --fastq Input FASTQ file (Required)
-o, --out Output base name. Default: out
-e, --maxee Expected error quality trimming. Default: 1.0
-p, --pct_otu OTU Clustering Radius (percent). Default: 97
-m, --minsize Minimum size to keep (singleton filter). Default: 2
--uchime_ref Run Ref Chimera filtering. Default: off [ITS, LSU, COI, 16S, custom path]
--map_filtered Map quality filtered reads back to OTUs. Default: off
--unoise Run De-noising pre-clustering (UNOISE). Default: off
--debug Keep intermediate files.
-u, --usearch USEARCH executable. Default: usearch9
""" % (sys.argv[1], version)
arguments = sys.argv[2:]
if len(arguments) > 1:
cmd = os.path.join(script_path, 'bin', 'ufits-OTU_cluster.py')
arguments.insert(0, cmd)
exe = sys.executable
arguments.insert(0, exe)
subprocess.call(arguments)
else:
print help
sys.exit(1)
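    # Example invocation (hypothetical file name; flags as documented above):
    #   ufits cluster -i run1.demux.fq -o run1 -e 1.0 -p 97 --uchime_ref ITS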
elif sys.argv[1] == 'cluster_ref':
help = """
Usage: ufits %s <arguments>
version: %s
Description: Script first quality filters reads, dereplicates, and then runs chimera
filtering. OTUs are then picked via closed reference-based clustering for
sequences that match the database at > --id identity. The remaining data can
then be clustered de novo with UPARSE and reference clustered using UTAX. EXPERIMENTAL
Arguments: -i, --fastq Input FASTQ file (Required)
-d, --db Database [ITS,ITS1,ITS2,16S,LSU,COI,custom]. (Required)
-o, --out Output base name. Default: out
-e, --maxee Expected error quality trimming. Default: 1.0
-p, --pct_otu OTU Clustering Radius (percent). Default: 97
-m, --minsize Minimum size to keep (singleton filter). Default: 2
--id Percent ID for closed reference clustering. Default: 97
--utax_db UTAX formatted DB.
--utax_level UTAX Taxonomy level to keep. Default: k [k,p,c,o,f,g,s]
--utax_cutoff UTAX confidence value threshold. Default: 0.8 [0 to 0.9]
--mock Mock community fasta file
--closed_ref_only Run only closed reference clustering.
--map_filtered Map quality filtered reads back to OTUs. Default: off
--debug Keep intermediate files.
-u, --usearch USEARCH executable. Default: usearch9
""" % (sys.argv[1], version)
arguments = sys.argv[2:]
if len(arguments) > 1:
cmd = os.path.join(script_path, 'bin', 'ufits-OTU_cluster_ref.py')
arguments.insert(0, cmd)
exe = sys.executable
arguments.insert(0, exe)
subprocess.call(arguments)
else:
print help
sys.exit(1)
elif sys.argv[1] == 'dada2':
help = """
Usage: ufits %s <arguments>
version: %s
Description: Script is a "wrapper" for the DADA2 pipeline. It will "pick OTUs" based on denoising
the data for each read predicting the original sequence. This pipeline is sensitive to
1 bp differences between sequences. Since most reference databases classify "species"
at 97%% threshold, the inferred sequences (iSeqs) from DADA2 are then clusterd at --pct_otu
to create OTUs. Both results are saved. Requires R & R packages: dada2, ShortRead
Arguments: -i, --fastq Input FASTQ file (Required)
-o, --out Output base name. Default: dada2
-l, --length Length to trim reads. (Required)
-e, --maxee Expected error quality trimming. Default: 1.0
-p, --pct_otu OTU Clustering Radius (percent). Default: 97
--platform Sequencing platform. [ion, illumina, 454]. Default: ion
--pool Pool all samples together for DADA2. Default: off
--uchime_ref Run Ref Chimera filtering. Default: off [ITS, LSU, COI, 16S, custom path]
--debug Keep intermediate files.
""" % (sys.argv[1], version)
arguments = sys.argv[2:]
if len(arguments) > 1:
cmd = os.path.join(script_path, 'bin', 'ufits-dada2.py')
arguments.insert(0, cmd)
exe = sys.executable
arguments.insert(0, exe)
subprocess.call(arguments)
else:
print help
sys.exit(1)
elif sys.argv[1] == 'unoise2':
help = """
Usage: ufits %s <arguments>
version: %s
Description: Script will run the UNOISE2 denoising algorithm followed by clustering with
UCLUST to generate OTUs. OTU table is then constructed by mapping reads to
the OTUs. Requires USEARCH v9.0.232 or greater.
Arguments: -i, --fastq Input FASTQ file (Required)
-o, --out Output base name. Default: out
-e, --maxee Expected error quality trimming. Default: 1.0
-m, --minampout Minimum size to keep for denoising. Default: 4
-p, --pct_otu OTU Clustering Radius (percent). Default: 97
-u, --usearch Path to USEARCH9. Default: usearch9
--uchime_ref Run Ref Chimera filtering. Default: off [ITS, LSU, COI, 16S, custom path]
--debug Keep intermediate files.
""" % (sys.argv[1], version)
arguments = sys.argv[2:]
if len(arguments) > 1:
cmd = os.path.join(script_path, 'bin', 'ufits-unoise2.py')
arguments.insert(0, cmd)
exe = sys.executable
arguments.insert(0, exe)
subprocess.call(arguments)
else:
print help
sys.exit(1)
elif sys.argv[1] == 'filter':
help = """
Usage: ufits %s <arguments>
version: %s
Description: Script filters OTU table generated from the `ufits cluster` command and should be run on all datasets to combat
barcode-switching or index-bleed (as high as 0.3 pct in MiSeq datasets, ~ 0.2 pct in Ion PGM datasets). This script
works best when a spike-in control sequence is used, e.g. Synthetic Mock, although a mock is not required.
Required: -i, --otu_table OTU table
-f, --fasta OTU fasta
Optional: -o, --out Base name for output files. Default: use input basename
-b, --mock_barcode Name of barcode of mock community (Recommended)
--mc Mock community FASTA file. Default: ufits_synmock.fa
Filtering -n, --normalize Normalize reads to number of reads per sample [y,n]. Default: y
-p, --index_bleed Filter index bleed between samples (percent). Default: 0.005
-s, --subtract Threshold to subtract from all OTUs (any number or auto). Default: 0
-d, --delimiter Delimiter of OTU tables. Default: csv [csv, tsv]
--min_reads_otu Minimum number of reads for valid OTU from whole experiment. Default: 2
--col_order Column order (comma separated list). Default: sort naturally
--keep_mock Keep Spike-in mock community. Default: False
--show_stats Show OTU stats on STDOUT
--cleanup Remove intermediate files.
-u, --usearch USEARCH executable. Default: usearch9
""" % (sys.argv[1], version)
arguments = sys.argv[2:]
if len(arguments) > 1:
cmd = os.path.join(script_path, 'bin', 'ufits-filter.py')
arguments.insert(0, cmd)
exe = sys.executable
arguments.insert(0, exe)
subprocess.call(arguments)
else:
print help
sys.exit(1)
elif sys.argv[1] == 'select':
help = """
Usage: ufits %s <arguments>
version: %s
Description: Script filters de-multiplexed data (.demux.fq) to select only reads from samples provided
in a text file, one name per line.
Required: -i, --input Input FASTQ file (.demux.fq)
-l, --list List of sample (barcode) names to keep, separate by space
-f, --file List of sample (barcode) names to keep in a file, one per line
-o, --out Output file name
--format File format for output file. Default: fastq [fastq, fasta]
""" % (sys.argv[1], version)
arguments = sys.argv[2:]
if len(arguments) > 1:
cmd = os.path.join(script_path, 'util', 'ufits-keep_samples.py')
arguments.insert(0, cmd)
exe = sys.executable
arguments.insert(0, exe)
subprocess.call(arguments)
else:
print help
sys.exit(1)
elif sys.argv[1] == 'remove':
help = """
Usage: ufits %s <arguments>
version: %s
Description: Script filters de-multiplexed data (.demux.fq) to remove only reads from samples provided
in a text file, one name per line.
Required: -i, --input Input FASTQ file (.demux.fq)
-l, --list List of sample (barcode) names to remove, separate by space
-f, --file List of sample (barcode) names to remove in a file, one per line
-o, --out Output file name
--format File format for output file. Default: fastq [fastq, fasta]
""" % (sys.argv[1], version)
arguments = sys.argv[2:]
if len(arguments) > 1:
cmd = os.path.join(script_path, 'util', 'ufits-remove_samples.py')
arguments.insert(0, cmd)
exe = sys.executable
arguments.insert(0, exe)
subprocess.call(arguments)
else:
print help
sys.exit(1)
elif sys.argv[1] == 'sample':
help = """
Usage: ufits %s <arguments>
version: %s
Description: Script sub-samples (rarefies) de-multiplexed data to an equal number of reads per sample. For
community analysis this may not be appropriate, since you are discarding a portion of your
data; however, it can be useful in some applications.
Required: -i, --input Input FASTQ file
-n, --num_reads Number of reads to sub-sample to
-o, --out Output FASTQ file name
""" % (sys.argv[1], version)
arguments = sys.argv[2:]
if len(arguments) > 1:
cmd = os.path.join(script_path, 'util', 'ufits-barcode_rarify.py')
arguments.insert(0, cmd)
exe = sys.executable
arguments.insert(0, exe)
subprocess.call(arguments)
else:
print help
sys.exit(1)
elif sys.argv[1] == 'meta':
help = """
Usage: ufits %s <arguments>
version: %s
Description: Script takes a meta data file in CSV format (e.g. from Excel) and an OTU table as input. The first column
of the meta data file must match the OTU table sample headers exactly. It then pivots the OTU table and
appends it to the meta data file.
Required: -i, --input Input OTU table
-m, --meta Meta data table (csv format)
-o, --out Output (meta data + pivoted OTU table)
--split_taxonomy Make separate tables for groups of taxonomy [k,p,c,o,f,g]
""" % (sys.argv[1], version)
arguments = sys.argv[2:]
if len(arguments) > 1:
cmd = os.path.join(script_path, 'util', 'ufits-merge_metadata.py')
arguments.insert(0, cmd)
exe = sys.executable
arguments.insert(0, exe)
subprocess.call(arguments)
else:
print help
sys.exit(1)
elif sys.argv[1] == 'show':
help = """
Usage: ufits %s <arguments>
version: %s
Description: Script takes de-multiplexed data (.demux.fq) as input and counts reads per barcode.
Required: -i, --input Input FASTQ file (.demux.fq)
--quality_trim Quality trim reads
-e, --maxee maxEE threshold for quality. Default: 1.0
-l, --length truncation length for trimming: Default: 250
-o, --out Output FASTQ file name (--quality_trim only)
""" % (sys.argv[1], version)
arguments = sys.argv[2:]
if len(arguments) > 1:
cmd = os.path.join(script_path, 'util', 'ufits-get_barcode_counts.py')
arguments.insert(0, cmd)
exe = sys.executable
arguments.insert(0, exe)
subprocess.call(arguments)
else:
print help
sys.exit(1)
elif sys.argv[1] == 'funguild':
help = """
Usage: ufits %s <arguments>
version: %s
Description: Script takes an OTU table as input and runs FUNGuild to assign a functional annotation to each OTU
based on the Guilds database. Guilds script written by <NAME> (2015).
Options: -i, --input Input OTU table
-d, --db Database to use [fungi, nematode]. Default: fungi
-o, --out Output file basename.
""" % (sys.argv[1], version)
arguments = sys.argv[2:]
if len(arguments) > 1:
cmd = os.path.join(script_path, 'util', 'Guilds_v1.0.py')
arguments.insert(0, cmd)
exe = sys.executable
arguments.insert(0, exe)
subprocess.call(arguments)
else:
print help
sys.exit(1)
elif sys.argv[1] == 'heatmap':
help = """
Usage: ufits %s <arguments>
version: %s
Description: Script creates a heatmap from an OTU table. Several settings are customizable. Requires Matplotlib,
numpy, and pandas.
Arguments: -i, --input Input OTU table (Required)
-o, --output Output file (Required)
-m, --method Type of heatmap. Default: clustermap [clustermap,heatmap]
-d, --delimiter Delimiter of OTU table. Default: tsv [tsv,csv]
--font Font set. Default: arial
--color Color Palette. Default: gist_gray_r
--figsize Figure size. Default: 2x8
--annotate Annotate heatmap with values.
--distance_metric Distance metric to use for clustermap. Default: braycurtis
--cluster_columns Cluster the columns (samples). Default: False [True,False]
--cluster_method Clustering method for clustermap. Default: single [single,complete,average,weighted]
--scaling Scale the data by row. Default: None [None, z_score, standard]
--yaxis_fontsize Y-Axis Font Size. Default: 6
--xaxis_fontsize X-Axis Font Size. Default: 6
--debug Print pandas table on import to terminal
""" % (sys.argv[1], version)
arguments = sys.argv[2:]
if len(arguments) > 1:
cmd = os.path.join(script_path, 'util', 'csv2heatmap.py')
arguments.insert(0, cmd)
exe = sys.executable
arguments.insert(0, exe)
subprocess.call(arguments)
else:
print help
sys.exit(1)
elif sys.argv[1] == 'taxonomy':
db_list = ['DB_name', 'DB_type', 'FASTA originated from', 'Fwd Primer', 'Rev Primer', 'Records']
okay_list = []
search_path = os.path.join(script_path, 'DB')
for file in os.listdir(search_path):
if file.endswith(".udb"):
okay_list.append(file)
info_file = file + '.txt'
with open(os.path.join(search_path, info_file), 'rU') as info:
line = info.readlines()
line = [words for segments in line for words in segments.split()]
line.insert(0, file)
db_list.append(line)
if len(db_list) < 7:
db_print = "No DB configured, run 'ufits database' or 'ufits install' command."
else:
d = flatten(db_list)
db_print = fmtcols(d, 6)
help = """
Usage: ufits %s <arguments>
version: %s
Description: Script maps OTUs to taxonomy information and can append to an OTU table (optional). By default the script
uses a hybrid approach, e.g. gets taxonomy information from SINTAX, UTAX, and global alignment hits from the larger
UNITE-INSD database, and then parses the results to extract as much taxonomy information as it can at
trustworthy levels. SINTAX/UTAX results are used if BLAST-like search pct identity is less than 97%%.
If %% identity is greater than 97%%, the result with most taxonomy levels is retained.
Arguments: -f, --fasta Input FASTA file (i.e. OTUs from ufits cluster) (Required)
-i, --otu_table Input OTU table file (i.e. otu_table from ufits cluster)
-o, --out Base name for output file. Default: ufits-taxonomy.<method>.txt
-d, --db Select Pre-installed database [ITS1, ITS2, ITS, 16S, LSU, COI]. Default: ITS2
-m, --mapping_file QIIME-like mapping file
--method Taxonomy method. Default: hybrid [utax, sintax, usearch, hybrid, rdp, blast]
--fasta_db Alternative database of FASTA sequences to use for global alignment.
--utax_db UTAX formatted database. Default: ITS2.udb [See configured DB's below]
--utax_cutoff UTAX confidence value threshold. Default: 0.8 [0 to 0.9]
--usearch_db USEARCH formatted database. Default: USEARCH.udb
--usearch_cutoff USEARCH threshold percent identity. Default 0.7
--sintax_cutoff SINTAX confidence value threshold. Default: 0.8 [0 to 0.9]
-r, --rdp Path to RDP Classifier. Required if --method rdp
--rdp_db RDP Classifier DB set. [fungalits_unite, fungalits_warcup, fungallsu, 16srrna]
--rdp_cutoff RDP Classifier confidence value threshold. Default: 0.8 [0 to 1.0]
--local_blast Local Blast database (full path) Default: NCBI remote nt database
--tax_filter Remove OTUs from OTU table that do not match filter, i.e. Fungi to keep only fungi.
-u, --usearch USEARCH executable. Default: usearch9
Databases Configured:
%s
""" % (sys.argv[1], version, db_print)
arguments = sys.argv[2:]
if len(arguments) > 1:
cmd = os.path.join(script_path, 'bin', 'ufits-assign_taxonomy.py')
arguments.insert(0, cmd)
exe = sys.executable
arguments.insert(0, exe)
subprocess.call(arguments)
else:
print help
sys.exit(1)
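    # Example invocation (hypothetical file names; flags as documented above):
    #   ufits taxonomy -f otus.fa -i otu_table.txt -d ITS2 --method hybrid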
elif sys.argv[1] == 'database':
help = """
Usage: ufits %s <arguments>
version: %s
Description: Setup/Format reference database for ufits taxonomy command.
Arguments: -i, --fasta Input FASTA file (UNITE DB or UNITE+INSDC)
-o, --out Base Name for Output Files. Default: DB of ufits folder
-f, --fwd_primer Forward primer. Default: fITS7
-r, --rev_primer Reverse primer. Default: ITS4
--format Reformat FASTA headers to UTAX format. Default: unite2utax [unite2utax, rdp2utax, off]
--drop_ns Remove sequences that have more than x N's. Default: 8
--create_db Create a DB. Default: usearch [utax, usearch]
--skip_trimming Keep full length sequences. Default: off
--derep_fulllength Remove identical sequences.
--primer_mismatch Max Primer Mismatch. Default: 4
--keep_all Keep Sequence if forward primer not found.
--cpus Number of CPUs to use. Default: all
-u, --usearch USEARCH executable. Default: usearch9
""" % (sys.argv[1], version)
arguments = sys.argv[2:]
if len(arguments) > 1:
cmd = os.path.join(script_path, 'bin', 'ufits-extract_region.py')
arguments.insert(0, cmd)
exe = sys.executable
arguments.insert(0, exe)
try:
outLocation = arguments.index('-o')
except ValueError:
outLocation = arguments.index('--out')
outLocation = outLocation + 1
arguments[outLocation] = os.path.join(script_path, 'DB', arguments[outLocation])
subprocess.call(arguments)
else:
print help
sys.exit(1)
elif sys.argv[1] == 'summarize':
help = """
Usage: ufits %s <arguments>
version: %s
Description: Script traverses the taxonomy information and creates an OTU table for each
level of taxonomy, i.e. Kingdom, Phylum, Class, etc. Optionally, it will
create a Stacked Bar Graph for each taxonomy levels for each sample. Requires
Matplotlib, numpy, and pandas.
Arguments: -i, --table OTU Table containing Taxonomy information (Required)
-o, --out Base name for output files. Default: ufits-summary
--graphs Create stacked Bar Graphs.
--format Image output format. Default: eps [eps, svg, png, pdf]
--percent Convert numbers to Percent for Graphs. Default: off
--font_size Adjust font size for X-axis sample labels. Default: 8
""" % (sys.argv[1], version)
arguments = sys.argv[2:]
if len(arguments) > 1:
cmd = os.path.join(script_path, 'bin', 'ufits-summarize_taxonomy.py')
arguments.insert(0, cmd)
exe = sys.executable
arguments.insert(0, exe)
subprocess.call(arguments)
else:
print help
sys.exit(1)
elif sys.argv[1] == 'install':
help = """
Usage: ufits %s <arguments>
version: %s
Description: Script downloads pre-formatted databases for use with the `ufits taxonomy`
command. You can download databases for fungal ITS, bacterial 16S, fungal
LSU, or arthropod/chordate COI amplicons.
Arguments: -i Install Databases. Choices: ITS, 16S, LSU, COI
--force Over-write existing databases
""" % (sys.argv[1], version)
arguments = sys.argv[2:]
if len(arguments) < 1:
print help
sys.exit(1)
else:
if '-i' in arguments:
arguments.remove('-i')
if len(arguments) < 1:
print help
sys.exit(1)
for x in arguments:
if os.path.isfile(os.path.join(script_path, 'DB', x+'.udb')):
if not '--force' in arguments:
print("A formated database was found, to overwrite use '--force'. You can add more custom databases by using the `ufits database` command.")
sys.exit(1)
#download
if not x in URL:
if x == '--force':
continue
print "%s not valid, choices are ITS, 16S, LSU, COI" % x
sys.exit(1)
print "Downloading %s pre-formatted database" % x
address = URL.get(x)
download(address, x+'.ufits.tar.gz')
tfile = tarfile.open(x+'.ufits.tar.gz', 'r:gz')
tfile.extractall(x)
for file in os.listdir(x):
shutil.move(os.path.join(x,file), os.path.join(script_path, 'DB', file))
shutil.rmtree(x)
os.remove(x+'.ufits.tar.gz')
print "%s taxonomy database installed" % x
else:
print help
sys.exit(1)
elif sys.argv[1] == 'SRA':
help = """
Usage: ufits %s <arguments>
version: %s
Description: Script aids in submitting your data to the NCBI Sequence Read Archive (SRA) by splitting a FASTQ file from Ion, 454,
or Illumina by barcode sequence into separate files for submission to SRA. This ensures your data
is minimally processed as only barcodes are removed. Additionally, you can pass the --biosample argument
with an NCBI biosample tab-delimited file and the script will auto-populate an SRA submission file.
Arguments: -i, --input Input FASTQ file or folder (Required)
-o, --out Output base name. Default: sra
-b, --barcode_fasta Multi-fasta file containing barcodes used.
-s, --biosample BioSample worksheet from NCBI (from confirmation email)
-p, --platform Sequencing platform. Default: ion (ion, illumina, 454)
-f, --fwd_primer Forward primer sequence. Default: fITS7
-r, --rev_primer Reverse primer sequence Default: ITS4
-n, --names CSV name mapping file, e.g. BC_1,NewName
-d, --description Paragraph description for SRA experimental design. Use quotes to wrap paragraph.
--min_len Minimum length read to keep after trimming barcodes. Default 50
--force Overwrite directory with same name
""" % (sys.argv[1], version)
arguments = sys.argv[2:]
if len(arguments) > 1:
cmd = os.path.join(script_path, 'bin', 'ufits-fastq2sra.py')
arguments.insert(0, cmd)
exe = sys.executable
arguments.insert(0, exe)
subprocess.call(arguments)
else:
print help
sys.exit(1)
elif sys.argv[1] == 'primers':
print "----------------------------------"
print "Primers hard-coded into UFITS:"
print "----------------------------------"
for k,v in ufitslib.primer_db.items():
print k.ljust(13) + v
print "----------------------------------"
sys.exit(1)
elif sys.argv[1] == 'version':
print "ufits v.%s" % version
else:
print "%s option not recognized" % sys.argv[1]
print default_help
sys.exit(1)
else:
print default_help
| 2.859375 | 3 |
evaluation_dataset/source/FST_creation/create_lang_fsts.py | gonenhila/codeswitching-lm | 10 | 12788661 |
import string
import itertools
from operator import add
import argparse
import json
parser = argparse.ArgumentParser()
parser.add_argument("--l1", default="en", help="name of language 1")
parser.add_argument("--l2", default="sp", help="name of language 2")
parser.add_argument("--probs_l1", default="../../data/probs/probs_en.txt", help="location of probs file for language 1")
parser.add_argument("--probs_l2", default="../../data/probs/probs_sp.txt", help="location of probs file for language 2")
parser.add_argument("--dict_l1", default="../../data/dictionaries/dict_en", help="location of dictionary file for language 1")
parser.add_argument("--dict_l2", default="../../data/dictionaries/dict_sp", help="location of dictionary file for language 2")
parser.add_argument("--single_char_l1", default="AI", help="a string with characters that are o.k in L1 on there own")
parser.add_argument("--single_char_l2", default="AEOUY", help="a string with characters that are o.k in L2 on there own")
parser.add_argument("--FST", default="../../data/FSTs/", help="location of created FSTs")
parser.add_argument("--mappings", default="../../data/mappings/mappings.json", help="location of mappings file")
args = parser.parse_args()
# extract unigram probabilities
def extract_unigrams(filename):
d_probs = {}
tot = 0
for l in open(filename):
w, p = l.split("\t")
d_probs[w] = float(p)
tot += float(p)
for w in d_probs:
d_probs[w] = d_probs[w]/tot
return d_probs
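# e.g. a probs file with tab-separated lines "the 6" and "cat 2" (hypothetical)
# yields {'the': 0.75, 'cat': 0.25}: raw counts are normalized to sum to 1.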
# make the additional changes to make both sets as similar as possible
def replace_phones(v, mapping):
v_all = []
v = v.split()
for p in v:
if p in mapping:
v_all.append(mapping[p])
else:
v_all.append(p)
return " ".join(v_all)
# create a python dictionary from the phonemes dictionary
def create_dict(map_phones, dict):
d = {}
for l in open(dict):
k, v = l.strip().split("\t", 1)
k = k.upper()
v = v.split("\t")
d[k] = []
for item in v:
d[k].append(replace_phones(item, map_phones))
return d
def write_to_files(i, diff, v, k, d_probs, f, f_inv, f_both, f_both_inv, lang):
if len(v) == 1:
f.write('(0 (0 {}__{} {} {}))\n'.format(k, lang, v[0], d_probs[k]))
f_both.write('(0 (0 {}__{} {} {}))\n'.format(k, lang, v[0], d_probs[k]))
f_inv.write('(0 (0 {} {}__{} {}))\n'.format(v[0], k, lang, d_probs[k]))
f_both_inv.write('(0 (0 {} {}__{} {}))\n'.format(v[0], k, lang, d_probs[k]))
if len(v) > 1:
l = len(v)
f.write('(0 ({} *e* {} {}))\n'.format(i+1, v[0], d_probs[k]))
f_both.write('(0 ({} *e* {} {}))\n'.format(i+diff+1, v[0], d_probs[k]))
f_inv.write('(0 ({} {} *e* {}))\n'.format(i+1, v[0], d_probs[k]))
f_both_inv.write('(0 ({} {} *e* {}))\n'.format(i+diff+1, v[0], d_probs[k]))
f.write('({} (0 {}__{} {}))\n'.format(i+l-1, k, lang, v[l-1]))
f_both.write('({} (0 {}__{} {}))\n'.format(i+diff+l-1, k, lang, v[l-1]))
f_inv.write('({} (0 {} {}__{}))\n'.format(i+l-1, v[l-1], k, lang))
f_both_inv.write('({} (0 {} {}__{}))\n'.format(i+diff+l-1, v[l-1], k, lang))
for j,syl in enumerate(v[1:-1]):
f.write('({} ({} *e* {}))\n'.format(i+j+1, i+j+2, syl))
f_both.write('({} ({} *e* {}))\n'.format(i+diff+j+1, i+diff+j+2, syl))
f_inv.write('({} ({} {} *e*))\n'.format(i+j+1, i+j+2, syl))
f_both_inv.write('({} ({} {} *e*))\n'.format(i+diff+j+1, i+diff+j+2, syl))
i = i + l - 1
return i
def write_lang_to_file(i, diff, d, d_probs, f, f_inv, f_l1_l2, f_l1_l2_inv, lang):
for k in d:
if d_probs[k] == 0:
continue
for v in d[k]:
v = v.split()
i = write_to_files(i, diff, v, k, d_probs, f, f_inv, f_l1_l2, f_l1_l2_inv, lang)
return i
# Creates the FST files for carmel.
# This builds FSTs from the dictionaries: L1, L2, L1+L2, and their inverted versions.
# Each FST has word-labeled edges and outputs the matching sequence of phones when a
# word is read (each phone on a separate edge).
# The inverted FSTs do the opposite: they map phone sequences back to words.
def create_fsts(d_l1, d_l2, d_probs_l1, d_probs_l2):
with open(args.FST+args.l1, "w") as f_l1, open(args.FST+args.l1+"_inv", "w") as f_l1_inv, \
open(args.FST+args.l2, "w") as f_l2, open(args.FST+args.l2+"_inv", "w") as f_l2_inv, \
open(args.FST+args.l1+args.l2, "w") as f_l1_l2, open(args.FST+args.l1+args.l2+"_inv", "w") as f_l1_l2_inv:
f_l1.write("%%%% fst with separate phones from L1 dictionary %%%%\n0\n")
f_l1_inv.write("%%%% fst with separate phones from L1 dictionary - inverted %%%%\n0\n")
f_l2.write("%%%% fst with separate phones from L2 dictionary %%%%\n0\n")
f_l2_inv.write("%%%% fst with separate phones from L2 dictionary - inverted %%%%\n0\n")
f_l1_l2.write("%%%% fst with separate phones from L1+L2 dictionaries %%%%\n0\n")
f_l1_l2_inv.write("%%%% fst with separate phones from L1+L2 dictionaries - inverted %%%%\n0\n")
diff = write_lang_to_file(0, 0, d_l1, d_probs_l1, f_l1, f_l1_inv, f_l1_l2, f_l1_l2_inv, args.l1)
diff = write_lang_to_file(0, diff, d_l2, d_probs_l2, f_l2, f_l2_inv, f_l1_l2, f_l1_l2_inv, args.l2)
if __name__ == '__main__':
# extract unigram probabilities
d_probs_l1 = extract_unigrams(args.probs_l1)
d_probs_l2 = extract_unigrams(args.probs_l2)
# discard words that end with "." or with ")"
# discard words with one letter, except for a predefined list
for w in d_probs_l1:
if w.endswith(")") or w.endswith("."):
d_probs_l1[w] = 0
if len(w) == 1 and w not in args.single_char_l1:
d_probs_l1[w] = 0
for w in d_probs_l2:
if w.endswith(")") or w.endswith("."):
d_probs_l2[w] = 0
if len(w) == 1 and w not in args.single_char_l2:
d_probs_l2[w] = 0
if args.l1 == "en" and args.l2 == "sp":
with open(args.mappings, "r") as f:
mappings = json.load(f)
l2_l1_map, map_phones_l1, map_phones_l2 = mappings["l2_l1_map"], mappings["map_phones_l1"], mappings["map_phones_l2"]
else:
map_phones_l1 = map_phones_l2 = None
d_l1 = create_dict(map_phones_l1, args.dict_l1)
d_l2 = create_dict(map_phones_l2, args.dict_l2)
create_fsts(d_l1, d_l2, d_probs_l1, d_probs_l2)
| 3.015625 | 3 |
docs/source/conf.py | steven-lang/SPFlow | 199 | 12788662 | # -- Path setup --------------------------------------------------------------
import os
import sys
sys.path.insert(0, os.path.abspath("../../src"))
import sphinx_gallery
# -- Project information -----------------------------------------------------
project = "SPFlow"
copyright = "2020, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>"
author = "<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>"
# Get __version__ from _meta
from spn._meta import __version__
version = __version__
release = __version__
extensions = [
"sphinx.ext.linkcode",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.githubpages",
"sphinx.ext.napoleon",
"sphinx_gallery.gen_gallery",
]
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
exclude_patterns = ["build", "Thumbs.db", ".DS_Store", "env"]
pygments_style = "sphinx"
html_theme = "sphinx_rtd_theme"
html_static_path = ["_static"]
html_logo = "../../Documentation/logo/spflow_logoSquare.png"
# -- Extension configuration -------------------------------------------------
autosummary_generate = True
autodoc_default_options = {"undoc-members": None}
# -- Options for intersphinx extension ---------------------------------------
intersphinx_mapping = {
"python": ("https://docs.python.org/3/", None),
"numpy": ("https://numpy.org/doc/stable/", None),
"sklearn": ("https://scikit-learn.org/stable", None),
}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# Linkcode extension
def linkcode_resolve(domain, info):
if domain != "py":
return None
if not info["module"]:
return None
filename = info["module"].replace(".", "/")
return "https://github.com/SPFlow/SPFlow/blob/master/src/%s.py" % filename
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# Napoleon settings
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# sphinx_gallery.gen_gallery settings
sphinx_gallery_conf = {
"doc_module": "spn",
"backreferences_dir": os.path.join("generated"),
"reference_url": {"spn": None},
"remove_config_comments": True,
}
| 1.484375 | 1 |
scoreserver/scoreserver/highscore/views.py | petraszd/pyweek-14 | 0 | 12788663 | <filename>scoreserver/scoreserver/highscore/views.py
from django.http import HttpResponse
from django.core import serializers
from scoreserver.highscore.models import HighScore
from scoreserver.highscore.forms import HighScoreForm
def top10(request):
top = HighScore.objects.order_by('-score')[:10]
return HttpResponse(serializers.serialize('json', top))
def submit(request):
form = HighScoreForm(request.POST)
if request.POST and form.is_valid():
form.save()
return top10(request)
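# Usage sketch (field names assumed from the model): POSTing form data such as
# {"name": "alice", "score": 1200} to `submit` saves a HighScore row when the
# form validates, then returns the current top 10 as JSON via `top10`.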
| 2.03125 | 2 |
convert.py | mindcruzer/rbc-statement-to-csv | 5 | 12788664 | <filename>convert.py
from datetime import datetime
import sys
import xml.etree.ElementTree as ET
import re
import csv
output_file = sys.argv[1]
input_files = sys.argv[2:]
txns = []
re_exchange_rate = re.compile(r'Exchange rate-([0-9]+\.[0-9]+)', re.MULTILINE)
re_foreign_currency = re.compile(r'Foreign Currency-([A-Z]+) ([0-9]+\.[0-9]+)', re.MULTILINE)
for input_file in input_files:
tree = ET.parse(input_file)
root = tree.getroot()
rows = []
print(f'Processing {input_file}...')
# Go through each page
for page in root:
# Txn rows are in the second figure
figure = page[1]
row = ''
last_x2 = None
# A row is a list of <text> tags, each containing a character
for tag in figure:
if tag.tag == 'text':
# Filter on text size to remove some of the noise
size = float(tag.attrib['size'])
x_pos = float(tag.attrib["bbox"].split(",")[0])
x2_pos = float(tag.attrib["bbox"].split(",")[2])
if last_x2 is not None:
if x2_pos < last_x2:
row += "\n"
if len(row) > 10 and (x_pos - last_x2) > 0.7:
row += " "
last_x2 = x2_pos
if int(size) in [6, 8]:
row += tag.text
elif tag.tag != 'text' and row != '':
# Row is over, start a new one
rows.append(row)
row = ''
last_x2 = None
# Get date range of the statement
date_range_regex = re.compile(r'^.*STATEMENT FROM ([A-Z]{3}) \d{2},? ?(\d{4})? TO ([A-Z]{3}) \d{2}, (\d{4})', re.MULTILINE)
date_range = {}
for row in rows:
if match := date_range_regex.search(row):
# Year for start month may not be specified if it's the same
# as the end month
date_range[match.group(1)] = match.group(2) or match.group(4)
date_range[match.group(3)] = match.group(4)
break
# Filter down to rows that are for transactions
MONTHS = {
'JAN',
'FEB',
'MAR',
'APR',
'MAY',
'JUN',
'JUL',
'AUG',
'SEP',
'OCT',
'NOV',
'DEC'
}
txn_rows = []
for row in rows:
# Match txn rows based on month of txn date and posting date
if len(row) >= 10:
month_1 = row[:3]
month_2 = row[5:8]
if month_1 in MONTHS and month_2 in MONTHS:
txn_rows.append(row)
# Parse and format the transaction data
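    # e.g. a hypothetical row 'MAY10MAY11AMAZON.CA PURCHASE$23.45' yields
    # txn date MAY 10, posting date MAY 11, then description and amount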
for row in txn_rows:
date_1_month = row[:3]
date_1_day = row[3:5]
date_2_month = row[5:8]
date_2_day = row[8:10]
transaction_date = None
try:
transaction_date = datetime.strptime(f'{date_1_month}-{date_1_day}-{date_range[date_1_month]}', '%b-%d-%Y')
except KeyError:
# there is a strange case where the first date was before the days specified in date_range
# so just use the first year
first_year = min([int(year) for year in date_range.values()])
transaction_date = datetime.strptime(f'{date_1_month}-{date_1_day}-{first_year}', '%b-%d-%Y')
posting_date = datetime.strptime(f'{date_2_month}-{date_2_day}-{date_range[date_2_month]}', '%b-%d-%Y')
description, amount = row[10:].split('$')
if description.endswith('-'):
description = description[:-1]
amount = '-' + amount
# split desc after negative check, otherwise `-` gets left behind
description = description.split("\n")[0]
raw = row.strip()
amount = amount.replace(',', '').replace("\n", "")
match_exchange_rate = re_exchange_rate.search(raw)
match_foreign_currency = re_foreign_currency.search(raw)
txns.append({
'transaction_date': transaction_date,
'posting_date': posting_date,
'description': description,
'amount': amount,
'raw': raw,
'exchange_rate': match_exchange_rate.group(1) if match_exchange_rate else None,
'foreign_currency': match_foreign_currency.group(1) if match_foreign_currency else None,
'amount_foreign': match_foreign_currency.group(2) if match_foreign_currency else None,
})
txns = sorted(txns, key = lambda txn: txn['transaction_date'])
# Write as csv
with open(output_file, 'w', newline='') as csvfile:
csv_writer = csv.writer(csvfile)
csv_writer.writerow([
'Transaction Date',
'Posting Date',
'Description',
'Amount',
'Amount Foreign Currency',
'Foreign Currency',
'Exchange Rate',
'Raw',
])
for txn in txns:
csv_writer.writerow([
txn['transaction_date'].strftime('%Y-%m-%d'),
txn['posting_date'].strftime('%Y-%m-%d'),
txn['description'],
txn['amount'],
txn['amount_foreign'],
txn['foreign_currency'],
txn['exchange_rate'],
txn['raw'],
])
| 2.78125 | 3 |
tests/test_events.py | trichter/sito | 18 | 12788665 | <reponame>trichter/sito
#!/usr/bin/env python
# by TR
from sito import Events
import os.path
import unittest
class TestCase(unittest.TestCase):
def setUp(self):
self.path = os.path.dirname(__file__)
self.eventfile = os.path.join(self.path, 'data_temp', 'event_list.txt')
self.eventfile2 = os.path.join(self.path, 'data', 'NEIC_test.txt')
try:
self.events = Events.read(self.eventfile)
except IOError:
            print ('Load events from server...')
self.events = Events.load(min_datetime='2010-1-1', max_datetime='2011-1-1', min_magnitude=7.0)
self.events.write(self.eventfile)
def test_events_read_NEIC(self):
events3 = Events.read(self.eventfile2, regex=Events.regex_NEIC)
events4 = Events.read(self.eventfile2, regex=Events.regex_NEIC2)
self.assertEqual(len(events3), len(events4))
def test_events_IO(self):
eventfile3 = os.path.join(self.path, 'temp', 'test_events_IO.txt')
self.events[:3].write(eventfile3)
events2 = Events.read(eventfile3)
self.assertEqual(len(events2), 3)
def test_events_add(self):
events_add1 = self.events[:2]
events_add2 = self.events[2:4]
self.assertEqual(self.events[:4], events_add1 + events_add2)
def test_events_pick(self):
events2 = self.events.pick(latitude= -21., longitude= -69., minval=30, maxval=150, after='2010-05-10 12:00:00', bigger=7.5, replace=False)
self.assertEqual(len(events2), 1)
#print (self.events)
#print ('Some picked events:\n%s' % events2)
def test_events_plot(self):
#from pylab import figure, show
#self.events.plot(-22, -70, show=False)
#figure()
self.events.plot(-22, -70, lines=(0, 270), bigmap=True, radius='depth', color='datetime', show=False)
#show()
def suite():
return unittest.makeSuite(TestCase, 'test')
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 2.53125 | 3 |
mid_software/HoneywellInterface.py | bluthen/isadore_electronics | 0 | 12788666 | <reponame>bluthen/isadore_electronics<filename>mid_software/HoneywellInterface.py
# Copyright 2010-2019 <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import serial
import struct
from crc_algorithms import Crc
# TODO: fold some of this into a general MODBUS class
# TODO: add CRC check when processing replies
class Honeywell3300ReadRegisterCmd:
"""Only class in this interface thus far. We are only reading from these units."""
def __init__(self,slaveAddy,functionCode,registerAddy,numRegisters):
self.slaveAddy = slaveAddy
self.functionCode = functionCode
self.registerAddy = registerAddy
self.numRegisters = numRegisters
self.crc = Crc(width=16,poly=0x8005,reflect_in=True,
xor_in=0xFFFF,reflect_out=True,
xor_out=0x0000)
self.rplyBytes = 0
self.rplyData = list() # list of 16 bit integer data
def performCRC(self,data):
return self.crc.bit_by_bit(data)
    def createPacket(self):
        # MODBUS RTU frame: slave address, function code,
        # register address (hi byte, lo byte), register count, CRC16
        buffer = struct.pack("B",self.slaveAddy)
        buffer += struct.pack("B",self.functionCode)
        buffer += struct.pack("BB",self.registerAddy[0],self.registerAddy[1])
        buffer += struct.pack(">H",self.numRegisters)
        buffer += struct.pack(">H",self.performCRC(buffer))
        return buffer
    def processReply(self,rplyBuf):
        startPos = rplyBuf.tell()
        rplySlaveAddy = ord(rplyBuf.read(1))
        rplyFunctionCode = ord(rplyBuf.read(1))
        # TODO: check slave address and function code against the request
        self.rplyBytes = ord(rplyBuf.read(1))
        # TODO: test length against expected length
        for i in range(self.numRegisters):
            self.rplyData.append(struct.unpack(">H",rplyBuf.read(2))[0])
class Honeywell3300ReadSP_PVcmd(Honeywell3300ReadRegisterCmd):
FUNCTION_CODE = 0x03
REGISTER_ADDY = (0x00,0x00)
NUM_REGISTERS = 2
    def __init__(self,slaveAddy,sensorIDs):
        Honeywell3300ReadRegisterCmd.__init__(self,slaveAddy,self.FUNCTION_CODE,self.REGISTER_ADDY,self.NUM_REGISTERS)
        self.sensorIDs = sensorIDs
    def getPV(self):
        # process value, reported by the unit in tenths
        return float(self.rplyData[0])/10.0
    def getSP(self):
        # set point, reported by the unit in tenths
        return float(self.rplyData[1])/10.0
    def toWWWParam(self):
        return (str(self.sensorIDs[0])+","+str(self.sensorIDs[1]),
                str(self.getPV())+","+str(self.getSP()))
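# Example usage (a sketch, not part of the original module; the serial port,
# baud rate and slave address are assumptions):
#   import io
#   cmd = Honeywell3300ReadSP_PVcmd(1, sensorIDs=(10, 11))
#   with serial.Serial('/dev/ttyUSB0', 9600, timeout=1) as port:
#       port.write(cmd.createPacket())
#       # reply: addr + func + byte count + 2 registers (2 bytes each) + CRC
#       cmd.processReply(io.BytesIO(port.read(9)))
#       print(cmd.toWWWParam())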
# Local Variables:
# indent-tabs-mode: t
# python-indent: 4
# tab-width: 4
# End:
| 2.296875 | 2 |
examples/python/Polyhedron_incremental_builder.py | sloriot/cgal-swig-bindings | 0 | 12788667 | <gh_stars>0
from __future__ import print_function
from CGAL.CGAL_Polyhedron_3 import Polyhedron_modifier
from CGAL.CGAL_Polyhedron_3 import Polyhedron_3
from CGAL.CGAL_Polyhedron_3 import ABSOLUTE_INDEXING
from CGAL.CGAL_Kernel import Point_3
# declare a modifier interfacing the incremental_builder
m = Polyhedron_modifier()
# define a triangle
m.begin_surface(3, 1)
m.add_vertex(Point_3(0, 0, 0))
m.add_vertex(Point_3(0, 1, 0))
m.add_vertex(Point_3(1, 0.5, 0))
m.begin_facet()
m.add_vertex_to_facet(0)
m.add_vertex_to_facet(1)
m.add_vertex_to_facet(2)
m.end_facet()
P = Polyhedron_3()
# create the triangle in P
P.delegate(m)
print("(v,f,e) = ", P.size_of_vertices(), P.size_of_facets(), divmod(P.size_of_halfedges(), 2)[0])
# clear the modifier
m.clear()
# define another triangle, reusing vertices in the polyhedron
m.begin_surface(1, 1, 0, ABSOLUTE_INDEXING)
m.add_vertex(Point_3(-1, 0.5, 0))
m.begin_facet()
m.add_vertex_to_facet(1)
m.add_vertex_to_facet(0)
m.add_vertex_to_facet(3)
m.end_facet()
# append a triangle incident to the existing one
P.delegate(m)
print("(v,f,e) = ", P.size_of_vertices(), P.size_of_facets(), divmod(P.size_of_halfedges(), 2)[0])
assert P.is_valid()
| 2.234375 | 2 |
python3/trec_car/__init__.py | flaviomartins/trec-car-tools | 39 | 12788668 | <gh_stars>10-100
"""__init__ module for trec-car-tools, imports all necessary functions for reading cbor data provided in the TREC CAR"""
__version__ = 1.0
__all__ = ['read_data', 'format_runs']
| 1.054688 | 1 |
wotd-tomorrow.py | mwbetrg/englishdb | 0 | 12788669 | <reponame>mwbetrg/englishdb<gh_stars>0
#!/usr/bin/python
#Created : Sat 25 Jul 2015 09:46:47 PM UTC
#Last Modified : Sat 25 Jul 2015 10:00:25 PM UTC
import os
import sys
#qpy:2
#qpy:console
import site
from peewee import *
import datetime
#database = SqliteDatabase('english-notes-exercises.sqlite', **{})
database = SqliteDatabase('/storage/extSdCard/englishdb/english-notes-exercises.sqlite', **{})
class BaseModel(Model):
class Meta:
database = database
class Iotd(BaseModel):
date = TextField(null=True)
idiom = TextField(unique=True)
meaning = TextField(null=True)
sentence = TextField(null=True)
class Meta:
db_table = 'iotd'
class Totd(BaseModel):
date = TextField(null=True)
issue = TextField(unique=True)
sentence = TextField(null=True)
class Meta:
db_table = 'totd'
class Wotd(BaseModel):
date = TextField(null=True)
    meaning = TextField(null=True)
part = TextField(null=True)
sentence = TextField(null=True)
word = TextField(unique=True)
class Meta:
db_table = 'wotd'
today = datetime.datetime.today()
tomorrow = today + datetime.timedelta(days=1)
esok = tomorrow.strftime("%Y%m%d")
w = Wotd.select().where(Wotd.date == esok)
for i in w:
print "\n["+i.date+"] "+i.word+" ("+i.meaning+") : "+i.sentence
| 2.84375 | 3 |
geokey/applications/tests/test_views.py | universityofsussex/geokey | 0 | 12788670 | """Tests for views of applications."""
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponseRedirect
from nose.tools import raises
from oauth2_provider.models import AccessToken
from rest_framework.test import APIRequestFactory
from geokey.projects.tests.model_factories import UserFactory
from ..views import (
ApplicationOverview, ApplicationCreate, ApplicationSettings,
ApplicationDelete, ApplicationConnected, ApplicationDisconnect
)
from ..models import Application
from .model_factories import ApplicationFactory
class ApplicationOverviewTest(TestCase):
def test_get_with_user(self):
view = ApplicationOverview.as_view()
url = reverse('admin:app_overview')
request = APIRequestFactory().get(url)
request.user = UserFactory.create()
response = view(request).render()
self.assertEqual(response.status_code, 200)
def test_get_with_anonymous(self):
view = ApplicationOverview.as_view()
url = reverse('admin:app_overview')
request = APIRequestFactory().get(url)
request.user = AnonymousUser()
response = view(request)
self.assertTrue(isinstance(response, HttpResponseRedirect))
class ApplicationConnectedTest(TestCase):
def test_get_with_user(self):
view = ApplicationConnected.as_view()
url = reverse('admin:app_connected')
request = APIRequestFactory().get(url)
request.user = UserFactory.create()
response = view(request).render()
self.assertEqual(response.status_code, 200)
def test_get_with_anonymous(self):
view = ApplicationConnected.as_view()
url = reverse('admin:app_connected')
request = APIRequestFactory().get(url)
request.user = AnonymousUser()
response = view(request)
self.assertTrue(isinstance(response, HttpResponseRedirect))
class ApplicationDisconnectTest(TestCase):
def setUp(self):
self.user = UserFactory.create()
self.app = ApplicationFactory.create()
self.token = AccessToken.objects.create(
user=self.user,
application=self.app,
token='df0af6a395b4cd072445b3832e9379bfee257da0',
scope=1,
expires='2030-12-31T23:59:01+00:00'
)
@raises(AccessToken.DoesNotExist)
def test_get_with_user(self):
view = ApplicationDisconnect.as_view()
url = reverse('admin:app_disconnect', kwargs={'app_id': self.app.id})
request = APIRequestFactory().get(url)
from django.contrib.messages.storage.fallback import FallbackStorage
setattr(request, 'session', 'session')
messages = FallbackStorage(request)
setattr(request, '_messages', messages)
request.user = self.user
response = view(request, app_id=self.app.id)
self.assertTrue(isinstance(response, HttpResponseRedirect))
AccessToken.objects.get(pk=self.token.id)
def test_get_with_anonymous(self):
view = ApplicationDisconnect.as_view()
url = reverse('admin:app_disconnect', kwargs={'app_id': self.app.id})
request = APIRequestFactory().get(url)
request.user = AnonymousUser()
response = view(request, app_id=self.app.id)
self.assertTrue(isinstance(response, HttpResponseRedirect))
self.assertIsNotNone(AccessToken.objects.get(pk=self.token.id))
def test_get_with_unconnected_user(self):
view = ApplicationDisconnect.as_view()
url = reverse('admin:app_disconnect', kwargs={'app_id': self.app.id})
request = APIRequestFactory().get(url)
request.user = UserFactory.create()
response = view(request, app_id=self.app.id)
self.assertTrue(isinstance(response, HttpResponseRedirect))
class ApplicationCreateTest(TestCase):
def test_get_with_user(self):
view = ApplicationCreate.as_view()
url = reverse('admin:app_register')
request = APIRequestFactory().get(url)
request.user = UserFactory.create()
response = view(request).render()
self.assertEqual(response.status_code, 200)
def test_get_with_anonymous(self):
view = ApplicationCreate.as_view()
url = reverse('admin:app_register')
request = APIRequestFactory().get(url)
request.user = AnonymousUser()
response = view(request)
self.assertTrue(isinstance(response, HttpResponseRedirect))
def test_post_with_user(self):
data = {
'name': '<NAME>',
            'description': '',
'download_url': 'http://example.com',
'redirect_uris': 'http://example.com',
'authorization_grant_type': 'password',
'skip_authorization': False,
}
view = ApplicationCreate.as_view()
url = reverse('admin:app_register')
request = APIRequestFactory().post(url, data)
request.user = UserFactory.create()
from django.contrib.messages.storage.fallback import FallbackStorage
setattr(request, 'session', 'session')
messages = FallbackStorage(request)
setattr(request, '_messages', messages)
response = view(request)
self.assertEqual(response.status_code, 302)
self.assertEqual(Application.objects.count(), 1)
def test_post_with_anonymous(self):
data = {
'name': '<NAME>',
'description': '',
'download_url': 'http://example.com',
'redirect_uris': 'http://example.com',
'authorization_grant_type': 'password',
'skip_authorization': False,
}
view = ApplicationCreate.as_view()
url = reverse('admin:app_register')
request = APIRequestFactory().post(url, data)
request.user = AnonymousUser()
response = view(request)
self.assertEqual(response.status_code, 302)
self.assertEqual(Application.objects.count(), 0)
class ApplicationSettingsTest(TestCase):
def setUp(self):
self.creator = UserFactory.create()
self.app = ApplicationFactory.create(**{'user': self.creator})
def test_get_with_creator(self):
view = ApplicationSettings.as_view()
url = reverse('admin:app_settings', kwargs={'app_id': self.app.id})
request = APIRequestFactory().get(url)
request.user = self.creator
response = view(request, app_id=self.app.id).render()
self.assertEqual(response.status_code, 200)
self.assertNotContains(
response,
            'You are not the owner of this application and therefore not '
'allowed to access this app.'
)
def test_get_with_user(self):
view = ApplicationSettings.as_view()
url = reverse('admin:app_settings', kwargs={'app_id': self.app.id})
request = APIRequestFactory().get(url)
request.user = UserFactory.create()
response = view(request, app_id=self.app.id).render()
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
'You are not the owner of this application and therefore not '
'allowed to access this app.'
)
def test_get_with_anonymous(self):
view = ApplicationSettings.as_view()
url = reverse('admin:app_settings', kwargs={'app_id': self.app.id})
request = APIRequestFactory().get(url)
request.user = AnonymousUser()
response = view(request, app_id=self.app.id)
self.assertTrue(isinstance(response, HttpResponseRedirect))
def test_post_with_creator(self):
data = {
'name': '<NAME>',
'description': '',
'download_url': 'http://example.com',
'redirect_uris': 'http://example.com',
'authorization_grant_type': 'password',
'skip_authorization': True,
}
view = ApplicationSettings.as_view()
url = reverse('admin:app_settings', kwargs={'app_id': self.app.id})
request = APIRequestFactory().post(url, data)
request.user = self.creator
from django.contrib.messages.storage.fallback import FallbackStorage
setattr(request, 'session', 'session')
messages = FallbackStorage(request)
setattr(request, '_messages', messages)
response = view(request, app_id=self.app.id).render()
self.assertEqual(response.status_code, 200)
self.assertNotContains(
response,
            'You are not the owner of this application and therefore not '
'allowed to access this app.'
)
ref = Application.objects.get(pk=self.app.id)
self.assertEqual(ref.name, data.get('name'))
self.assertEqual(ref.description, data.get('description'))
self.assertEqual(ref.download_url, data.get('download_url'))
self.assertEqual(ref.redirect_uris, data.get('redirect_uris'))
self.assertEqual(
ref.authorization_grant_type,
data.get('authorization_grant_type')
)
def test_post_with_user(self):
data = {
'name': '<NAME>',
'description': '',
'download_url': 'http://example.com/download',
'redirect_uris': 'http://example.com/redirect',
'authorization_grant_type': 'password',
'skip_authorization': True,
}
view = ApplicationSettings.as_view()
url = reverse('admin:app_settings', kwargs={'app_id': self.app.id})
request = APIRequestFactory().post(url, data)
request.user = UserFactory.create()
response = view(request, app_id=self.app.id).render()
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
'You are not the owner of this application and therefore not '
'allowed to access this app.'
)
ref = Application.objects.get(pk=self.app.id)
self.assertNotEqual(ref.name, data.get('name'))
self.assertNotEqual(ref.description, data.get('description'))
self.assertNotEqual(ref.download_url, data.get('download_url'))
self.assertNotEqual(ref.redirect_uris, data.get('redirect_uris'))
self.assertNotEqual(
ref.authorization_grant_type,
data.get('authorization_grant_type')
)
def test_post_with_anonymous(self):
data = {
'name': '<NAME>',
'description': '',
'download_url': 'http://example.com/download',
'redirect_uris': 'http://example.com/redirect',
'authorization_grant_type': 'password',
'skip_authorization': True,
}
view = ApplicationSettings.as_view()
url = reverse('admin:app_settings', kwargs={'app_id': self.app.id})
request = APIRequestFactory().post(url, data)
request.user = AnonymousUser()
response = view(request, app_id=self.app.id)
self.assertTrue(isinstance(response, HttpResponseRedirect))
ref = Application.objects.get(pk=self.app.id)
self.assertNotEqual(ref.name, data.get('name'))
self.assertNotEqual(ref.description, data.get('description'))
self.assertNotEqual(ref.download_url, data.get('download_url'))
self.assertNotEqual(ref.redirect_uris, data.get('redirect_uris'))
self.assertNotEqual(
ref.authorization_grant_type,
data.get('authorization_grant_type')
)
class ApplicationDeleteTest(TestCase):
def setUp(self):
self.creator = UserFactory.create()
self.app = ApplicationFactory.create(**{'user': self.creator})
def test_get_with_creator(self):
view = ApplicationDelete.as_view()
url = reverse('admin:app_delete', kwargs={'app_id': self.app.id})
request = APIRequestFactory().get(url)
from django.contrib.messages.storage.fallback import FallbackStorage
setattr(request, 'session', 'session')
messages = FallbackStorage(request)
setattr(request, '_messages', messages)
request.user = self.creator
response = view(request, app_id=self.app.id)
self.assertTrue(isinstance(response, HttpResponseRedirect))
def test_get_with_user(self):
view = ApplicationDelete.as_view()
url = reverse('admin:app_delete', kwargs={'app_id': self.app.id})
request = APIRequestFactory().get(url)
request.user = UserFactory.create()
response = view(request, app_id=self.app.id).render()
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
'You are not the owner of this application and therefore not '
'allowed to access this app.'
)
def test_get_with_anonymous(self):
view = ApplicationDelete.as_view()
url = reverse('admin:app_delete', kwargs={'app_id': self.app.id})
request = APIRequestFactory().get(url)
request.user = AnonymousUser()
response = view(request, app_id=self.app.id)
self.assertTrue(isinstance(response, HttpResponseRedirect))
| 2.296875 | 2 |
gluoncv/model_zoo/center_net/deconv_resnet.py | JSoothe/gluon-cv | 48 | 12788671 | <gh_stars>10-100
"""ResNet with Deconvolution layers for CenterNet object detection."""
# pylint: disable=unused-argument
from __future__ import absolute_import
import warnings
import math
import mxnet as mx
from mxnet.context import cpu
from mxnet.gluon import nn
from mxnet.gluon import contrib
from .. model_zoo import get_model
__all__ = ['DeconvResnet', 'get_deconv_resnet',
'resnet18_v1b_deconv', 'resnet18_v1b_deconv_dcnv2',
'resnet50_v1b_deconv', 'resnet50_v1b_deconv_dcnv2',
'resnet101_v1b_deconv', 'resnet101_v1b_deconv_dcnv2']
class BilinearUpSample(mx.init.Initializer):
"""Initializes weights as bilinear upsampling kernel.
Example
-------
>>> # Given 'module', an instance of 'mxnet.module.Module',
initialize weights to bilinear upsample...
>>> init = mx.initializer.BilinearUpSample()
>>> module.init_params(init)
>>> for dictionary in module.get_params():
... for key in dictionary:
... print(key)
... print(dictionary[key].asnumpy())
...
fullyconnected0_weight
[[ 0. 0. 0.]]
"""
def __init__(self):
super(BilinearUpSample, self).__init__()
def _init_weight(self, _, arr):
mx.nd.random.normal(0, 0.01, arr.shape, out=arr)
f = math.ceil(arr.shape[2] / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(arr.shape[2]):
for j in range(arr.shape[3]):
arr[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
for c in range(1, arr.shape[0]):
arr[c, 0, :, :] = arr[0, 0, :, :]
class DeconvResnet(nn.HybridBlock):
"""Deconvolutional ResNet.
Parameters
----------
base_network : str
Name of the base feature extraction network.
deconv_filters : list of int
Number of filters for deconv layers.
deconv_kernels : list of int
Kernel sizes for deconv layers.
pretrained_base : bool
Whether load pretrained base network.
norm_layer : mxnet.gluon.nn.HybridBlock
Type of Norm layers, can be BatchNorm, SyncBatchNorm, GroupNorm, etc.
norm_kwargs : dict
Additional kwargs for `norm_layer`.
use_dcnv2 : bool
If true, will use DCNv2 layers in upsampling blocks
"""
def __init__(self, base_network='resnet18_v1b',
deconv_filters=(256, 128, 64), deconv_kernels=(4, 4, 4),
pretrained_base=True, norm_layer=nn.BatchNorm, norm_kwargs=None,
use_dcnv2=False, **kwargs):
super(DeconvResnet, self).__init__(**kwargs)
assert 'resnet' in base_network
net = get_model(base_network, pretrained=pretrained_base)
self._norm_layer = norm_layer
self._norm_kwargs = norm_kwargs if norm_kwargs is not None else {}
self._use_dcnv2 = use_dcnv2
if 'v1b' in base_network:
feat = nn.HybridSequential()
feat.add(*[net.conv1,
net.bn1,
net.relu,
net.maxpool,
net.layer1,
net.layer2,
net.layer3,
net.layer4])
self.base_network = feat
else:
raise NotImplementedError()
with self.name_scope():
self.deconv = self._make_deconv_layer(deconv_filters, deconv_kernels)
def _get_deconv_cfg(self, deconv_kernel):
"""Get the deconv configs using presets"""
if deconv_kernel == 4:
padding = 1
output_padding = 0
elif deconv_kernel == 3:
padding = 1
output_padding = 1
elif deconv_kernel == 2:
padding = 0
output_padding = 0
else:
raise ValueError('Unsupported deconvolution kernel: {}'.format(deconv_kernel))
return deconv_kernel, padding, output_padding
def _make_deconv_layer(self, num_filters, num_kernels):
# pylint: disable=unused-variable
"""Make deconv layers using the configs"""
assert len(num_kernels) == len(num_filters), \
'Deconv filters and kernels number mismatch: {} vs. {}'.format(
len(num_filters), len(num_kernels))
layers = nn.HybridSequential('deconv_')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.base_network.initialize()
in_planes = self.base_network(mx.nd.zeros((1, 3, 256, 256))).shape[1]
for planes, k in zip(num_filters, num_kernels):
kernel, padding, output_padding = self._get_deconv_cfg(k)
if self._use_dcnv2:
assert hasattr(contrib.cnn, 'ModulatedDeformableConvolution'), \
"No ModulatedDeformableConvolution found in mxnet, consider upgrade..."
layers.add(contrib.cnn.ModulatedDeformableConvolution(planes,
kernel_size=3,
strides=1,
padding=1,
dilation=1,
num_deformable_group=1,
in_channels=in_planes))
else:
layers.add(nn.Conv2D(channels=planes,
kernel_size=3,
strides=1,
padding=1,
in_channels=in_planes))
layers.add(self._norm_layer(momentum=0.9, **self._norm_kwargs))
layers.add(nn.Activation('relu'))
layers.add(nn.Conv2DTranspose(channels=planes,
kernel_size=kernel,
strides=2,
padding=padding,
output_padding=output_padding,
use_bias=False,
in_channels=planes,
weight_initializer=BilinearUpSample()))
layers.add(self._norm_layer(momentum=0.9, **self._norm_kwargs))
layers.add(nn.Activation('relu'))
in_planes = planes
return layers
def hybrid_forward(self, F, x):
# pylint: disable=arguments-differ
"""HybridForward"""
y = self.base_network(x)
out = self.deconv(y)
return out
def get_deconv_resnet(base_network, pretrained=False, ctx=cpu(), use_dcnv2=False, **kwargs):
"""Get resnet with deconv layers.
Parameters
----------
base_network : str
Name of the base feature extraction network.
pretrained : bool
Whether load pretrained base network.
ctx : mxnet.Context
mx.cpu() or mx.gpu()
use_dcnv2 : bool
If true, will use DCNv2 layers in upsampling blocks
pretrained : type
Description of parameter `pretrained`.
Returns
-------
get_deconv_resnet(base_network, pretrained=False,
Description of returned object.
"""
net = DeconvResnet(base_network=base_network, pretrained_base=pretrained,
use_dcnv2=use_dcnv2, **kwargs)
with warnings.catch_warnings(record=True) as _:
warnings.simplefilter("always")
net.initialize()
net.collect_params().reset_ctx(ctx)
return net
def resnet18_v1b_deconv(**kwargs):
"""Resnet18 v1b model with deconv layers.
Returns
-------
HybridBlock
A Resnet18 v1b model with deconv layers.
"""
kwargs['use_dcnv2'] = False
return get_deconv_resnet('resnet18_v1b', **kwargs)
def resnet18_v1b_deconv_dcnv2(**kwargs):
"""Resnet18 v1b model with deconv layers and deformable v2 conv layers.
Returns
-------
HybridBlock
A Resnet18 v1b model with deconv layers and deformable v2 conv layers.
"""
kwargs['use_dcnv2'] = True
return get_deconv_resnet('resnet18_v1b', **kwargs)
def resnet50_v1b_deconv(**kwargs):
"""Resnet50 v1b model with deconv layers.
Returns
-------
HybridBlock
A Resnet50 v1b model with deconv layers.
"""
kwargs['use_dcnv2'] = False
return get_deconv_resnet('resnet50_v1b', **kwargs)
def resnet50_v1b_deconv_dcnv2(**kwargs):
"""Resnet50 v1b model with deconv layers and deformable v2 conv layers.
Returns
-------
HybridBlock
A Resnet50 v1b model with deconv layers and deformable v2 conv layers.
"""
kwargs['use_dcnv2'] = True
return get_deconv_resnet('resnet50_v1b', **kwargs)
def resnet101_v1b_deconv(**kwargs):
"""Resnet101 v1b model with deconv layers.
Returns
-------
HybridBlock
A Resnet101 v1b model with deconv layers.
"""
kwargs['use_dcnv2'] = False
return get_deconv_resnet('resnet101_v1b', **kwargs)
def resnet101_v1b_deconv_dcnv2(**kwargs):
"""Resnet101 v1b model with deconv layers and deformable v2 conv layers.
Returns
-------
HybridBlock
A Resnet101 v1b model with deconv layers and deformable v2 conv layers.
"""
kwargs['use_dcnv2'] = True
return get_deconv_resnet('resnet101_v1b', **kwargs)
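# Example usage (a sketch; the input size is an assumption):
#   net = resnet18_v1b_deconv(pretrained=False)
#   feat = net(mx.nd.zeros((1, 3, 512, 512)))  # upsampled feature map
#   print(feat.shape)  # expected: (1, 64, 128, 128), i.e. output stride 4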
| 2.203125 | 2 |
tests/pirateplayer/test_library.py | TestDotCom/pirateplayer | 12 | 12788672 | <gh_stars>10-100
from unittest import TestCase
from unittest.mock import MagicMock, patch
from pirateplayer.library import Library
class TestLibrary(TestCase):
def setUp(self):
self._root = '~/Music'
        dirtree = [
            # one-element tuples need a trailing comma, otherwise they are plain strings
            (self._root, ('Gorillaz', 'Daft Punk'), ('test.ogg',)),
            ('Gorillaz', ('Song Machine', 'Plastic Beach'), ()),
            ('Song Machine', (), ('Desole.flac', 'Aries.flac')),
            ('Daft Punk', ('Discovery',), ()),
            ('Discovery', (), ('One More Time.flac',))
        ]
self._mock_root = patch('pirateplayer.utils.confparse.get_root', return_value=self._root)
self._mockWalk = patch('os.walk', return_value=dirtree)
def test_init_library(self):
expected_filetree = {
self._root + '/' : sorted(('Gorillaz/', 'Daft Punk/', 'test.ogg')),
'Gorillaz/' : sorted(('Song Machine/', 'Plastic Beach/')),
'Song Machine/' : sorted(('Desole.flac', 'Aries.flac')),
'Daft Punk/' : ['Discovery/'],
'Discovery/' : ['One More Time.flac']
}
with self._mock_root:
with self._mockWalk:
library = Library()
self.assertEqual(library._filetree, expected_filetree)
def test_list_files(self):
expected_list = sorted(('Gorillaz/', 'Daft Punk/', 'test.ogg'))
with self._mock_root:
with self._mockWalk:
library = Library()
self.assertEqual(library.list_files(), expected_list)
def test_retrieve_file(self):
expected_file = ['Daft Punk/']
with self._mock_root:
with self._mockWalk:
library = Library()
self.assertEqual(library.retrieve_file(0).names, expected_file)
def test_browse_up(self):
with self._mock_root:
expected_path = [self._root + '/']
with self._mockWalk:
library = Library()
library.retrieve_file(0)
library.browse_up()
self.assertEqual(library._dirpath, expected_path)
| 2.609375 | 3 |
tests/runtests.py | yasserglez/pytiger2c | 2 | 12788673 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Script to compile and run the test programs.
"""
import os
import unittest
import subprocess
SRC_DIR = os.path.join(os.path.dirname(__file__), os.pardir)
PYTIGER2C_SCRIPT = os.path.abspath(os.path.join(SRC_DIR, 'scripts', 'pytiger2c.py'))
PYTIGER2C_CMD = ['python', PYTIGER2C_SCRIPT]
TESTS_DIR = os.path.abspath(os.path.join(SRC_DIR, 'tests'))
SUCCCESS_DIR = os.path.abspath(os.path.join(TESTS_DIR, 'success'))
FAIL_DIR = os.path.abspath(os.path.join(TESTS_DIR, 'fail'))
class TigerTestCase(unittest.TestCase):
"""
    Base class for both kinds of tests.
"""
def __init__(self, parent_dir, tiger_file):
"""
        Initialize the test.
"""
super(TigerTestCase, self).__init__()
self._tiger_file = os.path.join(parent_dir, tiger_file)
self._exec_file = os.path.join(parent_dir, tiger_file[:-4])
self._pytiger2c_cmd = PYTIGER2C_CMD + [self._tiger_file, '--output', self._exec_file]
self._in_file = os.path.join(parent_dir, tiger_file[:-4] + '.in')
if not os.path.isfile(self._in_file):
self._in_file = None
self._out_file = os.path.join(parent_dir, tiger_file[:-4] + '.out')
if not os.path.isfile(self._out_file):
self._out_file = None
self._err_file = os.path.join(parent_dir, tiger_file[:-4] + '.err')
if not os.path.isfile(self._err_file):
self._err_file = None
self._tmp_file = os.path.join(parent_dir, tiger_file[:-4] + '.tmp')
def shortDescription(self):
"""
        Return a short description of the test.
"""
return os.path.basename(self._tiger_file)
def failIfDifferent(self, first_file, second_file):
"""
        Fail if the two files differ.
"""
diff_cmd = ['diff', first_file, second_file]
if subprocess.call(diff_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) != 0:
self.fail('Output does not match!')
def tearDown(self):
"""
        Clean up the environment after running the test.
"""
if os.path.isfile(self._exec_file):
os.remove(self._exec_file)
if os.path.isfile(self._tmp_file):
os.remove(self._tmp_file)
class SuccessTigerTestCase(TigerTestCase):
"""
    Represents a success test.
    The Tiger program used in this test must compile without errors and, when
    executed with the .in file as standard input, its output must match the
    contents of the .out file.
"""
def runTest(self):
"""
        Run the test.
"""
# Compile the program.
pytiger2c_ret = subprocess.call(self._pytiger2c_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if pytiger2c_ret != 0 or not os.path.isfile(self._exec_file):
self.fail('Compilation failed!')
# Execute the program.
exec_stdin = open(self._in_file) if self._in_file else None
if self._out_file is not None:
out_file = self._out_file
with open(self._tmp_file, 'w') as exec_stdout:
subprocess.call([self._exec_file], stdin=exec_stdin, stdout=exec_stdout, stderr=subprocess.PIPE)
elif self._err_file is not None:
out_file = self._err_file
with open(self._tmp_file, 'w') as exec_stderr:
subprocess.call([self._exec_file], stdin=exec_stdin, stdout=subprocess.PIPE, stderr=exec_stderr)
else:
out_file = None
if exec_stdin is not None:
exec_stdin.close()
if out_file is not None:
# Compare the output of the program.
self.failIfDifferent(self._tmp_file, out_file)
class FailTigerTestCase(TigerTestCase):
"""
    Represents a failure test.
    PyTiger2C must fail when trying to compile the Tiger program used in this
    test, and the error message printed to standard error must match the
    contents of the .err file.
"""
def runTest(self):
"""
        Run the test.
"""
# Try to compile the program.
with open(self._tmp_file, 'w') as pytiger2c_stderr:
pytiger2c_ret = subprocess.call(self._pytiger2c_cmd, stdout=subprocess.PIPE, stderr=pytiger2c_stderr)
if pytiger2c_ret != 1:
self.fail('Compilation succeded and it should fail!')
# Compare the error output.
self.failIfDifferent(self._tmp_file, self._err_file)
def main():
"""
    Main function of the script.
"""
suite = unittest.TestSuite()
    runner = unittest.TextTestRunner(verbosity=2)
if os.path.isdir(SUCCCESS_DIR):
for tiger_file in [f for f in os.listdir(SUCCCESS_DIR) if f.endswith('.tig')]:
test_case = SuccessTigerTestCase(SUCCCESS_DIR, tiger_file)
suite.addTest(test_case)
if os.path.isdir(FAIL_DIR):
for tiger_file in [f for f in os.listdir(FAIL_DIR) if f.endswith('.tig')]:
test_case = FailTigerTestCase(FAIL_DIR, tiger_file)
suite.addTest(test_case)
runner.run(suite)
if __name__ == '__main__':
main()
| 2.390625 | 2 |
BOJ10823.py | INYEONGKIM/BOJ | 2 | 12788674 | <reponame>INYEONGKIM/BOJ<gh_stars>1-10
res=""
while True:
try:
res+=input()
except EOFError:
break
# the whole input is one comma-separated list of integers, possibly split across lines
nums = map(int, res.split(","))
print(sum(nums))
| 2.453125 | 2 |
accountifie/cal/models.py | imcallister/accountifie | 4 | 12788675 | <reponame>imcallister/accountifie<filename>accountifie/cal/models.py
"""Date objects all nicely joinable together to allow SQL GROUP BY.
The calendar actually gets constructed by functions in __init__.py
"""
from datetime import date
from django.db import models
class Year(models.Model):
id = models.IntegerField(primary_key=True, help_text="4 digit year e.g. 2009")
def __unicode__(self):
return str(self.id)
class Quarter(models.Model):
id = models.CharField(primary_key=True, max_length=6, help_text='e.g. "2009Q1"')
year = models.ForeignKey(Year, db_index=True, on_delete=models.CASCADE)
def __unicode__(self):
return self.id
class Month(models.Model):
id = models.CharField(primary_key=True, max_length=7, help_text='e.g. "2009M04"')
quarter = models.ForeignKey(Quarter, db_index=True, on_delete=models.CASCADE)
year = models.ForeignKey(Year, db_index=True, on_delete=models.CASCADE)
def __unicode__(self):
return str(self.id)
def first_of_month(self):
yyyy, mm = self.id.split('M')
return date(int(yyyy), int(mm), 1)
def day_of_month(self, day):
yyyy, mm = self.id.split('M')
return date(int(yyyy), int(mm), day)
class Week(models.Model):
"Identified by YYYYWNN e.g. 2007W29"
id = models.CharField(primary_key=True, max_length=7)
    first_day = models.DateField(null=True)  # can be last year, hard to initialize in a new system
    last_day = models.DateField(null=True)  # can be next year, hard to initialize at end of year
start_month = models.ForeignKey(Month, related_name='start_week_set', on_delete=models.CASCADE)
end_month = models.ForeignKey(Month, related_name='end_week_set', on_delete=models.CASCADE)
year = models.ForeignKey(Year, on_delete=models.CASCADE)
def __unicode__(self):
return str(self.id)
class Day(models.Model):
id = models.DateField(primary_key=True)
month = models.ForeignKey(Month, db_index=True, on_delete=models.CASCADE)
quarter = models.ForeignKey(Quarter, db_index=True, on_delete=models.CASCADE)
year = models.ForeignKey(Year, db_index=True, on_delete=models.CASCADE)
def __unicode__(self):
return '%04d-%02d-%02d' % (self.id.year, self.id.month, self.id.day)
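# Example GROUP BY through the ORM (a sketch; a 'Sale' model with a ForeignKey
# to Day is an assumption, not part of this app):
#   Sale.objects.values('day__month').annotate(total=Sum('amount'))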
| 2.53125 | 3 |
softlabels/Deepfashion2-Faster-RCNN/eval_RCNN.py | bsridatta/robotfashion | 0 | 12788676 | <reponame>bsridatta/robotfashion
import matplotlib
import matplotlib.pyplot as plt
import transforms as T
import torch
import numpy as np
import cv2
import random
import datetime
import pickle
import time
import errno
import os
import re
import json
import itertools
import utils
from config import *
#############################################################################
# Main function for testing and evaluating images using Faster RCNN
#############################################################################
def eval_RCNN(model, api):
if weight_path is not None:
device = None
if use_cuda:
device = 'cuda'
else:
device = 'cpu'
checkpoint = torch.load(weight_path, map_location=device)
model.load_state_dict(checkpoint['model_state_dict'])
model.eval()
if use_cuda:
model = model.cuda()
for img in images_to_eval:
try:
print("annotating: " + img)
api(model, img, COCO_INSTANCE_CATEGORY_NAMES, detection_confidence)
        except Exception:
            # a bare except would also hide programming errors; report the file that failed
            print("oops! No matches found in: " + img)
def instance_bbox_api(model, img_path, cat_names, threshold=0.5, rect_th=3, text_size=1, text_th=2):
boxes, pred_cls, pred_id = utils.get_prediction(model, img_path, cat_names, threshold)
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
for i in range(len(boxes)):
cv2.rectangle(img, boxes[i][0], boxes[i][1],color=(0, 255, 0), thickness=rect_th)
cv2.putText(img,pred_cls[i], boxes[i][0], cv2.FONT_HERSHEY_SIMPLEX, text_size, (0,255,0),thickness=text_th)
if is_interactive:
matplotlib.use('TkAgg')
plt.figure(figsize=(20,30))
plt.imshow(img)
plt.xticks([])
plt.yticks([])
plt.show(block=True)
save_annos(boxes, pred_cls, pred_id, img_path, img)
def save_annos(boxes, pred_cls, pred_id, img_path, img):
annos = {"source": "user", "pair_id": 1}
for i in range(len(boxes)):
boxes[i] = list(itertools.chain.from_iterable(boxes[i]))
boxes[i] = [int(val) for val in boxes[i]]
item_data = {"scale": 1, "viewpoint": 2,
"zoom_in": 1, "style": 1, "occlusion": 2,
"bounding_box": boxes[i], "category_id": int(pred_id[i]), "category_name": pred_cls[i]}
annos.update({"item" + str(i + 1): item_data})
json_name = re.search(".+/(.+)\.(jpg|jpeg|png)", img_path).group(1)
ext = re.search(".+/(.+)\.(jpg|jpeg|png)", img_path).group(2)
if not os.path.exists(save_annos_dir + 'json/'):
os.makedirs(save_annos_dir + 'json/')
if not os.path.exists(save_annos_dir + 'img/'):
os.makedirs(save_annos_dir + 'img/')
with open(save_annos_dir + 'json/' + json_name + ".json", 'w') as f:
json.dump(annos, f)
plt.imsave(save_annos_dir + 'img/' + json_name + '_annotated.' + ext, img)
| 2.25 | 2 |
greedy/dot_product.py | younes-assou/some-data-structures-and-algos | 0 | 12788677 | def max_dot_product(a, b):
    # By the rearrangement inequality, pairing both sequences in
    # ascending sorted order maximizes the dot product.
    res = 0
    a = sorted(a)
    b = sorted(b)
for i in range(len(a)):
res += a[i] * b[i]
return res
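# Worked example (hypothetical input):
#   a = [1, 3, -5], b = [-2, 4, 1]
#   sorted: a = [-5, 1, 3], b = [-2, 1, 4]
#   maximum dot product = (-5)(-2) + (1)(1) + (3)(4) = 23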
n = int(input())
a = list(map(int, input().split()))
b = list(map(int, input().split()))
print(max_dot_product(a, b))
| 3.765625 | 4 |
rlf/Steer1.py | richardlford/pyev3dev2 | 0 | 12788678 | #!/usr/bin/env python3
from ev3dev2.motor import MoveSteering, MediumMotor, OUTPUT_A, OUTPUT_B, OUTPUT_C
from ev3dev2.sensor.lego import TouchSensor
from time import sleep
ts = TouchSensor()
steer_pair = MoveSteering(OUTPUT_A, OUTPUT_B)
mm = MediumMotor(OUTPUT_C)
mm.on(speed=100)
# steer_pair.on_for_rotations(steering=-20, speed=75, rotations=10)
steer_pair.on_for_degrees(steering=-100, speed=100, degrees=1440)
#while not ts.is_pressed: # while touch sensor is not pressed
# sleep(0.01)
mm.off()
steer_pair.off()
sleep(5)
| 2.828125 | 3 |
coviduci/db/test_sqlite.py | nazareno/covid-icu-monitor | 1 | 12788679 | <gh_stars>1-10
import os
import time
from absl.testing import absltest
from coviduci.db import sqlite
import sqlite3
import tempfile
class SQLiteDBTest(absltest.TestCase):
def test_init(self):
with tempfile.TemporaryDirectory() as tmp_folder:
sqldb = sqlite.SQLiteDB(os.path.join(tmp_folder, "test.db"))
def test_icu_creation(self):
with tempfile.TemporaryDirectory() as tmp_folder:
sqldb = sqlite.SQLiteDB(os.path.join(tmp_folder, "test.db"))
sqldb.upsert_icu("ICU1", "dep1", "city1", 3.44, 42.3, "0102")
icus = sqldb.get_icus()
self.assertEqual(icus[icus["icu_name"] == "ICU1"].iloc[0]["dept"], "dep1")
sqldb.upsert_icu("ICU2", "dep2", "city2", 3.44, 42.3)
icus = sqldb.get_icus()
self.assertEqual(icus[icus["icu_name"] == "ICU2"].iloc[0]["dept"], "dep2")
sqldb.upsert_icu("ICU1", "dep3", "city3", 3.44, 42.3, "0103")
icus = sqldb.get_icus()
self.assertEqual(icus[icus["icu_name"] == "ICU1"].iloc[0]["dept"], "dep3")
self.assertEqual(icus[icus["icu_name"] == "ICU1"].iloc[0]["telephone"], "0103")
self.assertEqual(sqldb.get_icu_id_from_name("ICU1"), 1)
self.assertEqual(sqldb.get_icu_id_from_name("ICU2"), 2)
def test_user_creation(self):
with tempfile.TemporaryDirectory() as tmp_folder:
sqldb = sqlite.SQLiteDB(os.path.join(tmp_folder, "test.db"))
            # Make sure you can't add a user with a non-existent ICU
with self.assertRaises(ValueError):
sqldb.add_user("ICU1", "Bob", "+33698158092", "Chercheur")
# Check normal insertion
sqldb.upsert_icu("ICU1", "dep1", "city1", 3.44, 42.3, "0102")
sqldb.add_user("ICU1", "Bob", "+33698158092", "Chercheur")
with self.assertRaises(sqlite3.IntegrityError):
sqldb.add_user("ICU1", "Bob", "+33698158092", "Chercheur")
users = sqldb.get_users()
def test_bedcount_update(self):
with tempfile.TemporaryDirectory() as tmp_folder:
sqldb = sqlite.SQLiteDB(os.path.join(tmp_folder, "test.db"))
# Make sure you can't insert without a valid icu_id
with self.assertRaises(ValueError):
sqldb.update_bedcount(1, "test", 10, 9, 8, 7, 6, 5, 4)
sqldb.upsert_icu("ICU1", "dep1", "city1", 3.44, 42.3, "0102")
sqldb.upsert_icu("ICU2", "dep1", "city1", 3.44, 42.3, "0102")
# Generate some bed updates:
for i in [1, 2]:
for j in range(10):
time.sleep(0.01)
sqldb.update_bedcount(i, "test", 10, 9, 8, 7, 6, 5, 4)
bedcount = sqldb.get_bedcount()
self.assertLen(bedcount, 2)
# Make sure the returned updates are the most recent
for i in [1, 2]:
res = sqldb.pd_execute(
f"SELECT MAX(update_ts) as max_ts FROM bed_updates WHERE icu_id = {i}"
)
max_ts = res.iloc[0]["max_ts"]
self.assertEqual(
bedcount[bedcount["icu_id"] == i].iloc[0]["update_ts"], max_ts
)
if __name__ == "__main__":
absltest.main()
| 2.71875 | 3 |
tests/rep/gcd.py | GillesArcas/cws | 13 | 12788680 | <filename>tests/rep/gcd.py
# Greatest common divisor via Euclid's algorithm:
# repeatedly replace (a, b) with (b, a % b) until the remainder is zero.
def gcd(a, b):
c = 1
while c != 0:
c = a % b
if c == 0:
return b
else:
a = b
b = c
a = 8136
b = 492
print(gcd(a, b))
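# Trace for (8136, 492): 8136 % 492 = 264, 492 % 264 = 228,
# 264 % 228 = 36, 228 % 36 = 12, 36 % 12 = 0  ->  prints 12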
| 3.265625 | 3 |
sensors/cmxdevice.py | tingxin/DevIoT_IndoorLocation_Starter_Kit | 0 | 12788681 | <gh_stars>0
__author__ = 'tingxxu'
import sys
from DevIoTGateway.sensor import Sensor, SProperty, SSetting
from DevIoTGateway.config import config
from logic.sensorlogic import SensorLogic
floor = config["service"]["map_name"]
default_location = {"location": "others"}
areas = []
for area in config["areas"]:
areas.append(area)
cmxdevice = Sensor("cmxdevice", "cmxdevice_1", "Personal Device")
location_property = SProperty("location", 1, [], areas[0]["name"])
location_property.description = "location of the device in %s" % floor
cmxdevice.add_property(location_property)
for area in areas:
location_property.range.append(area["name"])
x_property = SProperty("x", 0, None, 0)
x_property.description = "x-coordinate of the device in %s" % floor
cmxdevice.add_property(x_property)
y_property = SProperty("y", 0, None, 0)
y_property.description = "y-coordinate of the device in %s" % floor
cmxdevice.add_property(y_property)
mac_address_setting = SSetting("mac_address", 1, None, "34:a3:95:90:25:89", True)
mac_address_setting.description = "the ip address of user's device"
cmxdevice.add_setting(mac_address_setting)
mapCoordinate = "mapCoordinate"
mapInfo = "mapInfo"
class CmxdeviceLogic(SensorLogic):
modify_key = "settings"
@staticmethod
def modify(sensor, data):
if data['id'] == sensor.id:
if CmxdeviceLogic.modify_key in data and data[CmxdeviceLogic.modify_key] is not None:
updated_settings = {}
for d_setting in data[CmxdeviceLogic.modify_key]:
updated_settings[d_setting["name"]] = d_setting["value"]
sensor.update_settings(updated_settings)
return True
return False
@staticmethod
def update(sensor, data):
if data is not None:
for device in data:
try:
if device['ipAddress'] is None:
continue
same_device = False
ip_setting = sensor.setting('mac_address').lower()
if device['macAddress'] == ip_setting:
same_device = True
elif device['ipAddress'] == ip_setting:
same_device = True
elif isinstance(device['ipAddress'], list):
if device['ipAddress'][0] == ip_setting:
same_device = True
if same_device:
if device[mapInfo]['mapHierarchyString'] is None or device[mapInfo]['mapHierarchyString'] == floor:
new_property_value = {"x": device[mapCoordinate]['x'],
"y": device[mapCoordinate]['y']}
sensor.update_properties(new_property_value)
# for area_item in areas:
# if device[mapCoordinate]['x'] >= area_item['left']:
# if device[mapCoordinate]['y'] >= area_item['top']:
# if device[mapCoordinate]['x'] <= area_item['left'] + area_item['width']:
# if device[mapCoordinate]['y'] <= area_item['top'] + area_item['height']:
# location_new_value = {"location": area_item["name"]}
# sensor.update_properties(location_new_value)
# return
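                            # Hardcoded meeting-room region below (the coordinate bounds appear
                            # to come from the floor plan); the general area loop above is disabled.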
if 680 > device[mapCoordinate]['x'] > 450:
if abs(device[mapCoordinate]['y'] - 700) <= 20:
location_new_value = {"location": "Meeting Room"}
sensor.update_properties(location_new_value)
                except Exception:
                    print(sys.exc_info()[1])
                    sensor.update_properties(default_location)
| 2.28125 | 2 |
lib-opencc-android/src/main/jni/OpenCC/binding.gyp | huxiaomao/android-opencc | 5,895 | 12788682 | {
"includes": [
"node/global.gypi",
"node/configs.gypi",
"node/dicts.gypi",
"node/node_opencc.gypi",
]
}
| 1.078125 | 1 |
test_concat.py | gnumber13/fastapi_webpage | 0 | 12788683 | <filename>test_concat.py
import units as un
un.concat_blogs('markdown/')
| 1.320313 | 1 |
python/python_backup/PRAC_PYTHON/deb.py | SayanGhoshBDA/code-backup | 16 | 12788684 | <gh_stars>10-100
a = input("enter a no")
if a > 0:
    print "a is positive"
elif a == 0:
    print "a is zero"
else:
    print "a is negative" | 3.3125 | 3 |
functions/jaccard_cosine_similarity.py | DalavanCloud/UGESCO | 1 | 12788685 | import re
import math
from collections import Counter
import numpy as np
text1 = '<NAME> mangé du singe'
text2 = 'Nicole a mangé du rat'
class Similarity():
def compute_cosine_similarity(self, string1, string2):
        # cosine similarity: dot(v1, v2) / (||v1|| * ||v2||)
        # keep only the words common to both count vectors
        intersection = set(string1.keys()) & set(string2.keys())
        # numerator: dot product of the two count vectors
        numerator = sum([string1[x] * string2[x] for x in intersection])
        # sum of the squared counts of each vector
        # sum1 is for text1 and sum2 for text2
        sum1 = sum([string1[x]**2 for x in string1.keys()])
        sum2 = sum([string2[x]**2 for x in string2.keys()])
        # denominator: product of the Euclidean norms of the two vectors
        denominator = math.sqrt(sum1) * math.sqrt(sum2)
if not denominator:
return 0.0
else:
return round(numerator / float(denominator), 4)
def text_to_vector(self, text):
WORD = re.compile(r'\w+')
words = WORD.findall(text)
return Counter(words)
# Jaccard Similarity
def tokenize(self, string):
return string.lower().split(" ")
    def jaccard_similarity(self, string1, string2):
        # Jaccard index: |A ∩ B| / |A ∪ B| over the two token sets
        intersection = set(string1).intersection(set(string2))
union = set(string1).union(set(string2))
return len(intersection) / float(len(union))
similarity = Similarity()
# vector space
vector1 = similarity.text_to_vector(text1)
vector2 = similarity.text_to_vector(text2)
# split words into tokens
token1 = similarity.tokenize(text1)
token2 = similarity.tokenize(text2)
cosine = similarity.compute_cosine_similarity(vector1, vector2)
print('Cosine Similarity:', cosine)
jaccard = similarity.jaccard_similarity(token1, token2)
print('Jaccard Similarity:', jaccard)
| 3.46875 | 3 |
evolclust/test/test_data.py | maks-ym/evolclust | 0 | 12788686 | import data
import numpy as np
# TODO: split tests 1 test per assert statement
# TODO: move repeating constants out of functions
class TestHAPT:
def test_get_train_data(self):
d = data.HAPT()
assert d._train_attrs is None
d.get_train_data()
assert len(d._train_attrs) > 0
assert len(d.get_train_data()) > 0
def test_get_train_labels(self):
d = data.HAPT()
assert d._train_labels is None
d.get_train_labels()
assert len(d._train_labels) > 0
assert len(d.get_train_labels()) > 0
def test_get_test_data(self):
d = data.HAPT()
assert d._test_attrs is None
d.get_test_data()
assert len(d._test_attrs) > 0
assert len(d.get_test_data()) > 0
def test_get_test_labels(self):
d = data.HAPT()
assert d._test_labels is None
d.get_test_labels()
assert len(d._test_labels) > 0
assert len(d.get_test_labels()) > 0
def test_load_train_data(self):
d = data.HAPT()
assert d._train_attrs is None
assert d._train_labels is None
d.load_train_data()
assert len(d._train_attrs) > 0
assert len(d._train_labels) > 0
assert len(d._train_attrs) == len(d._train_labels)
assert len(d.get_train_data()) == len(d.get_train_labels())
def test_load_test_data(self):
d = data.HAPT()
assert d._test_attrs is None
assert d._test_labels is None
d.load_test_data()
assert len(d._test_attrs) > 0
assert len(d._test_labels) > 0
assert len(d._test_attrs) == len(d._test_labels)
assert len(d.get_test_data()) == len(d.get_test_labels())
def test_load_all_data(self):
d = data.HAPT()
assert d._train_attrs is None
assert d._train_labels is None
assert d._test_attrs is None
assert d._test_labels is None
d.load_all_data()
assert len(d._train_attrs) > 0
assert len(d._train_labels) > 0
assert len(d._test_attrs) > 0
assert len(d._test_labels) > 0
assert len(d._train_attrs) == len(d._train_labels)
assert len(d._test_attrs) == len(d._test_labels)
assert len(d.get_train_data()) == len(d.get_train_labels())
assert len(d.get_test_data()) == len(d.get_test_labels())
def test_get_labels_map(self):
orig_labels = {
1: "WALKING",
2: "WALKING_UPSTAIRS",
3: "WALKING_DOWNSTAIRS",
4: "SITTING",
5: "STANDING",
6: "LAYING",
7: "STAND_TO_SIT",
8: "SIT_TO_STAND",
9: "SIT_TO_LIE",
10: "LIE_TO_SIT",
11: "STAND_TO_LIE",
12: "LIE_TO_STAND"
}
d = data.HAPT()
assert d._labels == {}
d.get_labels_map()
assert d._labels == orig_labels
assert d.get_labels_map() == orig_labels
def test_aggregate_groups(self):
orig_labels = {
1: "WALKING",
2: "WALKING_UPSTAIRS",
3: "WALKING_DOWNSTAIRS",
4: "SITTING",
5: "STANDING",
6: "LAYING",
7: "STAND_TO_SIT",
8: "SIT_TO_STAND",
9: "SIT_TO_LIE",
10: "LIE_TO_SIT",
11: "STAND_TO_LIE",
12: "LIE_TO_STAND"
}
d = data.HAPT()
d._labels = orig_labels
d._test_labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
d._train_labels = [12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
d.aggregate_groups()
assert np.array_equal(d._aggregated_test_labels, np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2]))
assert np.array_equal(d._aggregated_train_labels, np.array([2, 2, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0]))
assert d._aggregated2initial_labels == {0: [1, 2, 3], 1: [4, 5, 6], 2: [7, 8, 9, 10, 11, 12]}
def test_get_aggr2initial_labs_map(self):
d = data.HAPT()
d.load_all_data()
d.aggregate_groups()
assert d.get_aggr2initial_labs_map() == {
'WALKING': ['WALKING', 'WALKING_UPSTAIRS', 'WALKING_DOWNSTAIRS'],
'STATIC': ['SITTING', 'STANDING', 'LAYING'],
'TRANSITION': ['STAND_TO_SIT', 'SIT_TO_STAND', 'SIT_TO_LIE', 'LIE_TO_SIT', 'STAND_TO_LIE', 'LIE_TO_STAND']
}
def test_get_aggregated_test_labels(self):
orig_labels = {
1: "WALKING",
2: "WALKING_UPSTAIRS",
3: "WALKING_DOWNSTAIRS",
4: "SITTING",
5: "STANDING",
6: "LAYING",
7: "STAND_TO_SIT",
8: "SIT_TO_STAND",
9: "SIT_TO_LIE",
10: "LIE_TO_SIT",
11: "STAND_TO_LIE",
12: "LIE_TO_STAND"
}
d = data.HAPT()
d._labels = orig_labels
d._test_labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
d._train_labels = [12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
assert d.get_aggregated_test_labels() == d._test_labels
d.aggregate_groups()
print(d._aggregated_test_labels)
assert np.array_equal(d.get_aggregated_test_labels(), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2]))
def test_get_aggregated_train_labels(self):
orig_labels = {
1: "WALKING",
2: "WALKING_UPSTAIRS",
3: "WALKING_DOWNSTAIRS",
4: "SITTING",
5: "STANDING",
6: "LAYING",
7: "STAND_TO_SIT",
8: "SIT_TO_STAND",
9: "SIT_TO_LIE",
10: "LIE_TO_SIT",
11: "STAND_TO_LIE",
12: "LIE_TO_STAND"
}
d = data.HAPT()
d._labels = orig_labels
d._test_labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
d._train_labels = [12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
assert d.get_aggregated_train_labels() == d._train_labels
d.aggregate_groups()
assert np.array_equal(d.get_aggregated_train_labels(), np.array([2, 2, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0]))
def test_get_aggregated_labels_map(self):
d = data.HAPT()
assert d.get_aggregated_labels_map() == {0: "WALKING", 1: "STATIC", 2: "TRANSITION"}
| 2.484375 | 2 |
incidentes/migraciones2/0002_auto_20181020_0802.py | Alvaruz/ATMS | 0 | 12788687 | <reponame>Alvaruz/ATMS
# Generated by Django 2.1.2 on 2018-10-20 11:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('incidentes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Categoria',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=15)),
('descripción', models.TextField()),
],
),
migrations.AlterField(
model_name='ticket',
name='categoria',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='incidentes.Categoria'),
),
]
| 1.53125 | 2 |
adadelta.py | morpheusthewhite/twitter-sent-dnn | 314 | 12788688 | <gh_stars>100-1000
"""
Adadelta algorithm implementation
"""
import numpy as np
import theano
import theano.tensor as T
def build_adadelta_updates(params, param_shapes, param_grads, rho=0.95, epsilon=0.001):
    # AdaDelta parameter update (Zeiler, 2012):
    #   E[g^2]_t  = rho * E[g^2]_{t-1} + (1 - rho) * g_t^2
    #   dx_t      = -sqrt(E[dx^2]_{t-1} + eps) / sqrt(E[g^2]_t + eps) * g_t
    #   E[dx^2]_t = rho * E[dx^2]_{t-1} + (1 - rho) * dx_t^2
    # E[g^2], initialized to zero
egs = [
theano.shared(
value = np.zeros(param_shape,
dtype = theano.config.floatX
),
borrow = True,
name = "Eg:" + param.name
)
for param_shape, param in zip(param_shapes, params)
]
# E[\delta x^2], initialized to zero
exs = [
theano.shared(
value = np.zeros(param_shape,
dtype = theano.config.floatX
),
borrow = True,
name = "Ex:" + param.name
)
for param_shape, param in zip(param_shapes, params)
]
new_egs = [
rho * eg + (1 - rho) * g ** 2
for eg, g in zip(egs, param_grads)
]
delta_x = [
-(T.sqrt(ex + epsilon) / T.sqrt(new_eg + epsilon)) * g
for new_eg, ex, g in zip(new_egs, exs, param_grads)
]
new_exs = [
rho * ex + (1 - rho) * (dx ** 2)
for ex, dx in zip(exs, delta_x)
]
    # materialize as lists so the pairs can be concatenated (zip() is lazy on Python 3)
    egs_updates = list(zip(egs, new_egs))
    exs_updates = list(zip(exs, new_exs))
param_updates = [
(p, p + dx)
for dx, g, p in zip(delta_x, param_grads, params)
]
updates = egs_updates + exs_updates + param_updates
return updates
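# Example usage (a sketch; the model symbols x, y, params and cost are
# assumptions, not part of this module):
#   grads = [T.grad(cost, p) for p in params]
#   shapes = [p.get_value().shape for p in params]
#   updates = build_adadelta_updates(params, shapes, grads)
#   train_fn = theano.function([x, y], cost, updates=updates)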
| 2.1875 | 2 |
tests/function/test_deprecate_version_module.py | gavincyi/auto-deprecator | 2 | 12788689 | import pytest
from auto_deprecator import deprecate
__version__ = "2.0.0"
@deprecate(
expiry="2.1.0",
version_module="tests.function.test_deprecate_version_module",
)
def simple_deprecate():
pass
@deprecate(
expiry="2.1.0", version_module="tests.function.conftest",
)
def failed_to_locate_version():
pass
@deprecate(
expiry="2.1.0", version_module="tests.function.not_existing_module",
)
def not_existing_module():
pass
def test_no_error_simple_deprecate():
with pytest.warns(DeprecationWarning) as warning:
simple_deprecate()
assert (
'Function "simple_deprecate" will be deprecated on version 2.1.0'
) in warning[0].message.args[0]
def test_failed_to_locate_version():
with pytest.raises(RuntimeError) as error:
failed_to_locate_version()
assert (
"Cannot find version (__version__) from the version module "
'"tests.function.conftest"'
) in str(error.value)
def test_not_existing_module():
with pytest.raises(RuntimeError) as error:
not_existing_module()
assert (
'Cannot locate version module "tests.function.not_existing_module"'
) in str(error.value)
| 2.59375 | 3 |
set_1/p1_6.py | PedroBernini/ipl-2021 | 0 | 12788690 | # Program to approximate the value of π.
p_d = 151
def isValidCell(cell, pd):
    # the cell lies inside the circle of diameter pd centered at the origin
    return (cell[0] ** 2 + cell[1] ** 2) ** 0.5 <= pd / 2
def getNumberValidCells(pd):
validCells = 0
limit = int(pd / 2)
for j in range(limit, -(limit + 1), -1):
for i in range(-limit, limit + 1):
if isValidCell((i, j), pd):
validCells += 1
return validCells
def getPiApproximation(numberValidCells, totalCells):
return numberValidCells / totalCells * 4
out = getPiApproximation(getNumberValidCells(p_d), p_d ** 2)
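# Why this works: the valid cells approximate a disk of diameter p_d inscribed
# in the (p_d x p_d) grid, so
#   valid / total  ~  pi * (p_d / 2)**2 / p_d**2  =  pi / 4
# and multiplying by 4 recovers pi; for p_d = 151 the result is roughly 3.14
# (the exact digits depend on the grid size).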
print(out) | 3.328125 | 3 |
qtrader/simulation/tests/__init__.py | aaron8tang/qtrader | 381 | 12788691 | <reponame>aaron8tang/qtrader
from qtrader.simulation.tests.arbitrage import Arbitrage
from qtrader.simulation.tests.moments import Moments
| 1.085938 | 1 |
cook.py | hahahaha666/pythonpachong | 0 | 12788692 | from urllib import request
url="http://www.renren.com/970973463"
headers={
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0',
'Cookie':'anonymid=jw6ali52-qw6ldx; depovince=GUZ; _r01_=1; JSESSIONID=abcv45u4hL5Z0cQdde5Rw; ick_login=99f8241c-bfc0-4cda-9ed9-a1126aa9021e; t=dd1e75d66334a9699f53bc6ddb8c20ea3; societyguester=dd1e75d66334a9699f53bc6ddb8c20ea3; id=970973463; xnsid=6eedc27; jebe_key=5ac606a2-3b4f-4863-80e9-1f0a22bfec2e%7C5f5e2728ff534657c04151fc12f87207%7C1558956815778%7C1%7C1558956814761; XNESSESSIONID=cdf65a586a5f; jebecookies=3b4a8a1d-30fc-44c3-8fe6-fd6adc9781b7|||||; ver=7.0; loginfrom=null; wp_fold=0'
}
req=request.Request(url,headers=headers)
req1=request.urlopen(req)
with open("renren","w",encoding="utf-8") as fp:
fp.write(req1.read().decode("utf-8"))
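
# Hedged alternative (editor sketch, not part of the original script): the
# same fetch via the third-party `requests` package, left uncalled so the
# stdlib-only flow above is unchanged. Assumes `requests` is installed.
def fetch_with_requests(target_url, request_headers, out_path="renren_requests"):
    import requests  # local import keeps the dependency optional
    resp = requests.get(target_url, headers=request_headers)
    with open(out_path, "w", encoding="utf-8") as fp:
        fp.write(resp.text)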
| 2.375 | 2 |
Chapter1/Modules.py | rabbitism/Beginning-Python-Practice | 0 | 12788693 | import math
from math import sqrt
import cmath
print("Module math imported")
print(math.floor(32.9))
print(int(32.9))
print(math.ceil(32.3))
print(math.ceil(32))
print(sqrt(9))
print(sqrt(2))
#cmath and Complex Numbers
##print(sqrt(-1)) This will trigger a ValueError: math domain error
print(cmath.sqrt(-1))
print((1+3j)*(9+4j))
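# Expected output for the complex-number examples (CPython repr):
#   cmath.sqrt(-1)   -> 1j
#   (1+3j) * (9+4j)  -> (-3+31j)   since 9 + 4j + 27j + 12j**2 = -3 + 31j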
| 2.875 | 3 |
First REST API/app.py | ccruz182/Python-Flask | 0 | 12788694 | from flask import Flask, jsonify, request
app = Flask(__name__) # Gives a unique name
stores = [
{
'name': 'MyStore',
'items': [
{
'name': 'My Item',
'price': 15.99
}
]
}
]
"""
@app.route('/') # Route of the endpoint 'http://www.google.com/'
def home():
return "Hello, world!"
"""
# POST /store. data: {name: }
@app.route('/store', methods=['POST'])
def create_store():
request_data = request.get_json()
new_store = {'name': request_data['name'], 'items': []}
stores.append(new_store)
return jsonify(new_store)
# GET /store/<string:name>
@app.route('/store/<string:name>')
def get_store(name):
store = list(filter(lambda store: store['name'] == name, stores))
if store == []:
return jsonify({'message': 'store not found'})
else:
return jsonify(store)
# GET /store
@app.route('/store')
def get_stores():
return jsonify({'stores': stores})
# POST /store/<string:name>/item
@app.route('/store/<string:name>/item', methods=['POST'])
def create_item_in_store(name):
request_data = request.get_json()
store = list(filter(lambda store: store['name'] == name, stores))
new_item = {'name': request_data['name'], 'price': request_data['price']}
if store == []:
return jsonify({'message': 'store not found'})
store[0]['items'].append(new_item)
return jsonify(new_item)
# GET /store/<string:name>/item
@app.route('/store/<string:name>/item')
def get_items_in_store(name):
store = list(filter(lambda store: store['name'] == name, stores))
if store == []:
return jsonify({'message': 'store not found'})
else:
return jsonify({'items': store[0]['items']})
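
# Example requests against the API above (sketch; assumes the server is
# running locally on port 5000):
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"name": "MyStore2"}' http://127.0.0.1:5000/store
#   curl http://127.0.0.1:5000/store/MyStore
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"name": "Chair", "price": 9.99}' http://127.0.0.1:5000/store/MyStore/item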
app.run(host='0.0.0.0', port=5000) | 3.296875 | 3 |
setup.py | yingnn/fq2vcf | 0 | 12788695 | #!/usr/bin/env python
"""fq2vcf
"""
from __future__ import division, print_function
import os
import glob
from setuptools import setup, find_packages
VERSION = '0.0.0'
scripts = ['scripts/fq2vcf']
scripts.extend(glob.glob('scripts/*.sh'))
scripts.extend(glob.glob('scripts/*.py'))
print(scripts)
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="fq2vcf",
packages=find_packages(),
version=VERSION,
description="fq2vcf",
maintainer='yingnn',
author='yingnn',
long_description=read('README.md'),
keywords=['SNP InDel calling workflow', 'SNP InDel calling pipeline'],
    license='MIT license',
include_package_data=True,
platforms=["Linux", "Mac OS-X", "Unix"],
install_requires=['argparse',
'configparser'],
scripts=scripts,
)
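
# Typical installation from a source checkout (assumption: the standard
# setuptools workflow; the scripts listed above land on PATH afterwards):
#   pip install .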
| 1.914063 | 2 |
python/viewer/radar_image.py | LordHui/ZendarSDK | 7 | 12788696 | import numpy as np
from collections import namedtuple
from util import (
vec3d_to_array,
quat_to_array,
array_to_vec3d_pb,
array_to_quat_pb,
)
from radar_data_streamer import RadarData
from data_pb2 import Image
Extrinsic = namedtuple('Extrinsic', ['position', 'attitude'])
class RadarImage(RadarData):
"""
This class is a Python representation of the protobuf Image object for
    convenient downstream operations
"""
def __init__(self, timestamp, frame_id, extrinsic, image_model, image):
self.timestamp = timestamp
self.frame_id = frame_id
self.extrinsic = extrinsic
self.image_model = image_model
self.image = image
@classmethod
def from_proto(cls, image_pb):
timestamp = image_pb.meta.timestamp
frame_id = image_pb.meta.frame_id
extrinsic = Extrinsic(
position=vec3d_to_array(image_pb.meta.position),
attitude=quat_to_array(image_pb.meta.attitude))
image_model = ImageModel(
origin=vec3d_to_array(image_pb.cartesian.model.origin),
di=vec3d_to_array(image_pb.cartesian.model.di),
dj=vec3d_to_array(image_pb.cartesian.model.dj))
# create the image array
image_shape = (image_pb.cartesian.data.cols,
image_pb.cartesian.data.rows)
image_data = np.frombuffer(image_pb.cartesian.data.data,
dtype=np.uint32)
# copy image_data because we do not own the memory
image = np.reshape(image_data.copy(), image_shape)
radar_image = cls(timestamp, frame_id, extrinsic, image_model, image)
return radar_image
def to_proto(self, timestamp, frame_id):
image_pb = Image()
image_pb.meta.timestamp = timestamp
image_pb.meta.frame_id = frame_id
# Setting the type to REAL_32U
image_pb.cartesian.data.type = 5
array_to_vec3d_pb(image_pb.meta.position,
self.extrinsic.position)
array_to_quat_pb(image_pb.meta.attitude,
self.extrinsic.attitude)
array_to_vec3d_pb(image_pb.cartesian.model.origin,
self.image_model.origin)
array_to_vec3d_pb(image_pb.cartesian.model.di,
self.image_model.di)
array_to_vec3d_pb(image_pb.cartesian.model.dj,
self.image_model.dj)
image_pb.cartesian.data.cols, image_pb.cartesian.data.rows = \
self.image.shape
return image_pb
class ImageModel(object):
"""
    Affine image model mapping world (ECEF) coordinates to image pixel indices.
"""
def __init__(self, origin, di, dj):
self.di = di
self.dj = dj
self.origin = origin
def global_to_image(self, ecef_point):
radar_to_image = ecef_point - self.origin
i_res = np.linalg.norm(self.di)
j_res = np.linalg.norm(self.dj)
i_dir = self.di/i_res
j_dir = self.dj/j_res
i_proj = int(round(radar_to_image.dot(i_dir)/i_res))
j_proj = int(round(radar_to_image.dot(j_dir)/j_res))
pixel_point = (i_proj, j_proj)
return pixel_point
def image_to_global(self, pixel_point):
i_idx = pixel_point[0]
j_idx = pixel_point[1]
ecef_point = self.origin + (i_idx*self.di) + (j_idx*self.dj)
return ecef_point
| 2.8125 | 3 |
contacts/__init__.py | heimann/contacts | 1 | 12788697 | # __ __ ___ _______
#/ `/ \|\ || /\ / `|/__`
#\__,\__/| \||/~~\\__,|.__/
#
"""
Contact Cards
~~~~~~~~~~~~~~~~~~~~~
Contacts lets you create Contact Cards in Python that just work, so you can focus on having meaningful
conversations with people who know who you are.
Here's how it works:
>>> from contacts import ContactCard
>>> card = ContactCard()
>>> card.name = '<NAME>'
>>> card.first_name = 'David'
>>> card.last_name = 'Heimann'
>>> card.photo = image_file
>>> card.phone_number = '+1XXXXXXX'
>>> card.twitter = '@david_heimann'
>>> card.build()
Card built.
>>> print(card)
Contact Card (vobject)
Name: <NAME>
First Name: David
Last Name: Heimann
Phone Number: +1XXXXXXXX
Photo: JPEG (2 MB)
Twitter: @david_heimann
Card built at: <timestamp>
:copyright: (c) 2017 by <NAME>.
:license: MIT, see LICENSE for more details.
"""
from .api import ContactCard | 3.5625 | 4 |
Gina.py | Zekx/CS332Fighting_Game | 0 | 12788698 | <reponame>Zekx/CS332Fighting_Game
import pygame
from boxes import HurtBox
from boxes import HitBox
from boxes import DamageBox
from boxes import InvincibleBox
from boxes import GrabBox
from projectile import GinaFireBall
from effects import *
from Character import Character
class Gina(Character):
def __init__(self):
"""
Initializes the class for the character Gina.
Stores her animations of all of her attacks and saves the statistics for the player.
:return:
"""
super().__init__()
self.name = 'Gina'
self.health = 200
self.meter = 100
self.jump_height = 400
self.portrait = pygame.image.load('Sprites/Gina/GinaPortrait.png')
self.image = pygame.image.load('Sprites/Gina/GinaStand1.png')
self.rect = self.image.get_rect()
self.x_offset = 10
self.y_offset = 30
self.dashImage = pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaFDash.png'), (180, 250))
self.neutralPosition = pygame.image.load('Sprites/Gina/GinaStand1.png')
self.jumpImage = pygame.image.load('Sprites/Gina/GinaJump4.png')
# The following loops add in all of the sprites for the animation...
# Inserts Gina's Victory animation.
for x in range(0, 120):
self.victory_animation.append(pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaWin.png'), (130, 250)))
# Inserts Gina's Time Out Lose animation
for x in range(0, 60):
self.defeat_animation.append(pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaTimeLose.png'), (130, 250)))
# Inserts Gina's Crumble Lose animation
for x in range(0, 15):
self.crumble_animation.append(pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaHurt1.png'), (130, 250)))
for x in range(0, 15):
self.crumble_animation.append(pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaHurt2.png'), (130, 250)))
for x in range(0, 30):
self.crumble_animation.append(pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaKnockdown.png'), (240, 130)))
# Inserts Gina's standing animation.
for x in range(0, 30):
self.standing.append(pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaStand1.png'), (130, 250)))
for x in range(0, 30):
self.standing.append(pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaStand2.png'), (130, 250)))
for x in range(0, 30):
self.standing.append(pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaStand3.png'), (130, 250)))
#Inserts Gina's crouching animation.
for x in range(0, 30):
self.crouching.append(pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaCrouch1.png'), (130, 250)))
for x in range(0, 30):
self.crouching.append(pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaCrouch2.png'), (130, 250)))
for x in range(0, 30):
self.crouching.append(pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaCrouch3.png'), (130, 250)))
#Inserts Gina's dash animation.
for x in range(0, 85):
self.dash.append(pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaFDash.png'), (180, 250)))
#Inserts Gina's back-dash animation.
for x in range(0, 75):
self.backdash.append(pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaBDash.png'), (150, 250)))
#Inserts Gina's jumping animation.
for x in range(0, 30):
self.jump.append(pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump1.png'), (130, 250)))
for x in range(0, 30):
self.jump.append(pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump2.png'), (130, 250)))
for x in range(0, 80):
self.jump.append(pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump3.png'), (130, 250)))
for x in range(0, 100):
self.jump.append(pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump4.png'), (130, 250)))
#Inserts Gina's Forward walking animation.
for x in range(0, 15):
self.walkFoward.append(pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaFWalk1.png'), (130, 250)))
for x in range(0, 15):
self.walkFoward.append(pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaFWalk2.png'), (130, 250)))
for x in range(0, 15):
self.walkFoward.append(pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaFWalk3.png'), (130, 250)))
for x in range(0, 15):
self.walkFoward.append(pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaFWalk4.png'), (130, 250)))
for x in range(0, 15):
self.walkFoward.append(pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaFWalk5.png'), (130, 250)))
for x in range(0, 15):
self.walkFoward.append(pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaFWalk6.png'), (130, 250)))
#Inserts Gina's Backward walking animation.
for x in range(0, 20):
self.walkBackward.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/GinaBWalk1.png'), (130, 250)))
for x in range(0, 20):
self.walkBackward.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/GinaBWalk2.png'), (130, 250)))
for x in range(0, 20):
self.walkBackward.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/GinaBWalk3.png'), (130, 250)))
for x in range(0, 20):
self.walkBackward.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/GinaBWalk4.png'), (130, 250)))
# Inserts Gina's standing A attack animation.
for x in range(0, 2):
self.stand_a_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaStandA1.png'), (130, 250)
))
for x in range(0, 2):
self.stand_a_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaStandA2.png'), (130, 250)
))
for x in range(0, 6):
self.stand_a_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaStandA3.png'), (150, 250)
))
for x in range(0, 3):
self.stand_a_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaStandA2.png'), (130, 250)
))
# Inserts Gina's crouching A attack animation.
for x in range(0, 4):
self.crouch_a_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaCrouchA1.png'), (150, 250)
))
for x in range(0, 4):
self.crouch_a_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaCrouchA2.png'), (150, 250)
))
for x in range(0, 6):
self.crouch_a_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaCrouchA3.png'), (150, 250)
))
for x in range(0, 6):
self.crouch_a_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaCrouchA2.png'), (150, 250)
))
# Inserts Gina's jumping A attack animation.
for x in range(0, 10):
self.jump_a_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaJumpA1.png'), (130, 250)
))
# Inserts Gina's standing B attack animation
for x in range(0, 3):
self.stand_b_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaStandB1-1.png'), (200, 250)
))
for x in range(0, 4):
self.stand_b_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaStandB1.png'), (200, 250)
))
for x in range(0, 6):
self.stand_b_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaStandB2.png'), (200, 250)
))
for x in range(0, 13):
self.stand_b_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaStandB3.png'), (200, 250)
))
for x in range(0, 7):
self.stand_b_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaStandB4.png'), (200, 250)
))
for x in range(0, 5):
self.stand_b_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaStandB1.png'), (200, 250)
))
for x in range(0, 3):
self.stand_b_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaStandB1-1.png'), (200, 250)
))
# Inserts Gina's crouching B attack animation
for x in range(0, 8):
self.crouch_b_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaCrouchB1.png'), (130, 250)
))
for x in range(0, 6):
self.crouch_b_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaCrouchB2.png'), (130, 250)
))
for x in range(0, 6):
self.crouch_b_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaCrouchB3.png'), (130, 250)
))
for x in range(0, 8):
self.crouch_b_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaCrouchB2.png'), (130, 250)
))
for x in range(0, 8):
self.crouch_b_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaCrouchB1.png'), (130, 250)
))
# Inserts Gina's jumping B attack animation
for x in range(0, 4):
self.jump_b_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaJumpB1.png'), (230, 250)
))
for x in range(0, 12):
self.jump_b_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaJumpB2.png'), (230, 250)
))
# Inserts Gina's standing C attack animation
for x in range(0, 4):
self.stand_c_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaStandC1.png'), (130, 250)
))
for x in range(0, 8):
self.stand_c_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaStandC2.png'), (160, 250)
))
for x in range(0, 9):
self.stand_c_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaStandC3.png'), (160, 250)
))
for x in range(0, 8):
self.stand_c_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaStandC4.png'), (130, 250)
))
for x in range(0, 10):
self.stand_c_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaStandC5.png'), (130, 250)
))
# Inserts Gina's crouching C attack animation
for x in range(0, 6):
self.crouch_c_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaCrouchC1.png'), (230, 250)
))
for x in range(0, 8):
self.crouch_c_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaCrouchC3.png'), (230, 250)
))
for x in range(0, 10):
self.crouch_c_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaCrouchC4.png'), (230, 250)
))
for x in range(0, 10):
self.crouch_c_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaCrouchC3.png'), (230, 250)
))
# Inserts Gina's jump C attack animation
for x in range(0, 10):
self.jump_c_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaJumpC1.png'), (170, 250)
))
for x in range(0, 5):
self.jump_c_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaJumpC2.png'), (170, 250)
))
for x in range(0, 5):
self.jump_c_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaJumpC3.png'), (170, 250)
))
# Inserts Gina's wakeup animation
for x in range(0, 30):
self.wakeup_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/GinaKnockdown.png'), (240, 130)
))
for x in range(0, 20):
self.wakeup_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/GinaWakeUp1.png'), (240, 180)
))
for x in range(0, 20):
self.wakeup_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/GinaWakeUp2.png'), (250, 180)
))
# Inserts Gina's grab animation
for x in range(0, 8):
self.grab_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaGrab1.png'), (190, 250)
))
for x in range(0, 8):
self.grab_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaGrab2.png'), (190, 250)
))
for x in range(0, 8):
self.grab_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaGrab3.png'), (190, 250)
))
# Inserts Gina's Hurt animation
for x in range(0, 1):
self.hurt_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/GinaHurt1.png'), (130, 250)
))
# Inserts Gina's special one animation
for x in range(0, 8):
self.special_one_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaFireBall1.png'), (190, 250)
))
for x in range(0, 8):
self.special_one_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaFireBall2.png'), (190, 250)
))
for x in range(0, 8):
self.special_one_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaFireBall3.png'), (190, 250)
))
for x in range(0, 12):
self.special_one_animation.append(pygame.transform.scale(
pygame.image.load('Sprites/Gina/Attacks/GinaFireBall4.png'), (190, 250)
))
# ----------------------------------------------------------------------------------------------------------------
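    # NOTE (editor sketch): the frame-loading loops in __init__ above repeat
    # the same load/scale/append pattern dozens of times. A hypothetical
    # helper like `_load_frames` (not part of the original code) could
    # collapse each block to a single call, e.g.
    #   self.standing = (self._load_frames('Sprites/Gina/GinaStand1.png', 30, (130, 250))
    #                    + self._load_frames('Sprites/Gina/GinaStand2.png', 30, (130, 250)))
    @staticmethod
    def _load_frames(path, count, size):
        """Return `count` references to the image at `path`, scaled to `size`."""
        frame = pygame.transform.scale(pygame.image.load(path), size)
        return [frame] * count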
def update_hurt_box(self, player):
"""
        Updates the character's hurt boxes as the battle goes on. Depending on her current action,
        the boxes' size and position change.
:param player:
:return:
"""
# Assigns initial hurtboxes and hitboxes for the character...
self.hurt_box.clear()
if player.facingRight:
if not player.setAction:
if player.crouching:
self.hurt_box.append(HurtBox(player.x + 20 + player.off_set_value
, player.y + 70, self.rect.width, self.rect.height - 20))
else:
self.hurt_box.append(HurtBox(player.x + 20 + player.off_set_value,
player.y + 30, self.rect.width + 10, self.rect.height + 30))
elif player.got_air_hit:
self.hurt_box.append(InvincibleBox(player.x + 20 + player.off_set_value
, player.y + 30, self.rect.width + 10, self.rect.height - 30))
elif player.got_hit:
self.hurt_box.append(HurtBox(player.x + 20 + player.off_set_value,
player.y + 30, self.rect.width + 10, self.rect.height + 30))
elif player.getting_up:
self.hurt_box.append(InvincibleBox(player.x + 20 + player.off_set_value,
player.y + 30, self.rect.width + 10, self.rect.height + 30))
elif player.isBackDashing:
self.hurt_box.append(InvincibleBox(player.x + 20 + player.off_set_value,
player.y + 30, self.rect.width + 10, self.rect.height + 30))
elif player.grabbing:
self.hurt_box.append(HurtBox(player.x + 20 + player.off_set_value,
player.y + 30, self.rect.width + 10, self.rect.height + 30))
elif player.grabbed:
self.hurt_box.append(InvincibleBox(player.x + 20 + player.off_set_value,
player.y + 30, self.rect.width + 10, self.rect.height + 30))
elif player.isDashing:
self.hurt_box.append(HurtBox(player.x + 70 + player.off_set_value
, player.y + 30, self.rect.width + 10, self.rect.height + 30))
elif player.isJumping or player.isDescending:
self.hurt_box.append(HurtBox(player.x + 20 + player.off_set_value
, player.y + 30, self.rect.width + 10, self.rect.height - 30))
elif player.special_one:
self.hurt_box.append(HurtBox(player.x + 20 + player.off_set_value,
player.y + 30, self.rect.width + 10, self.rect.height + 30))
elif player.attack_a:
self.hurt_box.append(HurtBox(player.x + 20 + player.off_set_value
, player.y + 30, self.rect.width + 10, self.rect.height + 30))
elif player.crouch_attack_a:
self.hurt_box.append(HurtBox(player.x + 20 + player.off_set_value
, player.y + 70, self.rect.width, self.rect.height - 20))
elif player.attack_b:
self.hurt_box.append(HurtBox(player.x + 20 + player.off_set_value
, player.y + 30, self.rect.width + 10, self.rect.height + 30))
elif player.crouch_attack_b:
self.hurt_box.append(HurtBox(player.x + 20 + player.off_set_value
, player.y + 70, self.rect.width, self.rect.height - 20))
elif player.attack_c:
self.hurt_box.append(HurtBox(player.x + 20 + player.off_set_value
, player.y + 30, self.rect.width + 10, self.rect.height + 30))
elif player.crouch_attack_c:
self.hurt_box.append(HurtBox(player.x + 20 + player.off_set_value
, player.y + 120, self.rect.width, self.rect.height - 50))
elif player.loser:
self.hurt_box.append(InvincibleBox(player.x + 20 + player.off_set_value
, player.y + 70, self.rect.width, self.rect.height - 20))
elif player.winner:
self.hurt_box.append(InvincibleBox(player.x + 20 + player.off_set_value,
player.y + 30, self.rect.width + 10, self.rect.height + 30))
else:
if not player.setAction:
if player.crouching:
self.hurt_box.append(HurtBox(player.x + 20 + player.off_set_value
, player.y + 70, self.rect.width, self.rect.height - 20))
else:
self.hurt_box.append(HurtBox(player.x + 20 + player.off_set_value,
player.y + 30, self.rect.width + 10, self.rect.height + 30))
elif player.got_air_hit:
self.hurt_box.append(InvincibleBox(player.x + 20 + player.off_set_value
, player.y + 30, self.rect.width + 10, self.rect.height - 30))
elif player.got_hit:
self.hurt_box.append(HurtBox(player.x + 20 + player.off_set_value,
player.y + 30, self.rect.width + 10, self.rect.height + 30))
elif player.getting_up:
self.hurt_box.append(InvincibleBox(player.x + 20 + player.off_set_value,
player.y + 30, self.rect.width + 10, self.rect.height + 30))
elif player.isBackDashing:
self.hurt_box.append(InvincibleBox(player.x + 20 + player.off_set_value,
player.y + 30, self.rect.width + 10, self.rect.height + 30))
elif player.grabbing:
self.hurt_box.append(HurtBox(player.x + 20 + player.off_set_value,
player.y + 30, self.rect.width + 10, self.rect.height + 30))
elif player.grabbed:
self.hurt_box.append(InvincibleBox(player.x + 20 + player.off_set_value,
player.y + 30, self.rect.width + 10, self.rect.height + 30))
elif player.isDashing:
self.hurt_box.append(HurtBox(
(player.x + 20)+ player.off_set_value
, (player.y + 30), self.rect.width + 10, self.rect.height + 30))
elif player.isJumping or player.isDescending:
self.hurt_box.append(HurtBox(
(player.x + 20) + player.off_set_value
, (player.y + 30), self.rect.width + 10, self.rect.height - 30))
elif player.special_one:
self.hurt_box.append(HurtBox(player.x + 20 + player.off_set_value,
player.y + 30, self.rect.width + 10, self.rect.height + 30))
elif player.attack_a:
self.hurt_box.append(HurtBox(player.x + 20 + player.off_set_value
, player.y + 30, self.rect.width + 10, self.rect.height + 30))
elif player.crouch_attack_a:
self.hurt_box.append(HurtBox(player.x + 20 + player.off_set_value
, player.y + 70, self.rect.width, self.rect.height - 20))
elif player.attack_b:
self.hurt_box.append(HurtBox(player.x + 180 - player.off_set_value
, player.y + 30, self.rect.width + 10, self.rect.height + 30))
elif player.crouch_attack_b:
self.hurt_box.append(HurtBox(player.x + 20 + player.off_set_value
, player.y + 70, self.rect.width, self.rect.height - 20))
elif player.attack_c:
self.hurt_box.append(HurtBox(player.x + 20 + player.off_set_value
, player.y + 30, self.rect.width + 10, self.rect.height + 30))
elif player.crouch_attack_c:
self.hurt_box.append(HurtBox(player.x + 20 + player.off_set_value
, player.y + 120, self.rect.width, self.rect.height - 50))
elif player.loser:
self.hurt_box.append(InvincibleBox(player.x + 20 + player.off_set_value
, player.y + 70, self.rect.width, self.rect.height - 20))
elif player.winner:
self.hurt_box.append(InvincibleBox(player.x + 20 + player.off_set_value,
player.y + 30, self.rect.width + 10, self.rect.height + 30))
if len(self.hurt_box) > 0:
player.collision_x = self.hurt_box[0].rect.x
player.collision_width = self.hurt_box[0].rect.width
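
    def _standard_hurt_box(self, player, dy=30, dw=10, dh=30):
        """Hedged sketch (editor addition, unused by the original code).

        Most branches in update_hurt_box build the same rectangle; routing
        them through one helper like this would remove the duplication.
        """
        return HurtBox(player.x + 20 + player.off_set_value,
                       player.y + dy,
                       self.rect.width + dw,
                       self.rect.height + dh)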
# -----------------------------------------------------------------------------------------------------------------
def win_state(self, player):
"""
        Plays the win animation when the character has won the battle.
:param player:
:return:
"""
if player.setAction is False:
if player.facingRight is True:
player.multiplier = 1
else:
player.multiplier = -1
player.index = 0
player.setAction = True
if player.multiplier == 1:
if player.index > len(player.character.victory_animation)-1:
player.index = 0
player.finish_win_animation = True
else:
player.character.image = player.character.victory_animation[player.index]
else:
if player.index > len(player.character.victory_animation)-1:
player.index = 0
player.finish_win_animation = True
else:
player.character.image =\
pygame.transform.flip(player.character.victory_animation[player.index], True, False)
player.index += 1
def lose_state(self, player, two):
"""
        Plays the crumble or defeat animation when the character's HP is gone, or when their HP is
        lower than the opponent's as time runs out.
:param player:
:param two:
:return:
"""
if player.setAction is False:
if player.facingRight is True:
player.multiplier = 1
else:
player.multiplier = -1
player.index = 0
player.setAction = True
if player.multiplier == 1:
if player.health_points > 0:
if player.index >= len(player.character.defeat_animation)-1:
player.index = len(player.character.defeat_animation)-1
player.finish_lose_animation = True
two.winner = True
else:
player.character.image = player.character.defeat_animation[player.index]
player.index += 1
else:
if player.index >= len(player.character.crumble_animation)-1:
player.index = len(player.character.crumble_animation)-1
player.finish_lose_animation = True
two.winner = True
else:
if player.index == 0:
player.y = player.yOriginal
if player.index == 30:
player.y = player.yKnockdown
player.character.image = player.character.crumble_animation[player.index]
player.index += 1
else:
if player.health_points > 0:
if player.index >= len(player.character.defeat_animation)-1:
player.index = len(player.character.defeat_animation)-1
player.finish_lose_animation = True
two.winner = True
else:
player.character.image =\
pygame.transform.flip(player.character.defeat_animation[player.index], True, False)
player.index += 1
else:
if player.index >= len(player.character.crumble_animation)-1:
player.index = len(player.character.crumble_animation)-1
player.finish_lose_animation = True
two.winner = True
else:
if player.index == 0:
player.y = player.yOriginal
if player.index == 30:
player.y = player.yKnockdown
player.character.image =\
pygame.transform.flip(player.character.crumble_animation[player.index], True, False)
player.index += 1
# -----------------------------------------------------------------------------------------------------------------
def stand_a(self, player):
"""
Plays the character's Stand A attack and updates hit box.
:param player:
:return:
"""
if player.setAction is False:
if player.facingRight is True:
player.multiplier = 1
else:
player.multiplier = -1
player.index = 0
player.setAction = True
if player.multiplier == 1:
if player.index > len(player.character.stand_a_animation)-1:
player.index = 0
else:
if player.index > 8 and player.index < 10 and player.hit_box_fill_once is not True:
stand_a_hit_box = HitBox(player.x + 100, player.y + 80, 50, 50, damage=5, hitstun=8, knockback= 3,
knockdown= False, blocktype='stand', attack_level=1)
self.hit_box.append(stand_a_hit_box)
player.hit_box_fill_once = True
player.character.image = player.character.stand_a_animation[player.index]
player.index += 1
else:
if player.index > len(player.character.stand_a_animation)-1:
player.index = 0
else:
if player.index >= 4 and player.index <= 9:
if player.off_set is False:
player.x -= 17
player.off_set = True
player.off_set_value = 17
else:
if player.off_set is True:
player.x += 17
player.off_set = False
player.off_set_value = 0
if player.index > 8 and player.index < 10 and player.hit_box_fill_once is not True:
stand_a_hit_box = HitBox(player.x - 20 + player.off_set_value, player.y + 80, 50, 50,
damage=5, hitstun=8, knockback= 3,knockdown= False, blocktype='stand',
attack_level=1)
self.hit_box.append(stand_a_hit_box)
player.hit_box_fill_once = True
player.character.image = \
pygame.transform.flip(player.character.stand_a_animation[player.index], True, False)
player.index += 1
if player.index > 12:
self.hit_box.clear()
if player.index >= len(player.character.stand_a_animation):
player.setAction = False
player.attack_a = False
player.hit_box_fill_once = False
def crouch_a(self, player):
"""
Plays the character's Crouch A attack and updates hit box.
:param player:
:return:
"""
if player.setAction is False:
if player.facingRight is True:
player.multiplier = 1
else:
player.multiplier = -1
player.index = 0
player.setAction = True
if player.multiplier == 1:
if player.index > len(player.character.crouch_a_animation)-1:
player.index = 0
else:
if player.index == 7:
player.x += 4
if player.index > 10 and player.index < 12 and player.hit_box_fill_once is not True:
stand_a_hit_box = HitBox(player.x + 100, player.y + 100, 70, 50, damage=5, hitstun=16, knockback= 2,
knockdown= False, blocktype='crouch',attack_level=1)
self.hit_box.append(stand_a_hit_box)
player.hit_box_fill_once = True
player.character.image = player.character.crouch_a_animation[player.index]
player.index += 1
else:
if player.index > len(player.character.crouch_a_animation)-1:
player.index = 0
else:
if player.index >= 0 and player.index < len(player.character.crouch_a_animation)-1:
if player.off_set is False:
player.x -= 20
player.off_set = True
player.off_set_value = 20
else:
if player.off_set is True:
player.x += 20
player.off_set = False
player.off_set_value = 0
if player.index == 7:
player.x -= 4
if player.index > 10 and player.index < 12 and player.hit_box_fill_once is not True:
stand_a_hit_box = HitBox(player.x - 20 + player.off_set_value, player.y + 100, 70, 50,
damage=5, hitstun=16, knockback= 2,knockdown= False, blocktype='crouch',
attack_level=1)
self.hit_box.append(stand_a_hit_box)
player.hit_box_fill_once = True
player.character.image = \
pygame.transform.flip(player.character.crouch_a_animation[player.index], True, False)
player.index += 1
if player.index > 12:
self.hit_box.clear()
        if player.index >= len(player.character.crouch_a_animation):
            player.setAction = False
            # restore the idle crouch pose, flipped to match the facing direction
            # (the original computed the flipped image but discarded the result)
            if player.multiplier == 1:
                player.character.image = player.character.crouching[0]
            else:
                player.character.image = \
                    pygame.transform.flip(player.character.crouching[0], True, False)
            player.crouch_attack_a = False
            player.hit_box_fill_once = False
def jump_a(self, player):
"""
Plays the character's Jump A attack and updates hit box.
:param player:
:return:
"""
self.hit_box.clear()
if player.index > len(player.character.jump_a_animation)-1:
player.index = 0
else:
if player.multiplier == 1:
player.character.image = player.character.jump_a_animation[player.index]
if player.index >= 0 and player.index < 5 and player.hit_box_fill_once is False:
jump_a_hit_box = HitBox(player.x + 100 + player.off_set_value, player.y - 10, 50, 50,
damage=5, hitstun=8, knockback= 3,knockdown= False, blocktype='overhead',
attack_level=1)
self.hit_box.append(jump_a_hit_box)
# player.hit_box_fill_once = True
else:
player.character.image = \
pygame.transform.flip(player.character.jump_a_animation[player.index],True, False)
if player.index >= 0 and player.index < 5 and player.hit_box_fill_once is False:
jump_a_hit_box = HitBox(player.x - 20 + player.off_set_value, player.y - 10, 50, 50,
damage=5, hitstun=8, knockback= 3,knockdown= False, blocktype='overhead'
,attack_level=1)
self.hit_box.append(jump_a_hit_box)
# player.hit_box_fill_once = True
player.index += 1
if player.index > len(player.character.jump_a_animation)-1:
player.jump_attack_a = False
player.air_attack_once = True
player.hit_box_fill_once = False
def stand_b(self, player):
"""
Plays the character's Stand B attack and updates hit box.
:param player:
:return:
"""
if player.setAction is False:
if player.facingRight is True:
player.multiplier = 1
else:
player.multiplier = -1
player.index = 0
player.setAction = True
if player.multiplier == 1:
if player.index > len(player.character.stand_b_animation):
player.index = 0
else:
if player.index == 5:
player.x += 10
if player.index > 9 and player.index < 18 and player.hit_box_fill_once is not True:
stand_b_hit_box = HitBox(player.x + 160, player.y + 120, 50, 50, damage=15, hitstun=20, knockback= 4,
knockdown= False, blocktype='stand', attack_level=2)
self.hit_box.append(stand_b_hit_box)
player.hit_box_fill_once = True
if player.index > 7 and player.index < 20:
self.hurt_box.append(DamageBox(player.x + 130 + player.off_set_value,
player.y + 120, 100, 60))
player.character.image = player.character.stand_b_animation[player.index]
if player.index == 34:
player.x -= 10
player.index += 1
if player.index > 19:
self.hit_box.clear()
else:
if player.index == 5:
player.x -= 10
if player.index >= 0 and player.index < len(player.character.stand_b_animation)-1:
if player.off_set is False:
player.x -= 85
player.off_set = True
player.off_set_value = 85
else:
if player.off_set is True:
player.x += 85
player.off_set = False
player.off_set_value = 0
if player.index >= 9 and player.index < 18 and player.hit_box_fill_once is not True:
stand_b_hit_box = HitBox(player.x - 90 + player.off_set_value, player.y + 120, 50, 50,
damage=15, hitstun=20, knockback= 4,knockdown= False, blocktype='stand'
,attack_level=2)
self.hit_box.append(stand_b_hit_box)
player.hit_box_fill_once = True
if player.index > 7 and player.index < 20:
self.hurt_box.append(DamageBox(player.x - 115 + player.off_set_value,
player.y + 120, 100, 60))
if player.index > len(player.character.stand_b_animation) - 1:
player.character.image = \
pygame.transform.flip(player.character.stand_b_animation[player.index-2], True, False)
else:
player.character.image = \
pygame.transform.flip(player.character.stand_b_animation[player.index], True, False)
if player.index == 34:
player.x += 10
if player.index > 19:
self.hit_box.clear()
player.index += 1
if player.index > len(player.character.stand_b_animation)-1:
player.character.image = \
pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaStand1.png'), (130, 250))
, True, False)
player.setAction = False
player.off_set = False
player.off_set_value = 0
player.attack_b = False
player.hit_box_fill_once = False
def crouch_b(self, player):
"""
Plays the character's Crouch B attack and updates hit box.
:param player:
:return:
"""
if player.setAction is False:
if player.facingRight is True:
player.multiplier = 1
else:
player.multiplier = -1
player.index = 0
player.setAction = True
if player.multiplier == 1:
if player.index > len(player.character.crouch_b_animation)-1:
player.index = 0
else:
if player.index > 12 and player.index < 18 and player.hit_box_fill_once is not True:
stand_a_hit_box = HitBox(player.x + 70, player.y, 80, 100, damage=15, hitstun=12, knockback= 3,
knockdown= False, blocktype='stand',attack_level=1)
self.hit_box.append(stand_a_hit_box)
player.hit_box_fill_once = True
player.character.image = player.character.crouch_b_animation[player.index]
player.index += 1
else:
if player.index > len(player.character.crouch_b_animation)-1:
player.index = 0
else:
if player.index >= 12 and player.index <= 18:
if player.off_set is False:
player.x -= 0
player.off_set = True
player.off_set_value = 0
else:
if player.off_set is True:
player.x += 0
player.off_set = False
player.off_set_value = 0
if player.index > 12 and player.index < 18 and player.hit_box_fill_once is not True:
crouch_b_hit_box = HitBox(player.x - 20 + player.off_set_value, player.y, 80, 100,
damage=15, hitstun=12, knockback= 3,knockdown= False, blocktype='stand'
,attack_level=1)
self.hit_box.append(crouch_b_hit_box)
player.hit_box_fill_once = True
player.character.image = \
pygame.transform.flip(player.character.crouch_b_animation[player.index], True, False)
player.index += 1
if player.index > 18:
self.hit_box.clear()
if player.index >= len(player.character.crouch_b_animation):
player.setAction = False
player.crouch_attack_b = False
player.hit_box_fill_once = False
def jump_b(self, player):
"""
Plays the character's Jump B attack and updates hit box.
:param player:
:return:
"""
self.hit_box.clear()
if player.index > len(player.character.jump_b_animation)-1:
player.index = 0
else:
if player.multiplier == 1:
player.character.image = player.character.jump_b_animation[player.index]
if player.index >= 6 and player.index < 12 and player.hit_box_fill_once is False \
and player.hit_confirm is False:
jump_b_hit_box = HitBox(player.x + 120 + player.off_set_value, player.y + 130, 100, 50,
damage=5, hitstun=12, knockback= 4,knockdown= False, blocktype='overhead'
,attack_level=2)
jump_b_hit_box_2 = HitBox(player.x + player.off_set_value, player.y + 170, 50, 50,
damage=5, hitstun=12, knockback= 4,knockdown= False, blocktype='overhead'
,attack_level=2)
self.hit_box.append(jump_b_hit_box)
self.hit_box.append(jump_b_hit_box_2)
# player.hit_box_fill_once = True
if player.index > 6 and player.index < len(player.character.jump_b_animation)-1:
self.hurt_box.append(DamageBox(player.x + 110 + player.off_set_value,
player.y + 130, 80, 60))
else:
if player.index >= 0 and player.index < len(player.character.jump_b_animation)-1:
if player.off_set is False:
player.x -= 100
player.off_set = True
player.off_set_value = 100
else:
if player.off_set is True:
player.x += 100
player.off_set = False
player.off_set_value = 0
player.character.image = \
pygame.transform.flip(player.character.jump_b_animation[player.index],True, False)
if player.index >= 6 and player.index < 12 and player.hit_box_fill_once is False \
and player.hit_confirm is False:
jump_b_hit_box = HitBox(player.x - 70 + player.off_set_value, player.y + 130, 100, 50,
damage=5, hitstun=12, knockback= 4,knockdown= False, blocktype='overhead'
,attack_level=2)
jump_b_hit_box_2 = HitBox(player.x + 80 + player.off_set_value, player.y + 170, 50, 50,
damage=5, hitstun=12, knockback= 4,knockdown= False, blocktype='overhead'
,attack_level=2)
self.hit_box.append(jump_b_hit_box)
self.hit_box.append(jump_b_hit_box_2)
# player.hit_box_fill_once = True
if player.index > 6 and player.index < len(player.character.jump_b_animation)-1:
self.hurt_box.append(DamageBox(player.x - 80 + player.off_set_value,
player.y + 130, 80, 60))
player.index += 1
if player.index > len(player.character.jump_b_animation)-1:
if player.multiplier == 1:
player.character.image = pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump4.png'),
(130, 250))
else:
player.character.image = pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump4.png'),
(130, 250)), True, False)
player.off_set = False
player.jump_attack_b = False
player.air_attack_once = True
player.hit_box_fill_once = False
def stand_c(self, player):
"""
Plays the character's Stand C attack and updates hit box.
:param player:
:return:
"""
if player.setAction is False:
if player.facingRight is True:
player.multiplier = 1
else:
player.multiplier = -1
player.index = 0
player.setAction = True
if player.multiplier == 1:
if player.index > len(player.character.stand_c_animation)-1:
player.index = 0
else:
if player.index > 13 and player.index < 25 and player.hit_box_fill_once is not True:
stand_c_hit_box_1 = HitBox(player.x + 100, player.y + 50, 80, 70,
damage=20, hitstun=12, knockback= 3,
knockdown= False, blocktype='stand', attack_level=3)
self.hit_box.append(stand_c_hit_box_1)
player.hit_box_fill_once = True
if player.index > 26 and player.index < 34 and player.hit_box_fill_twice is not True:
stand_c_hit_box_2 = HitBox(player.x + 110, player.y + 40, 80, 70,
damage=20, hitstun=12, knockback= -3,
knockdown= False, blocktype='stand', attack_level=3)
self.hit_box.append(stand_c_hit_box_2)
player.hit_box_fill_twice = True
if player.index > 10 and player.index < 35:
self.hurt_box.append(DamageBox(player.x + 100 + player.off_set_value,
player.y + 55, 100, 60))
player.character.image = player.character.stand_c_animation[player.index]
player.index += 1
else:
if player.index > len(player.character.stand_c_animation)-1:
player.index = 0
else:
if player.index >= 4 and player.index <= 20:
if player.off_set is False:
player.x -= 20
player.off_set = True
player.off_set_value = 20
else:
if player.off_set is True:
player.x += 20
player.off_set = False
player.off_set_value = 0
if player.index > 13 and player.index < 25 and player.hit_box_fill_once is not True:
stand_c_hit_box_1 = HitBox(player.x - 40 + player.off_set_value, player.y + 50, 80, 70,
damage=5, hitstun=12, knockback= 3,knockdown= False, blocktype='stand'
,attack_level=3)
self.hit_box.append(stand_c_hit_box_1)
player.hit_box_fill_once = True
if player.index > 26 and player.index < 34 and player.hit_box_fill_twice is not True:
stand_c_hit_box_2 = HitBox(player.x - 50 + player.off_set_value, player.y + 40, 80, 70,
damage=20, hitstun=12, knockback= -3,
knockdown= False, blocktype='stand', attack_level=3)
self.hit_box.append(stand_c_hit_box_2)
player.hit_box_fill_twice = True
if player.index > 10 and player.index < 35:
self.hurt_box.append(DamageBox(player.x - 60 + player.off_set_value,
player.y + 55, 100, 60))
player.character.image = \
pygame.transform.flip(player.character.stand_c_animation[player.index], True, False)
player.index += 1
if player.index > 35:
self.hit_box.clear()
if player.index >= len(player.character.stand_c_animation):
player.setAction = False
player.attack_c = False
player.hit_box_fill_once = False
player.hit_box_fill_twice = False
def crouch_c(self, player):
"""
Plays the character's Crouch C attack and updates hit box.
:param player:
:return:
"""
if player.setAction is False:
if player.facingRight is True:
player.multiplier = 1
else:
player.multiplier = -1
player.index = 0
player.setAction = True
if player.multiplier == 1:
if player.index > len(player.character.crouch_c_animation)-1:
player.index = 0
if player.index >= 14 and player.index <= 24 and player.hit_box_fill_once is not True:
crouch_c_hit_box_1 = HitBox(player.x + 130, player.y + 200, 100, 20,
damage=14, hitstun=15, knockback= 3,
knockdown= True, blocktype='sweep', attack_level=3)
self.hit_box.append(crouch_c_hit_box_1)
player.hit_box_fill_once = True
player.character.image = player.character.crouch_c_animation[player.index]
player.index += 1
else:
if player.index > len(player.character.crouch_c_animation)-1:
player.index = 0
if player.index >= 0 and player.index < len(player.character.crouch_c_animation)-1:
if player.off_set is False:
player.x -= 100
player.off_set = True
player.off_set_value = 100
else:
if player.off_set is True:
player.x += 100
player.off_set = False
player.off_set_value = 0
if player.index >= 14 and player.index <= 24 and player.hit_box_fill_once is not True:
crouch_c_hit_box_1 = HitBox(player.x, player.y + 200, 100, 20,
damage=14, hitstun=15, knockback= 3,
knockdown= True, blocktype='sweep', attack_level=3)
self.hit_box.append(crouch_c_hit_box_1)
player.hit_box_fill_once = True
player.character.image = \
pygame.transform.flip(player.character.crouch_c_animation[player.index], True, False)
player.index += 1
if player.index > 24:
self.hit_box.clear()
if player.index > len(player.character.crouch_c_animation)-1:
if player.multiplier == 1:
player.character.image = player.character.crouching[0]
else:
player.character.image = \
pygame.transform.flip(player.character.crouching[0], True, False)
player.setAction = False
player.crouch_attack_c = False
player.hit_box_fill_once = False
def jump_c(self, player):
"""
Plays the character's Jump C attack and updates hit box.
:param player:
:return:
"""
self.hit_box.clear()
if player.index > len(player.character.jump_c_animation)-1:
player.index = 0
else:
if player.multiplier == 1:
player.character.image = player.character.jump_c_animation[player.index]
if player.index >= 15 and player.index < len(player.character.jump_c_animation)-1 \
and player.hit_box_fill_once is False and player.hit_confirm is False:
jump_c_hitbox = HitBox(player.x + 80 + player.off_set_value, player.y + 150, 50, 70,
damage=10, hitstun=15, knockback= 5,knockdown= False, blocktype='overhead'
,attack_level=3)
self.hit_box.append(jump_c_hitbox)
else:
if player.index >= 0 and player.index < len(player.character.jump_c_animation)-1:
if player.off_set is False:
player.off_set = True
player.off_set_value = 40
player.x -= 40
else:
if player.off_set is True:
player.off_set = False
player.off_set_value = 0
player.x += 40
if player.index >= 15 and player.index < len(player.character.jump_c_animation)-1 \
and player.hit_box_fill_once is False and player.hit_confirm is False:
jump_c_hitbox = HitBox(player.x - 30 + player.off_set_value, player.y + 150, 50, 70,
damage=10, hitstun=15, knockback= 5,knockdown= False, blocktype='overhead'
,attack_level=3)
self.hit_box.append(jump_c_hitbox)
player.character.image = pygame.transform.flip(player.character.jump_c_animation[player.index]
, True, False)
player.index += 1
if player.index > 16 and player.index < len(player.character.jump_c_animation)-1:
player.y -= 6
if player.index > len(player.character.jump_c_animation)-1:
if player.multiplier == 1:
player.character.image = pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump4.png'),
(130, 250))
else:
player.character.image = pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump4.png'),
(130, 250)), True, False)
player.off_set = False
player.jump_attack_c = False
player.air_attack_once = True
player.hit_box_fill_once = False
if player.isJumping:
player.isJumping = False
player.isDescending = True
def grab_attack(self, player):
"""
Plays the character's grab attack and updates grab box.
:param player:
:return:
"""
if player.setAction is False:
if player.facingRight is True:
player.multiplier = 1
else:
player.multiplier = -1
player.index = 0
player.setAction = True
if player.index > len(player.character.grab_animation)-1:
player.index = 0
else:
if player.multiplier == 1:
player.character.image = player.character.grab_animation[player.index]
if player.index >= 16 and player.index < len(player.character.grab_animation)-1:
grab_box = GrabBox(player.x + 100 + player.off_set_value, player.y + 80, 30, 30)
self.hit_box.append(grab_box)
else:
if player.index >= 0 and player.index < len(player.character.grab_animation)-1:
if player.off_set is False:
player.off_set = True
player.off_set_value = 60
player.x -= 60
else:
if player.off_set is True:
player.off_set = False
player.off_set_value = 0
player.x += 60
player.character.image = pygame.transform.flip(player.character.grab_animation[player.index]
, True, False)
if player.index >= 16 and player.index < len(player.character.grab_animation)-1:
grab_box = GrabBox(player.x + player.off_set_value, player.y + 80, 30, 30)
self.hit_box.append(grab_box)
player.index += 1
if player.index > 20:
self.hit_box.clear()
if player.index > len(player.character.grab_animation)-1:
if player.multiplier == 1:
player.character.image = player.character.standing[0]
else:
player.character.image = pygame.transform.flip(player.character.standing[0], True, False)
player.setAction = False
player.off_set = False
player.off_set_value = 0
player.grabbing = False
player.hit_box_fill_once = False
def throw_attack(self, player, two):
"""
Plays the character's throw and updates the opposing player's state after the throw.
:param player:
:param two:
:return:
"""
if player.off_set:
player.x += player.off_set_value
player.off_set = False
player.off_set_value = 0
if player.setAction is False:
if player.facingRight is True:
player.multiplier = 1
else:
player.multiplier = -1
player.index = 0
player.setAction = True
if player.multiplier == 1:
if two.off_set is False:
two.y += 130
two.health_points -= 8
two.off_set = True
player.character.image = \
pygame.transform.scale(pygame.image.load
('Sprites/Gina/GinaCrouch1.png'), (130, 250))
two.character.image = \
pygame.transform.flip(two.character.wakeup_animation[0], True, False)
if player.animation_fill_once is False:
self.effects_animation.append(ThrowDust(player.x + 50, player.y))
player.animation_fill_once = True
else:
if two.off_set is False:
two.y += 130
two.health_points -= 8
two.off_set = True
player.character.image = \
pygame.transform.flip(pygame.transform.scale(pygame.image.load
('Sprites/Gina/GinaCrouch1.png'), (130, 250)), True, False)
two.character.image = two.character.wakeup_animation[0]
if player.animation_fill_once is False:
self.effects_animation.append(ThrowDust(player.x - 120, player.y))
player.animation_fill_once = True
player.index += 1
if player.index > 40:
self.hit_box.clear()
player.setAction = False
player.animation_fill_once = False
player.throw = False
two.setAction = False
two.grabbed = False
two.getting_up = True
two.off_set = False
two.timer = 1
# -----------------------------------------------------------------------------------------------------------------
def special_one(self, player):
"""
Plays the character's special attack one and updates hit box.
:param player:
:return:
"""
if player.setAction is False:
if player.facingRight is True:
player.multiplier = 1
else:
player.multiplier = -1
player.index = 0
player.setAction = True
if player.index > len(player.character.special_one_animation)-1:
player.index = 0
else:
if player.multiplier == 1:
player.character.image = player.character.special_one_animation[player.index]
if player.index >= 24 and player.index < len(player.character.special_one_animation)-1\
and player.hit_box_fill_once is False and player.meter_points >= 20:
special_box = GinaFireBall(100, player.x + self.rect.width, player.y + 50, 80, 80,
damage=12, hitstun=10, knockback= 3,knockdown= False, blocktype='stand'
,attack_level=2)
self.hit_box.append(special_box)
player.hit_box_fill_once = True
player.meter_points -= 20
else:
if player.index >= 0 and player.index < len(player.character.special_one_animation)-1:
if player.off_set is False:
player.off_set = True
player.off_set_value = 40
player.x -= 40
else:
if player.off_set is True:
player.off_set = False
player.off_set_value = 0
player.x += 40
player.character.image = pygame.transform.flip(player.character.special_one_animation[player.index]
, True, False)
if player.index >= 24 and player.index < len(player.character.special_one_animation)-1\
and player.hit_box_fill_once is False and player.meter_points >= 20:
special_box = GinaFireBall(-100, player.x, player.y + 50, 80, 80,
damage=12, hitstun=10, knockback= 3,knockdown= False, blocktype='stand'
,attack_level=2)
self.hit_box.append(special_box)
player.hit_box_fill_once = True
player.meter_points -= 20
player.index += 1
if player.index > len(player.character.special_one_animation)-1:
if player.multiplier == 1:
player.character.image = player.character.standing[0]
else:
player.character.image = pygame.transform.flip(player.character.standing[0], True, False)
player.setAction = False
player.special_one = False
player.hit_box_fill_once = False
# -----------------------------------------------------------------------------------------------------------------
def being_damaged(self, player):
"""
Plays the character's damaged animation and updates their hurt box.
:param player:
:return:
"""
if player.facingRight and not player.setAction:
player.multiplier = -1
player.setAction = True
elif not player.facingRight and not player.setAction:
player.multiplier = 1
player.setAction = True
if player.multiplier == -1:
if player.hitstun > player.max_hitstun/2:
if player.animation_fill_once is False:
self.effects_animation.append(DamageAnimation(player.x, player.y - 150, True, player.attack_level))
player.animation_fill_once = True
player.character.image = \
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaHurt1.png'), (130, 250))
else:
player.character.image = \
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaHurt2.png'), (130, 250))
player.hitstun -= 1
else:
if player.hitstun > player.max_hitstun/2:
if player.animation_fill_once is False:
self.effects_animation.append(DamageAnimation(player.x - 100, player.y - 150, False
, player.attack_level))
player.animation_fill_once = True
player.character.image = pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaHurt1.png'),
(130, 250)), True, False)
else:
player.character.image = pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaHurt2.png'),
(130, 250)), True, False)
player.hitstun -= 1
player.x += player.knockback * player.multiplier
if player.hitstun <= 0:
if player.knockdown is True:
if player.loser is False:
player.y = player.yKnockdown
if player.facingRight:
player.character.image = \
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaKnockdown.png'), (250, 130))
player.getting_up = True
else:
player.character.image = pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaKnockdown.png'),
(250, 130)), True, False)
player.getting_up = True
player.got_hit = False
player.animation_fill_once = False
player.setAction = False
player.hitstun = 0
player.max_hitstun = 0
player.knockback = 0
player.attack_level = 0
def being_air_damaged(self, player):
"""
Plays the character's damaged animation in the air and updates their hurt box.
:param player:
:return:
"""
if player.facingRight and not player.setAction:
player.isJumping = False
player.isDescending = False
player.multiplier = -1
player.setAction = True
elif not player.facingRight and not player.setAction:
player.isJumping = False
player.isDescending = False
player.multiplier = 1
player.setAction = True
if player.facingRight:
if player.animation_fill_once is False:
self.effects_animation.append(DamageAnimation(player.x, player.y - 150, False, player.attack_level))
player.animation_fill_once = True
if player.hitstun > player.max_hitstun/2:
player.character.image = \
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaHurt1.png'), (130, 250))
player.y -= 7
player.current_jump += 15
elif player.hitstun > player.max_hitstun/4 and player.hitstun < player.max_hitstun/2:
player.character.image = \
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaHurt2.png'), (130, 250))
elif player.hitstun < player.max_hitstun/4:
player.character.image = \
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaAirHurt.png'), (150, 280))
player.y += 7
player.current_jump -= 8
player.hitstun -= 1
else:
if player.animation_fill_once is False:
self.effects_animation.append(DamageAnimation(player.x - 100, player.y - 150, False
, player.attack_level))
player.animation_fill_once = True
if player.hitstun > player.max_hitstun/2:
player.character.image = pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaHurt1.png'),
(130, 250)), True, False)
player.y -= 7
player.current_jump += 15
elif player.hitstun > player.max_hitstun/4 and player.hitstun < player.max_hitstun/2:
player.character.image = pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaHurt2.png'),
(130, 250)), True, False)
elif player.hitstun < player.max_hitstun/4:
player.character.image = pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaAirHurt.png'),
(150, 280)), True, False)
player.y += 7
player.current_jump -= 8
player.hitstun -= 1
player.x += player.knockback * 1.5 * player.multiplier
if player.current_jump <= 0:
player.knockdown = True
if player.facingRight:
player.character.image = \
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaKnockdown.png'), (250, 130))
player.getting_up = True
else:
player.character.image = pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaKnockdown.png'),
(250, 130)), True, False)
player.getting_up = True
player.current_jump = 0
if player.loser is False:
player.y = player.yKnockdown
player.animation_fill_once = False
player.got_air_hit = False
player.setAction = False
player.isJumping = False
player.neutral_jumping = False
player.forward_jumping = False
player.back_jumping = False
player.isDescending = False
player.hitstun = 0
player.max_hitstun = 0
player.knockback = 0
def wake_up(self, player):
"""
Plays the character's wake up animation and updates their hurt box.
:param player:
:return:
"""
if player.facingRight and not player.setAction:
player.index = 0
player.multiplier = 1
player.setAction = True
elif not player.facingRight and not player.setAction:
player.index = 0
player.multiplier = -1
player.setAction = True
if player.facingRight:
if player.index > len(player.character.wakeup_animation)-1:
player.index = 0
else:
if player.index >= 0 and player.index < 30:
self.hurt_box.append(InvincibleBox(player.x + 50 + player.off_set_value
, player.y, self.rect.width + 30, self.rect.height - 50))
elif player.index >= 30 and player.index < len(player.character.wakeup_animation)-1:
self.hurt_box.append(InvincibleBox(player.x + 30 + player.off_set_value
, player.y, self.rect.width + 30, self.rect.height - 50))
if player.index >= 30 and player.index < len(player.character.wakeup_animation)-1:
if player.off_set is False:
player.y -= 60
player.off_set = True
player.off_set_value = 0
else:
if player.off_set is True:
player.y += 60
player.off_set = False
player.off_set_value = 0
player.character.image = player.character.wakeup_animation[player.index]
player.index += 1
else:
if player.index > len(player.character.wakeup_animation)-1:
player.index = 0
else:
if player.index >= 0 and player.index < 30:
self.hurt_box.append(InvincibleBox(player.x + 50 + player.off_set_value
, player.y, self.rect.width + 30, self.rect.height - 50))
elif player.index >= 30 and player.index < len(player.character.wakeup_animation)-1:
self.hurt_box.append(InvincibleBox(player.x + 30 + player.off_set_value
, player.y, self.rect.width + 30, self.rect.height - 50))
if player.index >= 30 and player.index < len(player.character.wakeup_animation)-1:
if player.off_set is False:
player.y -= 60
player.x -= 50
player.off_set = True
player.off_set_value = 50
else:
if player.off_set is True:
player.y += 60
player.x += 50
player.off_set = False
player.off_set_value = 0
player.character.image = pygame.transform.flip(
player.character.wakeup_animation[player.index], True, False)
player.index += 1
if player.index > len(player.character.wakeup_animation) - 1:
if player.facingRight:
player.character.image = player.character.standing[0]
else:
player.character.image = pygame.transform.flip(player.character.standing[0], True, False)
player.knockdown = False
player.getting_up = False
player.setAction = False
player.y = player.yOriginal
def being_blocked(self, player):
"""
Plays the character's block animation and updates their hurt box.
:param player:
:return:
"""
if player.facingRight and not player.setAction:
player.multiplier = -1
player.setAction = True
elif not player.facingRight and not player.setAction:
player.multiplier = 1
player.setAction = True
if player.facingRight:
if player.hitstun > 0:
if player.blockingLow:
if player.animation_fill_once is False:
self.effects_animation.append(BlockAnimation(player.x, player.y - 25, True))
player.animation_fill_once = True
player.character.image = \
pygame.transform.scale(pygame.image.load
('Sprites/Gina/GinaCrouchBlock.png'), (130, 250))
else:
if player.animation_fill_once is False:
self.effects_animation.append(BlockAnimation(player.x, player.y - 50, True))
player.animation_fill_once = True
player.character.image = \
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaStandBlock.png'), (130, 250))
player.hitstun -= 1
else:
if player.hitstun > 0:
if player.blockingLow:
if player.animation_fill_once is False:
self.effects_animation.append(BlockAnimation(player.x - 80, player.y - 25, True))
player.animation_fill_once = True
player.character.image = pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaCrouchBlock.png'),
(130, 250)), True, False)
else:
if player.animation_fill_once is False:
self.effects_animation.append(BlockAnimation(player.x - 80, player.y - 50, True))
player.animation_fill_once = True
player.character.image = pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaStandBlock.png'),
(130, 250)), True, False)
player.hitstun -= 1
player.x += player.knockback * player.multiplier
if player.hitstun <= 0:
player.animation_fill_once = False
player.block_hit = False
player.setAction = False
player.hitstun = 0
player.max_hitstun = 0
player.knockback = 0
def push_back_grab(self, player):
"""
        Plays the character's animation when repelling a grab from the opposing enemy.
:param player:
:return:
"""
if player.off_set:
player.x += player.off_set_value
player.off_set = False
player.off_set_value = 0
if player.facingRight and not player.setAction:
player.index = 0
player.multiplier = -1
player.setAction = True
elif not player.facingRight and not player.setAction:
player.index = 0
player.multiplier = 1
player.setAction = True
if player.multiplier == 1:
player.character.image = \
pygame.transform.scale(pygame.image.load
('Sprites/Gina/GinaHurt1.png'), (130, 250))
player.x -= 3
else:
player.character.image = \
pygame.transform.flip(pygame.transform.scale(pygame.image.load
('Sprites/Gina/GinaHurt1.png'), (130, 250)), True, False)
player.x += 3
player.index += 1
if player.index > 25:
self.hit_box.clear()
player.setAction = False
player.push_back = False
# -----------------------------------------------------------------------------------------------------------------
def jumping(self, player):
"""
        Plays the character's jump animation in neutral, forward, or backward. This method also checks
        whether the player is currently attacking in the air.
:param player:
:return:
"""
if player.facingRight and not player.setAction:
player.index = 0
player.multiplier = 1
player.setAction = True
elif not player.facingRight and not player.setAction:
player.index = 0
player.multiplier = -1
player.setAction = True
        # freeze jump momentum while the air C attack is active
        momentum = 0 if player.jump_attack_c else 1
if player.back_jumping:
if player.isJumping:
if player.current_jump < len(player.character.jump)/6:
if player.multiplier == 1:
player.character.image = \
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump1.png'), (130, 250))
else:
player.character.image = pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump1.png'),
(130, 250)), True, False)
player.y -= 0 * momentum
elif player.current_jump > len(player.character.jump)/6 and \
player.current_jump < len(player.character.jump)/4:
if player.multiplier == 1:
player.character.image = \
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump2.png'), (130, 250))
else:
player.character.image = pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump2.png'),
(130, 250)), True, False)
player.y -= 12.2* momentum
player.x -= 10* momentum
elif player.current_jump > len(player.character.jump)/4 and \
player.current_jump < len(player.character.jump)/1.7:
if player.multiplier == 1:
player.character.image = \
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump3.png'), (130, 250))
else:
player.character.image = pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump3.png'),
(130, 250)), True, False)
player.y -= 18.1* momentum
player.x -= 8* momentum
elif player.current_jump > len(player.character.jump)/1.7 and \
player.current_jump < len(player.character.jump):
if player.multiplier == 1:
player.character.image = \
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump4.png'), (130, 250))
else:
player.character.image = pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump4.png'),
(130, 250)), True, False)
player.y -= 15.3* momentum
player.x -= 7* momentum
elif player.current_jump > len(player.character.jump):
player.y -= 12.1* momentum
player.current_jump += 12.0* momentum
elif player.isDescending:
if player.current_jump < len(player.character.jump) and \
player.current_jump > len(player.character.jump)/1.7:
player.y += 10* momentum
player.x -= 7* momentum
elif player.current_jump < len(player.character.jump)/1.7 and \
player.current_jump > len(player.character.jump)/4:
if player.multiplier == 1:
player.character.image = \
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump3.png'), (130, 250))
else:
player.character.image = pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump3.png'),
(130, 250)), True, False)
player.y += 15* momentum
player.x -= 6* momentum
elif player.current_jump < len(player.character.jump)/4 and \
player.current_jump > len(player.character.jump)/6:
player.y += 18* momentum
player.x -= 5* momentum
elif player.current_jump < len(player.character.jump)/6:
player.y += 22* momentum
player.current_jump -= 12.0* momentum
elif player.forward_jumping:
if player.isJumping:
if player.current_jump < len(player.character.jump)/6:
if player.multiplier == 1:
player.character.image = \
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump1.png'), (130, 250))
else:
player.character.image = pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump1.png'),
(130, 250)), True, False)
player.y -= 0* momentum
elif player.current_jump > len(player.character.jump)/6 and \
player.current_jump < len(player.character.jump)/4:
if player.multiplier == 1:
player.character.image = \
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump2.png'), (130, 250))
else:
player.character.image = pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump2.png'),
(130, 250)), True, False)
player.y -= 12.2* momentum
player.x += 4* momentum
elif player.current_jump > len(player.character.jump)/4 and \
player.current_jump < len(player.character.jump)/1.7:
if player.multiplier == 1:
player.character.image = \
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump3.png'), (130, 250))
else:
player.character.image = pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump3.png'),
(130, 250)), True, False)
player.y -= 17.1* momentum
player.x += 6* momentum
elif player.current_jump > len(player.character.jump)/1.7 and \
player.current_jump < len(player.character.jump):
if player.multiplier == 1:
player.character.image = \
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump4.png'), (130, 250))
else:
player.character.image = pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump4.png'),
(130, 250)), True, False)
player.y -= 15.3* momentum
player.x += 8* momentum
elif player.current_jump > len(player.character.jump):
player.y -= 12.1* momentum
player.current_jump += 12.0* momentum
elif player.isDescending:
if player.current_jump < len(player.character.jump) and \
player.current_jump > len(player.character.jump)/1.7:
player.y += 10* momentum
player.x += 8* momentum
elif player.current_jump < len(player.character.jump)/1.7 and \
player.current_jump > len(player.character.jump)/4:
if player.multiplier == 1:
player.character.image = \
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump3.png'), (130, 250))
else:
player.character.image = pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump3.png'),
(130, 250)), True, False)
player.y += 15* momentum
player.x += 6* momentum
elif player.current_jump < len(player.character.jump)/4 and \
player.current_jump > len(player.character.jump)/6:
player.y += 18* momentum
player.x += 4* momentum
elif player.current_jump < len(player.character.jump)/6:
player.y += 22* momentum
player.current_jump -= 12.0* momentum
elif player.neutral_jumping:
if player.isJumping:
if player.current_jump < len(player.character.jump)/6:
if player.multiplier == 1:
player.character.image = \
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump1.png'), (130, 250))
else:
player.character.image = pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump1.png'),
(130, 250)), True, False)
player.y -= 0* momentum
elif player.current_jump > len(player.character.jump)/6 and \
player.current_jump < len(player.character.jump)/4:
if player.multiplier == 1:
player.character.image = \
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump2.png'), (130, 250))
else:
player.character.image = pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump2.png'),
(130, 250)), True, False)
player.y -= 12.2* momentum
elif player.current_jump > len(player.character.jump)/4 and \
player.current_jump < len(player.character.jump)/1.7:
if player.multiplier == 1:
player.character.image = \
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump3.png'), (130, 250))
else:
player.character.image = pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump3.png'),
(130, 250)), True, False)
player.y -= 17.1* momentum
elif player.current_jump > len(player.character.jump)/1.7 and \
player.current_jump < len(player.character.jump):
if player.multiplier == 1:
player.character.image = \
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump4.png'), (130, 250))
else:
player.character.image = pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump4.png'),
(130, 250)), True, False)
player.y -= 15.3* momentum
elif player.current_jump > len(player.character.jump):
player.y -= 12.1* momentum
player.current_jump += 12.0* momentum
elif player.isDescending:
if player.current_jump < len(player.character.jump) and \
player.current_jump > len(player.character.jump)/1.7:
player.y += 10* momentum
elif player.current_jump < len(player.character.jump)/1.7 and \
player.current_jump > len(player.character.jump)/4:
if player.multiplier == 1:
player.character.image = \
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump3.png'), (130, 250))
else:
player.character.image = pygame.transform.flip(
pygame.transform.scale(pygame.image.load('Sprites/Gina/GinaJump3.png'),
(130, 250)), True, False)
player.y += 15* momentum
elif player.current_jump < len(player.character.jump)/4 and \
player.current_jump > len(player.character.jump)/6:
player.y += 18* momentum
elif player.current_jump < len(player.character.jump)/6:
player.y += 22* momentum
player.current_jump -= 12.0* momentum
if player.jump_attack_a and player.air_attack_once is False:
self.jump_a(player)
if player.jump_attack_b and player.air_attack_once is False:
self.jump_b(player)
if player.jump_attack_c and player.air_attack_once is False:
self.jump_c(player)
if player.current_jump >= len(player.character.jump) and player.isJumping:
player.isJumping = False
player.isDescending = True
elif player.current_jump <= 0 and player.isDescending:
if player.multiplier == 1:
player.character.image = player.character.standing[0]
else:
player.character.image = pygame.transform.flip(
player.character.standing[0], True, False)
self.hit_box.clear()
if player.off_set:
player.off_set = False
player.x += player.off_set_value
player.off_set_value = 0
player.isDescending = False
player.neutral_jumping = False
player.forward_jumping = False
player.back_jumping = False
player.jump_attack_a = False
player.jump_attack_b = False
player.jump_attack_c = False
player.air_attack_once = False
player.hit_confirm = False
player.setAction = False
player.y = player.yOriginal
player.current_jump = 0
def forward_dash(self, player):
"""
Plays the character's dash forward animation and updates their hurt box.
:param player:
:return:
"""
collide_multiplier = 1
if player.facingRight and not player.setAction:
player.multiplier = 1
player.setAction = True
elif not player.facingRight and not player.setAction:
player.multiplier = -1
player.setAction = True
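        # when the dash collides with the opponent, zero out its forward movement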
if player.dash_collide is True:
collide_multiplier = 0
if player.currentDash < len(player.character.dash)/2:
player.x += 6.7*player.multiplier*collide_multiplier
player.y -= .53
elif player.currentDash > len(player.character.dash)/2 and \
player.currentDash < len(player.character.dash)/1.5:
            player.x += 5.75*player.multiplier*collide_multiplier
elif player.currentDash > len(player.character.dash)/1.5:
player.x += 5.175*player.multiplier*collide_multiplier
player.y += 0.46
player.currentDash += 6
if player.currentDash >= len(player.character.dash):
player.isDashing = False
player.dash_collide = False
player.setAction = False
player.y = player.yOriginal
player.currentDash = 0
if player.index >= len(player.character.dash):
player.index = 0
else:
if player.multiplier == 1:
player.character.image = player.character.dash[player.index]
player.index += 1
else:
player.character.image = \
pygame.transform.flip(player.character.dash[player.index], True, False)
player.index += 1
def back_dash(self, player, x):
"""
Plays the character's back dash animation and updates their hurt box.
:param player:
:param x:
:return:
"""
if player.facingRight and not player.setAction:
player.multiplier = 1
player.setAction = True
elif not player.facingRight and not player.setAction:
player.multiplier = -1
player.setAction = True
if player.currentDash < len(player.character.backdash)/2:
player.x += 3.7*(-1)*player.multiplier
player.y -= .56
elif player.currentDash > len(player.character.backdash)/2 \
and player.currentDash < len(player.character.backdash)/1.5:
player.x += 3.35*(-1)*player.multiplier
elif player.currentDash > len(player.character.backdash)/1.5:
player.x += 3.175*(-1)*player.multiplier
player.y += .48
player.currentDash += 6
if player.currentDash >= len(player.character.backdash):
player.isBackDashing = False
player.setAction = False
player.y = player.yOriginal
player.currentDash = 0
if player.index >= len(player.character.backdash):
player.index = 0
else:
if player.multiplier == 1:
player.character.image = player.character.backdash[player.index]
player.index += 1
else:
player.character.image = \
pygame.transform.flip(player.character.backdash[player.index], True, False)
player.index += 1
# -----------------------------------------------------------------------------------------------------------------
def update(self, player, x, two):
"""
This method plays and checks all of the status of the player. If certain booleans are activated in the player
class, the character will act accordingly to that action.
:param player:
:param x:
:param two:
:return:
"""
for hits in self.hit_box:
fire_ball = isinstance(hits, GinaFireBall)
if fire_ball:
if hits.distance > 0:
if hits.distance_traveled >= hits.distance:
self.hit_box.remove(hits)
else:
if hits.distance_traveled <= hits.distance:
self.hit_box.remove(hits)
if player.block_hit:
self.being_blocked(player)
elif player.got_hit:
self.being_damaged(player)
elif player.got_air_hit:
self.being_air_damaged(player)
elif player.neutral_jumping or player.forward_jumping \
or player.back_jumping or player.isDescending:
self.jumping(player)
elif player.isDashing:
player.meter_points += 0.2
self.forward_dash(player)
elif player.isBackDashing:
self.back_dash(player, x)
elif player.grabbed:
pass
elif player.grabbing:
self.grab_attack(player)
elif player.throw:
self.throw_attack(player, two)
elif player.push_back:
self.push_back_grab(player)
elif player.crouch_attack_a:
self.crouch_a(player)
elif player.attack_a:
self.stand_a(player)
elif player.crouch_attack_b:
self.crouch_b(player)
elif player.attack_b:
self.stand_b(player)
elif player.crouch_attack_c:
self.crouch_c(player)
elif player.attack_c:
self.stand_c(player)
elif player.special_one:
self.special_one(player)
elif player.winner:
self.win_state(player)
elif player.loser:
self.lose_state(player, two)
elif player.getting_up:
self.wake_up(player)
elif x < 0:
player.x += x
if player.facingRight:
if player.index >= len(player.character.walkBackward):
player.index = 0
else:
if player.facingRight:
player.character.image = player.character.walkBackward[player.index]
player.index += 1
else:
player.character.image = \
pygame.transform.flip(player.character.walkBackward[player.index],True, False)
player.index += 1
else:
if player.index >= len(player.character.walkFoward):
player.index = 0
else:
if player.facingRight:
player.character.image = player.character.walkFoward[player.index]
player.index += 1
else:
player.character.image = \
pygame.transform.flip(player.character.walkFoward[player.index], True, False)
player.index += 1
elif x > 0:
player.x += x
if player.facingRight:
if player.index >= len(player.character.walkFoward):
player.index = 0
else:
if player.facingRight:
player.character.image = player.character.walkFoward[player.index]
player.index += 1
else:
player.character.image = \
pygame.transform.flip(player.character.walkFoward[player.index], True, False)
player.index += 1
else:
if player.index >= len(player.character.walkBackward):
player.index = 0
else:
if player.facingRight:
player.character.image = player.character.walkBackward[player.index]
player.index += 1
else:
player.character.image = \
pygame.transform.flip(player.character.walkBackward[player.index], True, False)
player.index += 1
elif x == 0:
player.x += x
            if not player.crouching:
if player.index >= len(player.character.standing):
player.index = 0
else:
if player.facingRight:
player.character.image = player.character.standing[player.index]
player.index += 1
else:
player.character.image = \
pygame.transform.flip(player.character.standing[player.index], True, False)
player.index += 1
else:
if player.index >= len(player.character.crouching):
player.index = 0
else:
if player.facingRight:
player.character.image = player.character.crouching[player.index]
player.index += 1
else:
player.character.image = \
pygame.transform.flip(player.character.crouching[player.index], True, False)
player.index += 1
| 2.71875 | 3 |
lib/tree.py | usermicrodevices/pywingui | 0 | 12788699 | <reponame>usermicrodevices/pywingui
## Copyright (c) 2003 <NAME>
## Permission is hereby granted, free of charge, to any person obtaining
## a copy of this software and associated documentation files (the
## "Software"), to deal in the Software without restriction, including
## without limitation the rights to use, copy, modify, merge, publish,
## distribute, sublicense, and/or sell copies of the Software, and to
## permit persons to whom the Software is furnished to do so, subject to
## the following conditions:
## The above copyright notice and this permission notice shall be
## included in all copies or substantial portions of the Software.
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
## NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
## LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
## OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
## WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
from pywingui.windows import *
from pywingui.wtl import *
from pywingui.comctl import *
class Tree(TreeView):
def __init__(self, *args, **kwargs):
TreeView.__init__(self, *args, **kwargs)
self.m_interceptor = self.Intercept(self.GetParent(), self._msg_map_)
def dispose(self):
self.m_interceptor.dispose()
del self.m_interceptor
def SetItemText(self, item, txt):
itemEx = TVITEMEX()
itemEx.mask = TVIF_TEXT
itemEx.hItem = item
itemEx.pszText = txt
return self.SendMessage(TVM_SETITEM, 0, byref(itemEx))
def GetItemData(self, hItem):
itemEx = TVITEMEX()
itemEx.hItem = hItem
itemEx.mask = TVIF_PARAM
self.GetItem(itemEx)
return itemEx.lParam
| 1.828125 | 2 |
13b-iterative-dont-do-that.py | jpparent/aoc2020 | 0 | 12788700 | f = open('13.txt')
rows = f.readlines()
f.close()
ids = [int(x) if x != 'x' else 0 for x in rows[1].split(',')]
t = 0
i = 0
earliestT = None
while True:
print(t,end='\r')
if i == len(ids):
break # matched all
elif ids[i] == 0:
i += 1
t += 1
continue
elif t % ids[i] == 0:
        earliestT = t if earliestT is None else earliestT
i += 1
t += 1
continue
else:
        if i == 0:
            t += 1
        else:
            # mismatch mid-sequence: resume one minute after the previous
            # candidate start, then restart the check from the first bus id
            t = earliestT + 1 if earliestT is not None else t + 1
            i = 0  # restart check
            earliestT = None
print()
print(earliestT) | 2.921875 | 3 |
Fever.py | CSID-DGU/2020-2-OSSP-Ssanhocho-7 | 0 | 12788701 | import pygame
import Levels
from Sprites import *
is_fever = False
class Fever():
global fever_score
def __init__(self):
self.is_fever = False
def feverTime(self,hero_sprites,ghost_sprites):
pygame.sprite.groupcollide(hero_sprites, ghost_sprites, False, False)
return True
| 2.875 | 3 |
movies-client.py | povstenko/movielens-process | 0 | 12788702 | """Get top N rated movies from MovieLens
This script allows user to get information about films.
This file can also be imported as a module and contains the following
functions:
* display_movies - Print data in csv format
* get_arguments - Construct the argument parser and get the arguments
* main - the main function of the script
"""
# import the necessary packages
import time
import argparse
import logging as log
from config import *
from mysql.connector import (connection)
def fetch_movies_data(cnx, n=None, regexp=None, year_from=None, year_to=None, genres=None):
""" Generator function to fetch data rows from stored procedure with arguments
Parameters
----------
cnx :
MySqlConnection to database
n : int, optional
The number of top rated movies for each genre, by default None
regexp : str, optional
Filter on name of the film, by default None
year_from : int, optional
The lower boundary of year filter, by default None
year_to : int, optional
        The upper boundary of year filter, by default None
genres : str, optional
User-defined genre filter. can be multiple, by default None
Yields
-------
tuple
row of MySqlConnector data from stored procedure
"""
log.info('fetching movies')
cursor = cnx.cursor()
    # substitute SQL NULL literals for omitted filters
    if n is None:
        n = 'NULL'
    if regexp is None:
        regexp = 'NULL'
    else:
        regexp = f"'{regexp}'"
    if year_from is None:
        year_from = 'NULL'
    if year_to is None:
        year_to = 'NULL'
    if genres is None:
        genres = 'NULL'
    else:
        genres = f"'{genres}'"
try:
query_string = f"CALL spr_find_top_rated_movies({n}, {regexp}, {year_from}, {year_to}, {genres});"
for result in cursor.execute(query_string, multi=True):
if result.with_rows:
log.debug(f'Rows produced by statement "{result.statement}":')
for row in result.fetchall():
log.debug(row)
yield row
except Exception as e:
log.exception(e)
log.debug(query_string)
cursor.close()
def display_movies(cnx, n=None, regexp=None, year_from=None, year_to=None, genres=None, delimiter=',') -> None:
""" Display movies from called stored procedure in csv format
Parameters
----------
cnx :
MySqlConnection to database
n : int, optional
The number of top rated movies for each genre, by default None
regexp : str, optional
Filter on name of the film, by default None
year_from : int, optional
The lower boundary of year filter, by default None
year_to : int, optional
        The upper boundary of year filter, by default None
genres : str, optional
User-defined genre filter. can be multiple, by default None
delimiter : str, optional
Separator of csv format, by default ','
"""
try:
column_names = ['movieId', 'title', 'genres', 'year', 'rating']
        header = delimiter.join(column_names)
print(header)
for row in fetch_movies_data(cnx, n, regexp, year_from, year_to, genres):
csv_row = ''
for attr in row:
if delimiter in str(attr):
attr = f'"{attr}"'
csv_row += delimiter + str(attr)
csv_row = csv_row[1:]
print(csv_row)
except Exception as e:
log.exception(e)
def get_arguments() -> dict:
"""Construct the argument parser and get the arguments
Returns
-------
dict
Dictionary of arguments and paramenters
"""
ap = argparse.ArgumentParser(description=__doc__)
ap.add_argument("-n", "--topN", type=int,
help="the number of top rated movies for each genre. (example: 3)")
ap.add_argument("-g", "--genres", type=str,
help="user-defined genre filter. can be multiple. (example: Comedy|Adventure)")
ap.add_argument("-f", "--year_from", type=int,
help="the lower boundary of year filter (example: 1980)")
ap.add_argument("-t", "--year_to", type=int,
help="the lower boundary of year filter (example: 2010)")
ap.add_argument("-r", "--regexp", type=str,
help="filter on name of the film (example: love)")
return vars(ap.parse_args())
def main():
log.basicConfig(level=log.getLevelName(CONFIG['logging']['level']),
filename=CONFIG['logging']['filename'],
filemode=CONFIG['logging']['filemode'],
format=CONFIG['logging']['format'],
datefmt=CONFIG['logging']['datefmt'])
log.info('Start')
# save start time for calculating
time_start = time.perf_counter()
# construct args
log.info('constructing argument parser')
args = get_arguments()
log.debug(f'arguments: {args}')
log.info('Done!')
try:
# DB connect
log.info('Opening connection to DB')
cnx = connection.MySQLConnection(**CONFIG['db_connect'])
log.info('Done!')
log.info('fetching and printing movies')
display_movies(cnx, args['topN'], args['regexp'],
args['year_from'], args['year_to'], args['genres'])
log.info('Done!')
except Exception as e:
log.error(e)
cnx.close()
log.info('Connection to DB closed')
time_elapsed = time.perf_counter() - time_start
log.info(f'Finish in {time_elapsed:.4f} secs')
if __name__ == "__main__":
main()
| 3.484375 | 3 |
OOPS_CONCEPT/Functional and Modular Progarmming/discount_product_7.py | abhigyan709/dsalgo | 1 | 12788703 | # buying 2 mobiles
# returning only 1 phone
# making sure that purchase_shoe() won't accidentally modify the global total for mobiles
# the complexity is increasing now
total_price_mobile = 0
total_price_shoe = 0
def purchase_mobile(price, brand):
global total_price_mobile
if brand == "Apple":
discount = 10
else:
discount = 5
total_price_mobile = price - price * discount / 100
print("Total price for Mobile is "+str(total_price_mobile))
def purchase_shoe(price, material):
global total_price_shoe
if material == "leather":
tax = 5
else:
tax = 2
total_price_shoe = price + price * tax / 100
print("Total price for shoe is "+str(total_price_shoe))
def return_mobile():
print("Refund price of the shoe is: ", total_price_mobile)
def return_shoe():
print("Refund price for shoe is ", total_price_shoe)
purchase_mobile(20000, "Apple")
purchase_shoe(200, "leather")
purchase_mobile(2000, "Samsung")
return_mobile()
| 3.8125 | 4 |
power/coding-challenges/hour-of-python/gerund-slicing.py | TuxedoMeow/Hello-World | 0 | 12788704 | """
Make a function gerund_infinitive that, given a string ending in "ing",
returns the rest of the string prefixed with "to ". If the string
doesn't end in "ing", return "That's not a gerund!"
>>>> gerund_infinitive("building")
to build
>>>> gerund_infinitive("build")
That's not a gerund!
"""
def gerund_infinitive(string):
# Add code here that returns the answer.
if string[-3:] == "ing":
return "to " + string[:-3]
else:
return "That's not a gerund!"
# Add more statements to test what your function does:
print(gerund_infinitive("building"))
print(gerund_infinitive("build"))
"""
to build
That's not a gerund!
""" | 4.625 | 5 |
fxwebgen/utils.py | tiliado/fxwebgen | 0 | 12788705 | <gh_stars>0
import os
from argparse import HelpFormatter
from typing import Optional
def file_mtime(path: str) -> float:
try:
return os.path.getmtime(path)
except OSError:
return -1
def abspath(base_path: Optional[str], path: str) -> str:
assert path, f'Path must be specified.'
assert base_path is None or os.path.isabs(base_path), f'Base path "{base_path}" is not absolute.'
if path.startswith('~'):
path = os.path.expanduser(path)
if os.path.isabs(path):
return path
return os.path.join(base_path, path) if base_path else os.path.abspath(path)
def get_indent(text: str) -> str:
indent = []
for char in text:
if not char.isspace():
break
indent.append(char)
return ''.join(indent)
class SmartFormatter(HelpFormatter):
def _fill_text(self, text: str, width: int, indent: int) -> str:
# noinspection PyProtectedMember
return '\n'.join(HelpFormatter._fill_text(self, line, width, indent) for line in text.splitlines())
| 2.90625 | 3 |
concat.py | voelkerb/matroska_plotter | 0 | 12788706 | <gh_stars>0
# !/usr/bin/python
import os
import sys
from os.path import basename
import argparse
from mkv.mkv import concat
def initParser():
parser = argparse.ArgumentParser()
parser.add_argument("--inFolder", type=str, default=None,
help="Folder that contains mkv files")
parser.add_argument("--inFiles", type=argparse.FileType('r'), nargs='+', default=None,
help="Path(s) to mkv files to combine")
parser.add_argument('--outpath', type=str,
help="Path to stored mkv")
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('-r', '--recursive', action='store_true')
return parser
# _______________Can be called as main__________________
if __name__ == '__main__':
parser = initParser()
args = parser.parse_args()
if args.recursive and args.inFolder is not None:
paths = [os.path.join(args.inFolder, o) for o in os.listdir(args.inFolder) if os.path.isdir(os.path.join(args.inFolder,o))]
for path in paths:
files = [os.path.join(path, o) for o in os.listdir(path) if os.path.isfile(os.path.join(path,o))]
concat(files, os.path.join(path, "joined.mkv"), verbose=args.verbose)
print(paths)
else:
files = []
if args.inFolder is not None:
files = [os.path.join(args.inFolder, o) for o in os.listdir(args.inFolder) if os.path.isfile(os.path.join(args.inFolder,o))]
elif args.inFiles is not None:
files = [file.name for file in args.inFiles]
files = [file for file in files if os.path.basename(file)[0] != "."]
concat([f for f in files], args.outpath, verbose=args.verbose)
print(("Bye Bye from " + str(os.path.basename(__file__))))
| 3.09375 | 3 |
month01/day11/exercise03.py | Amiao-miao/all-codes | 1 | 12788707 | <reponame>Amiao-miao/all-codes<gh_stars>1-10
"""
Exercise 1: Using object-oriented thinking, model the following scenario.
Xiao Ming hires a cleaner to clean the house
"""
# Xiao Ming hires a new cleaner each time
"""
class Client:
def __init__(self, name=""):
self.name=name
def engage(self):
print("雇佣")
cleaner=Cleaner()
cleaner.clean()
class Cleaner:
def clean(self):
print("打扫")
xiaoming=Client("小明")
xiaoming.engage()
"""
# Xiao Ming hires his own cleaner each time
"""
class Client:
def __init__(self, name=""):
self.name=name
self.__cleaner=Cleaner()
def engage(self):
print("雇佣")
self.__cleaner.clean()
class Cleaner:
def clean(self):
print("打扫")
xiaoming=Client("小明")
xiaoming.engage()
"""
# Xiao Ming notifies the cleaner passed in as an argument each time
class Client:
def __init__(self, name=""):
self.name = name
def engage(self,people):
print("雇佣")
people.clean()
class Cleaner:
def clean(self):
print("打扫")
xiaoming = Client("小明")
people=Cleaner()
xiaoming.engage(people)
| 3.5 | 4 |
SUAVE/SUAVE-2.5.0/trunk/SUAVE/Analyses/Energy/__init__.py | Vinicius-Tanigawa/Undergraduate-Research-Project | 0 | 12788708 | <filename>SUAVE/SUAVE-2.5.0/trunk/SUAVE/Analyses/Energy/__init__.py
## @defgroup Analyses-Energy Energy
# This is the analysis that controls energy network evaluations.
# @ingroup Analyses
from .Energy import Energy
| 1.054688 | 1 |
hardware/temperature_sensor/__init__.py | magnusnordlander/silvia-pi | 16 | 12788709 | from .EmulatedSensor import EmulatedSensor
try:
from .Max31865Sensor import Max31865Sensor
except NotImplementedError:
pass
| 1.1875 | 1 |
PositioningSolver/src/config.py | rodrigo-moliveira/PositioningSolver | 0 | 12788710 | <reponame>rodrigo-moliveira/PositioningSolver
"""Configuration handler
The configuration is a simple dictionary, with possible fallbacks
"""
from .utils.errors import ConfigError
class Config(dict):
"""Config class
inherits from dict class
"""
_instance = None
def get(self, *keys, fallback=None):
"""Retrieve a value in the config, if the value is not available
give the fallback value specified.
"""
fullkeys = list(keys).copy()
section, *keys = keys
out = super().get(section, fallback)
key = ""
while isinstance(out, dict):
key = keys.pop(0)
out = out.get(key, fallback)
if keys and out is not fallback:
raise ConfigError(
"Dict structure mismatch : Looked for '{}', stopped at '{}'".format(
".".join(fullkeys), key
)
)
return out
def set(self, *args):
"""Set a value in the config dictionary
The last argument is the value to set
Example
config.set('aaa', 'bbb', True)
# config = {'aaa': {'bbb': True}}
"""
# split arguments in keys and value
*first_keys, last_key, value = args
subdict = self
for k in first_keys:
subdict.setdefault(k, {})
subdict = subdict[k]
subdict[last_key] = value
def read_configure_json(self, filename):
# importing the module
import json
# Opening JSON file
with open(filename) as json_file:
data = json.load(json_file)
self.update(data)
config = Config()
def validate_config(algorithm_code):
from ..src.algorithms import __algorithms_config_info__
try:
config_info = __algorithms_config_info__[algorithm_code]
except KeyError:
raise ConfigError(f"key '{algorithm_code}' is not a valid algorithm code. Valid ones are "
f"{list(__algorithms_config_info__.keys())}")
# raise ConfigError(f"Missing parameter 'output1'")
| 2.765625 | 3 |
logger.py | dontsovcmc/waterius.alice | 0 | 12788711 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import os
import sys
import settings
from datetime import datetime
class Logger(object):
def __init__(self):
log = logging.getLogger('')
log.setLevel(logging.INFO)
filename = datetime.utcnow().strftime('%Y.%m.%d_%H.%M_UTC.log')
log_dir = getattr(settings, 'LOG_DIR', 'logs')
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
fh = logging.FileHandler(os.path.join(log_dir, filename), mode='w')
fh.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
log.addHandler(fh)
        # Use the console for log output
console = sys.stderr
if console is not None:
            # Output the log both to the console and to the file (simultaneously)
console = logging.StreamHandler(console)
console.setLevel(logging.INFO)
console.setFormatter(formatter)
log.addHandler(console)
Logger()
log = logging.getLogger('')
| 2.5625 | 3 |
model.py | wkhattak/Behavioural-Cloning | 0 | 12788712 | import csv
import cv2
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
import sklearn
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Convolution2D,Flatten,Dense,Lambda
from keras import optimizers
from keras import regularizers
BATCH_SIZE=128
BINS=25
BIN_RANGE=[-1.0,1.0]
EPOCHS=5
LEARNING_RATE = 0.001
LEARNING_RATE_DECAY = 0.0001
L2_REGULARIZATION = 0.001
ANGLE_CORRECTION_FACTOR = 0.20
def load_driving_log(csv_path):
'''
Loads the driving data log(csv).
Returns the line data as a string array.
'''
samples = []
with open(csv_path) as csvfile:
header_present = csv.Sniffer().has_header(csvfile.read(1024))
csvfile.seek(0) # back to first line
reader = csv.reader(csvfile)
if header_present:
next(reader) # skip the header
for line in reader:
samples.append(line)
return samples
def cleanup_data(samples):
'''
Removes any data with speed = 0.
Returns cleansed data array.
'''
cleansed_samples = []
for sample in samples:
if (float(sample[6]) != 0.0):# don't add zero speed frames
cleansed_samples.append(sample)
return cleansed_samples
def draw_angles_distribution(samples,bins,angle_range):
'''
Draws a bar chart showing the histogram of the passed in data.
Returns the left edge for each bin (apart form the last one for which right edge is returned)
and the bin value. The no. of bin edges is 'bin' + 1.
'''
angles = []
for sample in samples:
angle = float(sample[3])
angles.append(angle)
plt.figure(figsize=(14,7))
plt.ylabel('Count');
plt.xlabel('Angle');
bar_height_if_uniform_dist = len(samples)/bins
plt.plot(angle_range,[bar_height_if_uniform_dist,bar_height_if_uniform_dist])
plt.text(angle_range[0],bar_height_if_uniform_dist+50,'Uniform Distribution')
plt.title('Angle Histogram')
bin_values,bin_edges,_=plt.hist(angles,bins=bins,range=angle_range)
plt.show()
return bin_edges,bin_values
def balance_dataset(samples,bin_edges,bin_values,bins):
'''
Removes data where:
    (i) the angle is exactly +/- 1.0
    (ii) the bin count exceeds the average bin size (such samples are kept only with some probability)
Returns the balanced array of sample data.
'''
balanced_samples = []
for sample in samples:
angle = float(sample[3])
if (angle == 1.0 or angle == -1.0): # Remove extreme angles
continue
# Total bin edges are = no. of bins + 1
# Bin edges are the left most value of the bin range aprt from the last one which is the right most,
# hence check if less than
potential_bins = np.where(bin_edges < angle)
# if no bin found
if (len(potential_bins[0]) == 0):
# For catching cases where the angle is exactly -1 or +1
potential_bins = np.where(bin_edges == angle)
if (len(potential_bins[0]) == 0):
raise Exception('No bin match found for angle:{}'.format(angle))
matched_bin_index = np.max(potential_bins)
matched_bin_value = bin_values[matched_bin_index]
avg_bin_size = len(samples)/bins
# Higher the %, the more that bin gets penalized
keep_probability = 1 - ((matched_bin_value + 10*avg_bin_size)/len(samples))
if (matched_bin_value > avg_bin_size):
if (np.random.rand() < keep_probability):
balanced_samples.append(sample)
else:
balanced_samples.append(sample)
return balanced_samples
def generator(samples,data_dir,batch_size=32):
'''
Generates a batch of images and angles.
    Reads in the sample data and, for each record, adds the center, left & right images + corresponding angles.
Keep in mind that the returned batch is 3 X the passed in batch_size because for each record, 3 images are added.
The benefit of using a generator is that the entire dataset doesn't need to be processed at the same time,
rather only a subset is processed and fed to the model, which greatly helps when working with constrained memory.
'''
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
shuffle(samples)
for offset in range(0,num_samples,batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
angles = []
for line in batch_samples:
center_angle = float(line[3])
angles.append(center_angle)
left_angle = center_angle + ANGLE_CORRECTION_FACTOR
angles.append(left_angle)
right_angle = center_angle - ANGLE_CORRECTION_FACTOR
angles.append(right_angle)
center_img_path = data_dir + line[0]
center_img = cv2.cvtColor(cv2.imread(center_img_path),cv2.COLOR_BGR2RGB)
# Crop 70 pixels from top and 24 pixels from bottom, output = 66 x 320
center_img = center_img[70:136,:]
# Resize to 66 x 200 as required by nVidia architecture
center_img = cv2.resize(center_img,(200,66),interpolation = cv2.INTER_AREA)
images.append(center_img)
left_img_path = data_dir + line[1]
left_img = cv2.cvtColor(cv2.imread(left_img_path),cv2.COLOR_BGR2RGB)
# Crop 70 pixels from top and 24 pixels from bottom, output = 66 x 320
left_img = left_img[70:136,:]
# Resize to 66 x 200 as required by nVidia architecture
left_img = cv2.resize(left_img,(200,66),interpolation = cv2.INTER_AREA)
images.append(left_img)
right_img_path = data_dir + line[2]
right_img = cv2.cvtColor(cv2.imread(right_img_path),cv2.COLOR_BGR2RGB)
# Crop 70 pixels from top and 24 pixels from bottom, output = 66 x 320
right_img = right_img[70:136,:]
# Resize to 66 x 200 as required by nVidia architecture
right_img = cv2.resize(right_img,(200,66),interpolation = cv2.INTER_AREA)
images.append(right_img)
X_train = np.array(images)
y_train = np.array(angles)
# Return processed images for this batch but remember the value of local variables for next iteration
yield sklearn.utils.shuffle(X_train, y_train)
def nVidiaNet(train_generator,validation_generator,steps_per_epoch,validation_steps,save_model_dir):
'''
    Implements the nVidia CNN architecture (https://devblogs.nvidia.com/parallelforall/deep-learning-self-driving-cars/).
Returns the model history object + also saves the model as 'model.h5' in the current working directory.
'''
nVidiaModel = Sequential()
nVidiaModel.add(Lambda(lambda x:(x/255.0)-0.5,input_shape=(66,200,3)))
print('Input shape:{}'.format(nVidiaModel.input_shape))
print('Output shape - after normalization:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Convolution2D(24,(5,5),strides=(2,2),kernel_initializer='normal',kernel_regularizer=regularizers.l2(L2_REGULARIZATION),activation='elu'))
print('Output shape - after first convolution:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Convolution2D(36,(5,5),strides=(2,2),kernel_initializer='normal',kernel_regularizer=regularizers.l2(L2_REGULARIZATION),activation='elu'))
print('Output shape - after second convolution:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Convolution2D(48,(5,5),strides=(2,2),kernel_initializer='normal',kernel_regularizer=regularizers.l2(L2_REGULARIZATION),activation='elu'))
print('Output shape - after third convolution:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Convolution2D(64,(3,3),strides=(1,1),kernel_initializer='normal',kernel_regularizer=regularizers.l2(L2_REGULARIZATION),activation='elu'))
print('Output shape - after fourth convolution:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Convolution2D(64,(3,3),strides=(1,1),kernel_initializer='normal',kernel_regularizer=regularizers.l2(L2_REGULARIZATION),activation='elu'))
print('Output shape - after fifth convolution:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Flatten())
print('Output shape - after flattening:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Dense(100,kernel_initializer='normal',kernel_regularizer=regularizers.l2(L2_REGULARIZATION),activation='elu'))
print('Output shape - after first dense:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Dense(50,kernel_initializer='normal',kernel_regularizer=regularizers.l2(L2_REGULARIZATION),activation='elu'))
print('Output shape - after second dense:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Dense(10,kernel_initializer='normal',kernel_regularizer=regularizers.l2(L2_REGULARIZATION),activation='elu'))
print('Output shape - after third dense:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Dense(1))
print('Output shape - after fourth dense:{}'.format(nVidiaModel.output_shape))
adam_optzr = optimizers.Adam(lr=LEARNING_RATE,decay=LEARNING_RATE_DECAY)
nVidiaModel.compile(optimizer=adam_optzr,loss='mse',metrics = ['accuracy'])
nVidiaModel_history = nVidiaModel.fit_generator(train_generator,
validation_data=validation_generator,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
epochs=EPOCHS)
dt = datetime.now()
model_name_prefix = dt.strftime("%y-%m-%d-%H-%M")
nVidiaModel.save(save_model_dir + model_name_prefix + '-model.h5')
# Write out the model params
model_params_file = open(save_model_dir + model_name_prefix + '-model-params.txt', 'w')
model_params_file.write('EPOCHS >>> {}\n'.format(EPOCHS))
model_params_file.write('BATCH SIZE >>> {}\n'.format(BATCH_SIZE))
model_params_file.write('LEARNING RATE >>> {}\n'.format(LEARNING_RATE))
model_params_file.write('LEARNING RATE DECAY >>> {}\n'.format(LEARNING_RATE_DECAY))
model_params_file.write('ANGLE CORRECTION FACTOR >>> {}\n'.format(ANGLE_CORRECTION_FACTOR))
model_params_file.write('BINS >>> {}\n'.format(BINS))
model_params_file.write('BIN RANGE >>> {}\n'.format(BIN_RANGE))
model_params_file.close()
return nVidiaModel_history
def main():
data_dir = 'C:/Users/Admin/Desktop/Behavioral Cloning/driving-data/'
driving_log_filename = 'driving_log.csv'
save_model_dir = './saved-models/'
samples = load_driving_log(data_dir + driving_log_filename)
print('Total samples:{}'.format(len(samples)))
samples = cleanup_data(samples)
print('Total samples after removing zero angles:{}'.format(len(samples)))
bin_edges,bin_values = draw_angles_distribution(samples,BINS,BIN_RANGE)
samples = balance_dataset(samples,bin_edges,bin_values,BINS)
_,_ = draw_angles_distribution(samples,BINS,BIN_RANGE)
train_samples,validation_samples = train_test_split(samples,test_size=0.2)
# Set up the data generators
train_generator = generator(train_samples,data_dir,batch_size=BATCH_SIZE)
validation_generator = generator(validation_samples,data_dir,batch_size=BATCH_SIZE)
# As we are adding the left & right images as well, so need x 3 times
total_samples = len(samples) * 3
actual_batch_size = BATCH_SIZE * 3
len_train = len(train_samples) * 3
len_valid = len(validation_samples) * 3
steps_per_epoch = len_train/actual_batch_size
validation_steps = len_valid/actual_batch_size
print('Total number of images used for training & validation:{}'.format(total_samples))
nVidiaModel_history = nVidiaNet(train_generator,validation_generator,steps_per_epoch,validation_steps,save_model_dir)
plt.plot(nVidiaModel_history.history['loss'])
plt.plot(nVidiaModel_history.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
if __name__ == '__main__':
main() | 2.609375 | 3 |
jumping_numbers.py | zuhi/Programming | 0 | 12788713 | <gh_stars>0
# A number is called a Jumping Number if all adjacent digits in it differ by 1.
# The difference between '9' and '0' is not considered as 1.
# All single digit numbers are considered as Jumping Numbers.
# For example 7, 8987 and 4343456 are Jumping Numbers but 796 and 89098 are not.
# Given a positive number x, print all Jumping Numbers smaller than or equal to x. The numbers can be printed in any order.
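# Approach: for each candidate number, walk its digits from the right, record each
# adjacent difference, and keep the number only if every difference is +1 or -1.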
def count_(no):
ar=list()
if( no<11):
        for i in range(no + 1):  # include no itself; every number up to 10 is a jumping number
ar.append(i)
else:
for i in range(11):
ar.append(i)
for j in range(11,no+1):
num=j
n=j
count=0
diff=[]
while(num!=0):
last=num%10
                num = num // 10  # integer division (Python 3)
if(count>=1):
diff.append(last-prev)
prev=last
count += 1
check_var=0
for i in diff:
if(i==1 or i==-1):
check_var += 1
if(check_var==(count-1)):
ar.append(n)
return(ar)
print(count_(105))
| 3.84375 | 4 |
_ext/python/crawlab/client/__init__.py | crawlab-team/crawlab-python-sdk | 0 | 12788714 | from .request import *
from .response import *
from .client import *
| 1.0625 | 1 |
tests/test_node_network.py | tjjlemaire/MorphoSONIC | 0 | 12788715 | <reponame>tjjlemaire/MorphoSONIC<gh_stars>0
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Email: <EMAIL>
# @Date: 2020-01-13 19:51:33
# @Last Modified by: <NAME>
# @Last Modified time: 2021-07-27 17:47:02
import logging
from PySONIC.core import PulsedProtocol
from PySONIC.neurons import getPointNeuron
from PySONIC.test import TestBase
from PySONIC.utils import logger
from MorphoSONIC.plt import SectionCompTimeSeries, SectionGroupedTimeSeries
from MorphoSONIC.core.node import Node, DrivenNode
from MorphoSONIC.core.synapses import Exp2Synapse, FExp2Synapse, FDExp2Synapse
from MorphoSONIC.core.network import NodeCollection, NodeNetwork
from MorphoSONIC.parsers import TestNodeNetworkParser
''' Create and simulate a small network of nodes. '''
logger.setLevel(logging.INFO)
class TestNodeNetwork(TestBase):
parser_class = TestNodeNetworkParser
def runTests(self, testsets, args):
''' Run appropriate tests. '''
for s in args['subset']:
testsets[s](args['connect'])
def __init__(self):
''' Initialize network components. '''
# Point-neuron models
self.pneurons = {k: getPointNeuron(k) for k in ['RS', 'FS', 'LTS']}
# Synapse models
RS_syn_base = Exp2Synapse(tau1=0.1, tau2=3.0, E=0.0)
RS_LTS_syn = FExp2Synapse(
tau1=RS_syn_base.tau1, tau2=RS_syn_base.tau2, E=RS_syn_base.E, f=0.2, tauF=200.0)
RS_FS_syn = FDExp2Synapse(
tau1=RS_syn_base.tau1, tau2=RS_syn_base.tau2, E=RS_syn_base.E, f=0.5, tauF=94.0,
d1=0.46, tauD1=380.0, d2=0.975, tauD2=9200.0)
FS_syn = Exp2Synapse(tau1=0.5, tau2=8.0, E=-85.0)
LTS_syn = Exp2Synapse(tau1=0.5, tau2=50.0, E=-85.0)
# Synaptic connections
self.connections = {
'RS': {
'RS': (0.002, RS_syn_base),
'FS': (0.04, RS_FS_syn),
'LTS': (0.09, RS_LTS_syn)
},
'FS': {
'RS': (0.015, FS_syn),
'FS': (0.135, FS_syn),
'LTS': (0.86, FS_syn)
},
'LTS': {
'RS': (0.135, LTS_syn),
'FS': (0.02, LTS_syn)
}
}
# Driving currents
I_Th_RS = 0.17 # nA
Idrives = { # nA
'RS': I_Th_RS,
'FS': 1.4 * I_Th_RS,
'LTS': 0.0}
self.idrives = {k: (v * 1e-6) / self.pneurons[k].area for k, v in Idrives.items()} # mA/m2
# Pulsing parameters
tstim = 2.0 # s
toffset = 1.0 # s
PRF = 100.0 # Hz
DC = 1.0 # (-)
self.pp = PulsedProtocol(tstim, toffset, PRF, DC)
# Sonophore parameters
self.a = 32e-9
self.fs = 1.0
# US stimulation parameters
self.Fdrive = 500e3 # Hz
self.Adrive = 30e3 # Pa
def simulate(self, nodes, amps, connect):
# Create appropriate system
if connect:
system = NodeNetwork(nodes, self.connections)
else:
system = NodeCollection(nodes)
# Simulate system
data, meta = system.simulate(amps, self.pp)
# Plot membrane potential traces and comparative firing rate profiles
for id in system.ids:
SectionGroupedTimeSeries(id, [(data, meta)], pltscheme={'Q_m': ['Qm']}).render()
# SectionCompTimeSeries([(data, meta)], 'FR', system.ids).render()
def test_nostim(self, connect):
nodes = {k: Node(v) for k, v in self.pneurons.items()}
amps = self.idrives
self.simulate(nodes, amps, connect)
def test_nodrive(self, connect):
nodes = {k: Node(v, a=self.a, fs=self.fs) for k, v in self.pneurons.items()}
amps = {k: self.Adrive for k in self.pneurons.keys()}
self.simulate(nodes, amps, connect)
def test_full(self, connect):
nodes = {k: DrivenNode(v, self.idrives[k], Fdrive=self.Fdrive)
for k, v in self.pneurons.items()}
amps = {k: self.Adrive for k in self.pneurons.keys()}
self.simulate(nodes, amps, connect)
if __name__ == '__main__':
tester = TestNodeNetwork()
tester.main()
| 2.109375 | 2 |
notepower.py | pietro2356/NotePower | 0 | 12788716 | <reponame>pietro2356/NotePower
import tkinter as tk
# min: 12:17
# Classe controller
class NotePower:
def __init__(self, master):
master.title("Untitle - NotePower")
master.geometry("1200x700")
self.master = master
self.txtArea = tk.Text(master)
self.scroll = tk.Scrollbar(master, command=self.txtArea.yview)
self.txtArea.configure(yscrollcommand=self.scroll.set)
        self.txtArea.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
        self.scroll.pack(side=tk.RIGHT, fill=tk.Y)
if __name__ == "__main__":
master = tk.Tk()
pt = NotePower(master)
master.mainloop()
| 3.21875 | 3 |
nerd/__init__.py | vishalbelsare/ner-d | 16 | 12788717 | <reponame>vishalbelsare/ner-d
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Python module for Named Entity Recognition (NER)."""
from __future__ import absolute_import
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright (c) 2019 <NAME>"
__license__ = "MIT License"
__version__ = "0.4.0"
__url__ = "https://github.com/verifid/ner-d"
__download_url__ = "https://pypi.org/project/ner-d/"
__description__ = "Python module for Named Entity Recognition (NER)."
from nerd import ner
| 0.945313 | 1 |
scripts/post/relabel_chain.py | ss199514/qfit-3.0 | 0 | 12788718 | <reponame>ss199514/qfit-3.0<filename>scripts/post/relabel_chain.py
#!/usr/bin/env python
"""Renaming Chains in holo based on corresponding apo"""
import argparse
import os
import numpy as np
from qfit.structure import Structure
def parse_args():
p = argparse.ArgumentParser(description=__doc__)
p.add_argument("holo_str", type=str,
help="PDB-file containing structure.")
p.add_argument("apo_str", type=str,
help="PDB-file containing structure.")
p.add_argument("holo_name", type=str,
help='holo pdb name')
p.add_argument("apo_name", type=str,
                   help='apo pdb name')
# Output options
args = p.parse_args()
return args
def main():
args = parse_args()
    output_holo_file = args.holo_str[:-4] + "_renamed.pdb"
holo = Structure.fromfile(args.holo_str)
apo = Structure.fromfile(args.apo_str)
apo = apo.extract('record', 'ATOM')
output_holo = holo.extract("resi", 0, '==')
for chain_h in np.unique(holo.chain):
holo_copy = holo.copy()
tmp_h = holo.extract("chain", chain_h, '==')
tmp_h_atom = tmp_h.extract('record', 'ATOM')
dist = None
for chain_a in np.unique(apo.chain):
tmp_a = apo.extract("chain", chain_a, '==')
tot_dist = 0
for coor in tmp_h_atom.coor:
tot_dist += np.linalg.norm(tmp_a.coor - coor, axis=1)
tmp_dist = np.median(tot_dist)
if dist is None:
dist = tmp_dist
rename_chain = chain_a
else:
if dist > tmp_dist:
print('switching')
dist = tmp_dist
rename_chain = chain_a
output = holo_copy.extract("chain", chain_h, '==')
output.chain = rename_chain
output_holo = output_holo.combine(output)
del tmp_h
output_holo.tofile(output_holo_file)
if __name__ == '__main__':
main()
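# Example invocation (hypothetical file names; note that the positional
# holo_name/apo_name arguments are parsed above but not used by main()):
#   python relabel_chain.py holo.pdb apo.pdb 5C3K 5C40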
| 2.296875 | 2 |
www/generadorarboles.py | JoseManuelVargas/arandasoft-cpp-prueba-tecnica | 0 | 12788719 | <filename>www/generadorarboles.py
import random
class Nodo:
def __init__(self, valor):
self.valor = valor
self.izquierda = None
self.derecha = None
def insertar(self, valor):
q = []
q.append(self)
while (len(q)):
temp = q[0]
q.pop(0)
if not temp.izquierda:
temp.izquierda = Nodo(valor)
break
else:
q.append(temp.izquierda)
if not temp.derecha:
temp.derecha = Nodo(valor)
break
else:
q.append(temp.derecha)
def encontrar_camino(self, camino, valor):
camino.append(self.valor)
if self.valor == valor:
return True
if (self.izquierda is not None and self.izquierda.encontrar_camino(camino, valor)) or (self.derecha is not None and self.derecha.encontrar_camino(camino, valor)):
return True
camino.pop()
return False
def calcular_ancestro(self, n1, n2):
camino1 = []
camino2 = []
if not self.encontrar_camino(camino1, n1) or not self.encontrar_camino(camino2, n2):
return -1
i = 0
while i < len(camino1) and i < len(camino2):
if camino1[i] != camino2[i]:
break
i += 1
return camino1[i-1]
if __name__ == "__main__":
texto = ""
for i in range(500):
longitud = random.randint(3, 60)
valores = set()
for i in range(longitud):
valores.add(random.randint(1, 1000))
valores = list(valores)
arbol = Nodo(valores[0])
for valor in valores[1:]:
arbol.insertar(valor)
texto += "a\n"
texto += ",".join(str(x) for x in valores) + "\n"
ancestros = random.randint(5, 10)
for i in range(ancestros):
nodo1 = random.choice(valores)
nodo2 = random.choice(valores)
ancestro = arbol.calcular_ancestro(nodo1, nodo2)
texto += f"{nodo1},{nodo2},{ancestro}\n"
with open("conjuntoarboles.dat", "w") as archivo:
archivo.write(texto)
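# Quick sanity check (hypothetical values): with level-order insertion the root
# paths to 2 and 3 are [1, 2] and [1, 3], so their lowest common ancestor is 1:
#   arbol = Nodo(1); arbol.insertar(2); arbol.insertar(3)
#   assert arbol.calcular_ancestro(2, 3) == 1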
| 3.546875 | 4 |
errgrep/log_line.py | csm10495/errgrep | 0 | 12788720 | import functools
import pathlib
import queue
import re
import sys
import time
import typing
from .line_timestamper import LineTimestamper
from .non_blocking_read_thread import stdin_read_thread
class LogLine:
def __init__(self, raw_text=None, raw_text_lines=None,
log_file=None, read_from_stdin=False, previous_line:typing.Optional[typing.TypeVar('LogLine')]=None,
line_timestamper:typing.Optional[LineTimestamper]=None, max_seconds_till_line_split:float=1,
next_line_index:int=0, allow_timestamp_format_changes:bool=False):
'''
If a - is given as the log_file, will read from stdin, (and ignore read_from_stdin)
'''
if (raw_text and log_file and raw_text_lines and read_from_stdin) or \
(raw_text is None and log_file is None and raw_text_lines is None and read_from_stdin is False):
raise ValueError("Please provide either raw_text or log_file or raw_text_lines... \
not more or less than one. Or we can use read_from_stdin without one of the others.")
# splitlines() is slow on big inputs... try to minimize how often we do it
self.raw_text_lines = []
self.read_from_stdin = read_from_stdin
self.next_line_index = next_line_index
if raw_text_lines:
self.raw_text_lines = raw_text_lines
elif raw_text:
self.raw_text_lines = raw_text.splitlines()
elif log_file:
if log_file == '-':
self.read_from_stdin = True
else:
self.raw_text_lines = pathlib.Path(log_file).read_text().splitlines()
# We can read_from_stdin AFTER raw_text_lines
if self.read_from_stdin:
stdin_read_thread.start_if_not_started_yet()
# when reading from stdin, we wait at most this much time before assuming a log line split
self.max_seconds_till_line_split = max_seconds_till_line_split
self.timestamp = None
self.log_line_lines = []
self.log_message = ''
self.previous_line = previous_line
self.line_timestamper = line_timestamper or LineTimestamper(allow_timestamp_format_changes=allow_timestamp_format_changes)
self._parse()
def _iter_lines(self):
''' yields a line from the given place... if it yields a None, assume that a line break happened '''
if self.raw_text_lines:
for idx in range(self.next_line_index, len(self.raw_text_lines), 1):
yield self.raw_text_lines[idx]
if self.read_from_stdin:
break_force_time = time.time() + self.max_seconds_till_line_split
while stdin_read_thread.is_alive():
try:
line = stdin_read_thread.lines_queue.get_nowait()
self.raw_text_lines.append(line)
break_force_time = time.time() + self.max_seconds_till_line_split
yield line
except queue.Empty:
if time.time() > break_force_time:
break_force_time = time.time() + self.max_seconds_till_line_split
yield None
time.sleep(.0001)
def _parse(self):
self.log_line_lines = []
# Key Assumption:
# All lines without timestamp are part of this log statement
for line in self._iter_lines():
if line is None:
# force a line break right now... timestamp should be set from earlier on
break
timestamp = self.line_timestamper.coerce_datetime_from_line(line)
if timestamp:
if len(self.log_line_lines) == 0:
self.timestamp = timestamp
self.log_line_lines.append(line)
else:
# new timestamp means we're done
break
else:
self.log_line_lines.append(line)
self.log_message = '\n'.join(self.log_line_lines)
@functools.lru_cache(maxsize=100)
def get_next_log_line(self) -> typing.Optional[typing.TypeVar('LogLine')]:
'''
Returns the next LogLine in the log.
Returns None if there is no more available
'''
new_next_line_index = self.next_line_index + len(self.log_line_lines)
if (new_next_line_index < len(self.raw_text_lines)) or (self.read_from_stdin and stdin_read_thread.is_alive()):
return LogLine(raw_text_lines=self.raw_text_lines,
previous_line=self,
read_from_stdin=self.read_from_stdin,
line_timestamper=self.line_timestamper,
next_line_index=new_next_line_index)
def iter_log_lines_with_regex(self, regex, ignore_case=True):
'''
        Goes through all LogLines, checking whether the message matches the regex,
        and yields each matching LogLine.
'''
current_line = self
regex_c = re.compile(regex, flags=re.IGNORECASE if ignore_case else 0)
# walk through all lines
while current_line is not None:
if re.findall(regex_c, current_line.log_message):
yield current_line
current_line = current_line.get_next_log_line()
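# Minimal usage sketch (hypothetical log text; assumes LineTimestamper can parse
# the leading "YYYY-MM-DD HH:MM:SS" timestamps), built only from the API above:
#   first = LogLine(raw_text="2021-01-01 00:00:00 start\n2021-01-01 00:00:01 error: boom")
#   for match in first.iter_log_lines_with_regex(r"error"):
#       print(match.timestamp, match.log_message)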
| 2.84375 | 3 |
osm_graphml_downloader_cli.py | maptastik/osm_graphml_downloader | 0 | 12788721 | <filename>osm_graphml_downloader_cli.py
from time import perf_counter
start = perf_counter()
import os
import shutil
import gzip
import click
from datetime import timedelta
from osm_graphml_downloader import osm_graphml_downloader
@click.command()
@click.option('-x', '--network_type', help = 'Network type: "walk", "bike", "drive", "drive_service", "all", "all_private", "none"')
@click.option('-o', '--out_dir', help = 'Path to directory where you want the results to go')
@click.option('-f', '--filename', default = None, help = 'Name of .graphml ouput file')
@click.option('-n', '--north', help = 'Area of interest north extent', type = float)
@click.option('-s', '--south', help = 'Area of interest south extent', type = float)
@click.option('-e', '--east', help = 'Area of interest east extent', type = float)
@click.option('-w', '--west', help = 'Area of interest west extent', type = float)
@click.option('--custom_filter', help = 'Custom filter')
@click.option('--reproject', is_flag = True, help = 'Option to reproject the graph from WGS84 to something else')
@click.option('--epsg_code', help = 'EPSG code value. Only used if --reproject flag is used')
@click.option('--simplify', is_flag = True, help = 'Option to download a simplified network.')
@click.option('--compress', is_flag = True, help = 'Compress final output using gzip.')
def main(network_type, out_dir, filename, north, south, east, west, custom_filter, reproject, epsg_code, simplify, compress):
if epsg_code is not None:
epsg_code = int(epsg_code)
osm_graphml_downloader(network_type = network_type,
out_dir = out_dir,
filename = filename,
bbox = [west, south, east, north],
custom_filter = custom_filter,
reproject = reproject,
epsg_code = epsg_code,
simplify = simplify)
if compress:
print("Compressing results...")
if filename is None:
filename = f'graph_{network_type}.graphml'
else:
filename_no_extension = filename.split('.')[0]
filename = f'{filename_no_extension}.graphml'
print('filename:', filename)
downloaded_graph = os.path.join(out_dir, filename)
print('downloaded_graph:', downloaded_graph)
try:
with open(downloaded_graph, 'rb') as f_in:
with gzip.open(f"{downloaded_graph}.gz", 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(downloaded_graph)
except Exception as e:
print(e)
end = perf_counter()
completion_seconds = end - start
print(f"Elapsed time: {timedelta(seconds = completion_seconds)}")
if __name__ == "__main__":
main() | 2.671875 | 3 |
test/countries/test_canada.py | OmoMicheal/marketanalysis | 2 | 12788722 | <reponame>OmoMicheal/marketanalysis
# -*- coding: utf-8 -*-
# marketanalysis
# ----------------
# A fast, efficient Python library for generating country, province and state
# specific sets of market holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Author: MichealOmojola <<EMAIL>>
# Website: https://github.com/OmoMicheal/trading_days
# License: MIT (see LICENSE file)
# Version: 0.1 (April 7, 2021)
import unittest
from datetime import date
from dateutil.relativedelta import relativedelta
# import sys
# sys.path.insert(0, 'C:/Users/momojola/projects/marketanalysis/marketanalysis/')
from marketanalysis import marketholidays
from marketanalysis import markettradingdays
class TestCA(unittest.TestCase):
# https://www.thecse.com/en/trading/market-operations
def setUp(self):
self.marketholidayss = marketholidays.CA(observed=False)
self.markettradingdayss = markettradingdays.CA()
def test_new_years(self):
self.assertNotIn(date(2010, 12, 31), self.marketholidayss)
self.assertNotIn(date(2017, 1, 2), self.marketholidayss)
self.marketholidayss.observed = True
self.assertIn(date(2010, 12, 31), self.marketholidayss)
self.assertIn(date(2017, 1, 2), self.marketholidayss)
self.marketholidayss.observed = False
for year in range(1900, 2100):
dt = date(year, 1, 1)
self.assertIn(dt, self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss)
def test_martin_luther(self):
for dt in [
date(1986, 1, 20),
date(1999, 1, 18),
date(2000, 1, 17),
date(2012, 1, 16),
date(2013, 1, 21),
date(2014, 1, 20),
date(2015, 1, 19),
date(2016, 1, 18),
date(2020, 1, 20),
]:
self.assertIn(dt, self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss)
def test_family_day(self):
ab_marketholidayss = marketholidays.CA()
for dt in [
date(1990, 2, 19),
date(1999, 2, 15),
date(2000, 2, 21),
date(2006, 2, 20),
]:
self.assertIn(dt, self.marketholidayss)
self.assertIn(dt, ab_marketholidayss)
dt = date(2007, 2, 19)
self.assertIn(dt, self.marketholidayss)
self.assertIn(dt, ab_marketholidayss)
for dt in [
date(2008, 2, 18),
date(2012, 2, 20),
date(2014, 2, 17),
date(2018, 2, 19),
]:
self.assertIn(dt, self.marketholidayss)
self.assertIn(dt, ab_marketholidayss)
for dt in [date(2019, 2, 18), date(2020, 2, 17)]:
self.assertIn(dt, self.marketholidayss)
self.assertIn(dt, ab_marketholidayss)
for dt in [date(2013, 2, 11), date(2016, 2, 8)]:
self.assertNotIn(dt, self.marketholidayss)
self.assertNotIn(dt, ab_marketholidayss)
def test_good_friday(self):
qc_marketholidayss = marketholidays.CA()
for dt in [
date(1900, 4, 13),
date(1901, 4, 5),
date(1902, 3, 28),
date(1999, 4, 2),
date(2000, 4, 21),
date(2010, 4, 2),
date(2018, 3, 30),
date(2019, 4, 19),
date(2020, 4, 10),
]:
self.assertIn(dt, self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss)
self.assertIn(dt, qc_marketholidayss)
def test_victoria_day(self):
for dt in [
date(1953, 5, 18),
date(1999, 5, 24),
date(2000, 5, 22),
date(2010, 5, 24),
date(2015, 5, 18),
date(2020, 5, 18),
]:
self.assertIn(dt, self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss)
def test_civic_holiday(self):
bc_marketholidayss = marketholidays.CA()
for dt in [date(1900, 8, 6), date(1955, 8, 1), date(1973, 8, 6)]:
self.assertIn(dt, self.marketholidayss)
self.assertIn(dt, bc_marketholidayss)
for dt in [
date(1974, 8, 5),
date(1999, 8, 2),
date(2000, 8, 7),
date(2010, 8, 2),
date(2015, 8, 3),
date(2020, 8, 3),
]:
self.assertIn(dt, self.marketholidayss)
self.assertIn(dt, bc_marketholidayss)
def test_labour_day(self):
self.assertNotIn(date(1893, 9, 4), self.marketholidayss)
for dt in [
date(1894, 9, 3),
date(1900, 9, 3),
date(1999, 9, 6),
date(2000, 9, 4),
date(2014, 9, 1),
date(2015, 9, 7),
]:
self.assertIn(dt, self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss)
def test_thanksgiving(self):
ns_marketholidayss = marketholidays.CA()
for dt in [
date(1931, 10, 12),
date(1990, 10, 8),
date(1999, 10, 11),
date(2000, 10, 9),
date(2013, 10, 14),
date(2020, 10, 12),
]:
self.assertIn(dt, self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss)
self.assertIn(dt, ns_marketholidayss)
def test_remembrance_day(self):
ab_marketholidayss = marketholidays.CA(observed=False)
self.assertNotIn(date(1930, 11, 11), ab_marketholidayss)
for year in range(1931, 2100):
dt = date(year, 11, 11)
self.assertNotIn(dt, self.marketholidayss)
self.assertNotIn(dt, ab_marketholidayss)
self.assertNotIn(date(2007, 11, 12), ab_marketholidayss)
ab_marketholidayss.observed = True
self.assertNotIn(date(2007, 11, 12), ab_marketholidayss)
def test_christmas_day(self):
for year in range(1900, 2100):
dt = date(year, 12, 25)
self.assertIn(dt, self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss)
self.assertNotIn(date(2010, 12, 24), self.marketholidayss)
self.marketholidayss.observed = True
self.assertEqual(self.marketholidayss[date(2011, 12, 26)], "Christmas Day (Observed)")
self.assertIn(date(2010, 12, 24), self.marketholidayss)
def test_boxing_day(self):
for year in range(1900, 2100):
dt = date(year, 12, 26)
self.assertIn(dt, self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss)
self.assertNotIn(date(2009, 12, 28), self.marketholidayss)
self.assertNotIn(date(2010, 12, 27), self.marketholidayss)
self.marketholidayss.observed = True
self.assertIn(date(2009, 12, 28), self.marketholidayss)
self.assertIn(date(2010, 12, 27), self.marketholidayss)
def test_future_list(self):
current_date = '2021-04-13'
lookup_step = 10
self.assertIn(date(2021, 4, 16), self.markettradingdayss.future_list(current_date, lookup_step))
self.assertNotIn(date(2021, 4, 18), self.markettradingdayss.future_list(current_date, lookup_step))
def test_prevDays(self):
current_date = '2021-04-13'
lookback_step = 4
self.assertIn(date(2021, 4, 9), self.markettradingdayss.prevDays(current_date, lookback_step))
self.assertNotIn(date(2021, 4, 11), self.markettradingdayss.prevDays(current_date, lookback_step))
def test_BtwDates(self):
current_date = '2021-04-13'
future_date = '2021-04-20'
self.assertIn(date(2021, 4, 15), self.markettradingdayss.BtwDates(current_date, future_date))
self.assertNotIn(date(2021, 4, 18), self.markettradingdayss.BtwDates(current_date, future_date))
# if __name__ == "__main__":
# unittest.main() | 2.53125 | 3 |
kernel/components/binning/horzfeaturebinning/param.py | rinceyuan/WeFe | 39 | 12788723 | # Copyright 2021 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kernel.components.binning.vertfeaturebinning.param import FeatureBinningParam, TransformParam
from kernel.utils import consts
class HorzFeatureBinningParam(FeatureBinningParam):
def __init__(self, method=consts.VIRTUAL_SUMMARY,
compress_thres=consts.DEFAULT_COMPRESS_THRESHOLD,
head_size=consts.DEFAULT_HEAD_SIZE,
error=consts.DEFAULT_RELATIVE_ERROR,
sample_bins=100,
bin_num=consts.G_BIN_NUM, bin_indexes=-1, bin_names=None, adjustment_factor=0.5,
transform_param=TransformParam(),
category_indexes=None, category_names=None,
need_run=True, max_iter=100):
super(HorzFeatureBinningParam, self).__init__(method=method, compress_thres=compress_thres,
head_size=head_size, error=error,
bin_num=bin_num, bin_indexes=bin_indexes,
bin_names=bin_names, adjustment_factor=adjustment_factor,
transform_param=transform_param,
category_indexes=category_indexes, category_names=category_names,
need_run=need_run)
self.sample_bins = sample_bins
self.max_iter = max_iter
def check(self):
descr = "horz binning param's"
super(HorzFeatureBinningParam, self).check()
self.check_string(self.method, descr)
self.method = self.method.lower()
self.check_valid_value(self.method, descr, [consts.VIRTUAL_SUMMARY, consts.RECURSIVE_QUERY])
self.check_positive_integer(self.max_iter, descr)
if self.max_iter > 100:
raise ValueError("Max iter is not allowed exceed 100")
def set_bin_index(self, header):
self.bin_indexes = [header.index(name) for name in self.bin_names]
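# Minimal usage sketch (hypothetical values), based only on the constructor and
# validation logic defined above:
#   param = HorzFeatureBinningParam(method=consts.RECURSIVE_QUERY, bin_num=16, max_iter=50)
#   param.check()  # lower-cases and validates the method name, enforces max_iter <= 100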
| 1.515625 | 2 |
nova/tests/functional/compute/test_aggregate_api.py | ChameleonCloud/nova | 1 | 12788724 | <filename>nova/tests/functional/compute/test_aggregate_api.py<gh_stars>1-10
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.compute import api as compute_api
from nova import context
from nova import exception
from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests import uuidsentinel as uuids
class ComputeAggregateAPIMultiCellTestCase(test.NoDBTestCase):
"""Tests for the AggregateAPI with multiple cells allowing either service
hosts or compute nodes to be associated with an aggregate.
"""
USES_DB_SELF = True
def setUp(self):
super(ComputeAggregateAPIMultiCellTestCase, self).setUp()
self.agg_api = compute_api.AggregateAPI()
self.useFixture(nova_fixtures.Database(database='api'))
celldbs = nova_fixtures.CellDatabases()
celldbs.add_cell_database(objects.CellMapping.CELL0_UUID)
celldbs.add_cell_database(uuids.cell1, default=True)
celldbs.add_cell_database(uuids.cell2)
self.useFixture(celldbs)
self.ctxt = context.get_admin_context()
cell0 = objects.CellMapping(
context=self.ctxt, uuid=objects.CellMapping.CELL0_UUID,
database_connection=objects.CellMapping.CELL0_UUID,
transport_url='none:///')
cell0.create()
cell1 = objects.CellMapping(
context=self.ctxt, uuid=uuids.cell1,
database_connection=uuids.cell1, transport_url='none:///')
cell1.create()
cell2 = objects.CellMapping(
context=self.ctxt, uuid=uuids.cell2,
database_connection=uuids.cell2, transport_url='none:///')
cell2.create()
self.cell_mappings = (cell0, cell1, cell2)
# create two Ironic nodes managed by a single nova-compute service host
# in each of the non-cell0 cells
for cell_id, cell in enumerate(self.cell_mappings[1:]):
with context.target_cell(self.ctxt, cell) as cctxt:
hostname = 'ironic_host_cell%s' % (cell_id + 1)
svc = objects.Service(cctxt, host=hostname,
binary='nova-compute',
topic='nova-compute')
svc.create()
for node_id in (1, 2):
nodename = 'ironic_node_cell%s_%s' % (cell_id + 1, node_id)
compute_node_uuid = getattr(uuids, nodename)
node = objects.ComputeNode(
cctxt, uuid=compute_node_uuid, host=hostname,
vcpus=2, memory_mb=2048, local_gb=128, vcpus_used=0,
memory_mb_used=0, local_gb_used=0, cpu_info='{}',
hypervisor_type='ironic', hypervisor_version=10,
hypervisor_hostname=nodename)
node.create()
# create a compute node for VMs along with a corresponding nova-compute
# service host in cell1
with context.target_cell(self.ctxt, cell1) as cctxt:
hostname = 'vm_host_cell1_1'
svc = objects.Service(cctxt, host=hostname,
binary='nova-compute',
topic='nova-compute')
svc.create()
compute_node_uuid = getattr(uuids, hostname)
node = objects.ComputeNode(
cctxt, uuid=compute_node_uuid, host=hostname,
vcpus=2, memory_mb=2048, local_gb=128, vcpus_used=0,
memory_mb_used=0, local_gb_used=0, cpu_info='{}',
hypervisor_type='libvirt', hypervisor_version=10,
hypervisor_hostname=hostname)
node.create()
def test_service_hostname(self):
"""Test to make sure we can associate and disassociate an aggregate
with a service host.
"""
agg = objects.Aggregate(self.ctxt, name="rack1_baremetal")
agg.create()
agg_id = agg.id
# There is no such service host called unknown_host_cell1, so should
# get back a ComputeHostNotFound
self.assertRaises(exception.ComputeHostNotFound,
self.agg_api.add_host_to_aggregate, self.ctxt,
agg_id, 'unknown_host_cell1')
self.assertRaises(exception.ComputeHostNotFound,
self.agg_api.remove_host_from_aggregate, self.ctxt,
agg_id, 'unknown_host_cell1')
hosts = ('ironic_host_cell1', 'ironic_host_cell2', 'vm_host_cell1_1')
for service_host in hosts:
self.agg_api.add_host_to_aggregate(self.ctxt, agg_id, service_host)
self.agg_api.remove_host_from_aggregate(self.ctxt, agg_id,
service_host)
def test_compute_nodename(self):
"""Test to make sure we can associate and disassociate an aggregate
with a compute node by its hypervisor_hostname.
"""
agg = objects.Aggregate(self.ctxt, name="rack1_baremetal")
agg.create()
agg_id = agg.id
# There is no such compute node called unknown_host_cell1, so should
# get back a ComputeHostNotFound
self.assertRaises(exception.ComputeHostNotFound,
self.agg_api.add_host_to_aggregate, self.ctxt,
agg_id, getattr(uuids, 'unknown_node_cell1'))
self.assertRaises(exception.ComputeHostNotFound,
self.agg_api.remove_host_from_aggregate, self.ctxt,
agg_id, getattr(uuids, 'unknown_host_cell1'))
nodenames = ('ironic_node_cell1_2', 'ironic_node_cell2_1',
'vm_host_cell1_1')
for nodename in nodenames:
self.agg_api.add_host_to_aggregate(self.ctxt, agg_id, nodename)
self.agg_api.remove_host_from_aggregate(self.ctxt, agg_id,
nodename)
| 2.03125 | 2 |
scripts/_evaluate.py | leichtrhino/torch-chimera | 0 | 12788725 |
import os
import sys
import torch
try:
import torchchimera
except:
# attempts to import local module
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
import torchchimera
from torchchimera.datasets import FolderTuple
from torchchimera.metrics import eval_snr
from torchchimera.metrics import eval_si_sdr
from _model_io import load_model
from _training_common import AdaptedChimeraMagPhasebook
from _training_common import exclude_silence
def add_evaluation_io_argument(parser):
parser.add_argument('--data-dir', nargs='+', required=True, help="directory of validation dataset")
parser.add_argument('--input-checkpoint', help='input checkpoint file')
parser.add_argument('--batch-size', type=int, default=1, help='batch size')
parser.add_argument('--output-file', help='output file')
parser.add_argument('--log-file', help='log file')
parser.add_argument('--permutation-free', action='store_true', help='enable permutation-free evaluation function')
return parser
def validate_evaluation_io_argument(args, parser):
for d in args.data_dir:
if not os.path.isdir(d):
parser.error(f'"{d}" is not a directory')
if args.input_checkpoint and not os.path.isfile(args.input_checkpoint):
parser.error(f'input checkpoint "{args.input_checkpoint}" is not a file')
if args.batch_size <= 0:
parser.error('batch size must be positive')
return args
def evaluate(args):
# build dataset
dataset = FolderTuple(args.data_dir, args.sr, args.segment_duration)
loader = torch.utils.data.DataLoader(
dataset, batch_size=args.batch_size, shuffle=False
)
# load a model
model, update_args = load_model(
args.input_checkpoint, 'ChimeraMagPhasebook',
stft_setting=args.stft_setting
)
if args.bin_num != update_args['bin_num']:
        bin_num = update_args['bin_num']
raise RuntimeError(
'the number of fft bin of input model and parameter are different '
f'--n-fft {(bin_num-1)*2} would work'
)
if len(args.data_dir) != update_args['n_channel']:
raise RuntimeError(
'the number of channels of the input model '
'and the output files are different'
)
model.to(args.device)
model.eval()
if args.permutation_free:
eval_snr = torchchimera.metrics.permutation_free(
            torchchimera.metrics.eval_snr, aggregate_function=max
)
eval_si_sdr = torchchimera.metrics.permutation_free(
torchchimera.metrics.eval_si_sdr, aggregate_function=max
)
else:
eval_snr = torchchimera.metrics.eval_snr
eval_si_sdr = torchchimera.metrics.eval_si_sdr
# evaluation loop
if args.output_file is None:
of = sys.stdout
else:
of = open(args.output_file, 'w')
print('segment,channel,snr,si-sdr', file=of)
with torch.no_grad():
for batch_i, s in enumerate(loader, 0):
scale = torch.sqrt(
s.shape[-1] / torch.sum(s**2, dim=-1).clamp(min=1e-32)
)
scale_mix = 1. / torch.max(
torch.sum(scale.unsqueeze(-1) * s, dim=1).abs(), dim=-1
)[0]
scale_mix = torch.min(scale_mix, torch.ones_like(scale_mix))
scale *= scale_mix.unsqueeze(-1)
s *= scale.unsqueeze(-1) * 0.98
s = s.to(args.device)
_, _, shat, _ = model(s.sum(dim=1))
waveform_length = min(s.shape[-1], shat.shape[-1])
s = s[:, :, :waveform_length]
shat = shat[:, :, :waveform_length]
snr = eval_snr(shat, s)
si_sdr = eval_si_sdr(shat, s)
for i, (_snr, _si_sdr) in enumerate(zip(snr, si_sdr), 1):
sample_i = batch_i * args.batch_size + i
for channel_i, (__snr, __si_sdr) in \
enumerate(zip(_snr, _si_sdr), 1):
print(f'{sample_i},{channel_i},{__snr},{__si_sdr}', file=of)
of.close()
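# Example invocation (hypothetical paths; only the flags added by
# add_evaluation_io_argument are shown, while --sr, --segment-duration and the
# STFT/device options are assumed to come from other argument groups in this package):
#   python _evaluate.py --data-dir mix/s1 mix/s2 --input-checkpoint model.pth \
#       --batch-size 8 --output-file scores.csv --permutation-free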
| 2.140625 | 2 |
lithium/manage/templates/config.py | PressLabs/lithium | 2 | 12788726 | API_VERSION = '1'
SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:'
DEBUG = True
| 1.21875 | 1 |
Flyon/CCKS_CRF/eval/onefile.py | kyzhouhzau/CCKS-2018- | 89 | 12788727 | import glob
import os
# files = glob.glob("./finall/*")
# files = sorted(files)
# print(files)
import codecs
with open("result.txt",'w') as wf:
for num in range(1,401):
file = "./finall/入院记录现病史-"+str(num)+".txt"
with codecs.open(file,'r',encoding='utf-8') as rf:
for i,line in enumerate(rf):
name = os.path.basename(file)
name1 = name.split('.')[0]
name2 = name1.split('-')[-1]
line = line.strip()
if i == 0:
wf.write("{},{};".format(int(name2),line))
else:
wf.write("{};".format(line))
wf.write('\n') | 3.15625 | 3 |
July21/EssentialPython/classesandobjects/calc.py | pythonbykhaja/intesivepython | 2 | 12788728 | class Calculator:
def __init__(self) -> None:
self.memory = list()
def add(self, *args):
result = 0
for number in args:
result += number
self.append_to_memory(result)
return result
def multiply(self, *args):
result = 1
for number in args:
result *= number
self.append_to_memory(result)
return result
def sub(self, number1, number2):
result = number1 - number2
self.append_to_memory(result)
return result
def div(self, number1, number2):
result = number1/number2
self.append_to_memory(result)
        return result
def append_to_memory(self,result):
self.memory.append(result)
def memory_reset(self):
self.memory.clear()
def memory_op(self, index):
return self.memory[index]
calc = Calculator()
calc.add(1,2,3,4,5)
calc.sub(5,1)
calc.div(4,2)
calc.multiply(1,2,3)
print(calc.memory_op(0))
print(calc.memory_op(-1))
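# Every operation above is recorded in memory; index 2 holds the div result:
print(calc.memory_op(2))  # 2.0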
| 3.8125 | 4 |
models/LSTM/LSTM_Model.py | yassienshaalan/DTOPS | 3 | 12788729 | from keras.models import Sequential, load_model
from keras.callbacks import History, EarlyStopping, Callback
from keras.layers.recurrent import LSTM
from keras.layers import Bidirectional
from keras.losses import mse, binary_crossentropy, cosine
from keras.layers.core import Dense, Activation, Dropout
import numpy as np
import os
from matplotlib import pyplot as plt
from tensorflow import keras
import tensorflow as tf
class LSTM_NETWORK(object):
def __init__(self, input_dim,layers,batch_size=32,l_s=5,l_p=1):
"""input_dim_list must include the original data dimension"""
assert len(layers) >= 2
self.l_s = l_s
self.l_p = l_p
self.batch_size = batch_size
        self.loss = 0  # 0 for MSE, 1 for cosine similarity
self.cbs = [History(),EarlyStopping(monitor='val_loss', patience=5, min_delta=0.0003, verbose=0)]
model = Sequential()
model.add((LSTM(layers[0], input_shape=(l_s, input_dim),
return_sequences=True)))
#return_sequences=True)))
model.add(Dropout(0.3))
model.add(LSTM(layers[1], return_sequences=True))#return_sequences=True))
model.add(Dropout(0.3))
model.add(Dense(self.l_p*input_dim))
model.add(Activation("linear"))
# model.add(Dense(activation='linear', units=y_train.shape[2]))
if self.loss == 0:
model.compile(loss='mse', optimizer='adam')
else:
loss_fn = keras.losses.CosineSimilarity()
model.compile(loss=loss_fn, optimizer='adam')
# print("here is model summary")
#print(model.summary())
self.model = model
return
def create_one_layer_model(self,input_dim,layers,batch_size=32,l_s=5,l_p=1):
assert len(layers) >= 2
self.l_s = l_s
self.l_p = l_p
self.batch_size = batch_size
self.cbs = [History(),EarlyStopping(monitor='val_loss', patience=15, min_delta=0.0003, verbose=0)]
model = Sequential()
model.add((LSTM(layers[0], input_shape=(None, input_dim))))
model.add(Dropout(0.3))
model.add(Dense(self.l_p*input_dim))
model.add(Activation("linear"))
# model.add(Dense(activation='linear', units=y_train.shape[2]))
if self.loss == 0:
model.compile(loss='mse', optimizer='adam')
else:
loss_fn = keras.losses.CosineSimilarity()
model.compile(loss=loss_fn, optimizer='adam')
#import tensorflow as tf
#model.compile(loss=tf.keras.losses.CosineSimilarity(), optimizer='adam')
# print("here is model summary")
#print(model.summary())
#print("this is neww model")
self.model = model
return
def fit(self, X,y, epochs=100,validation_split=0.15, verbose=False,model_num=-1):
history = self.model.fit(X, y, batch_size=self.batch_size, epochs=epochs,
validation_split=validation_split, verbose=verbose, callbacks=self.cbs)
#print(history.history.keys())
# "Accuracy"
'''
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
'''
# "Loss"
'''
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
'''
if model_num!=-1:
self.model.save("LSTM_v"+str(model_num)+".h5")
return
def load_model(self,num):
self.model=load_model(os.path.join("", "LSTM_v"+ str(num)+ ".h5"))
return self.model
def predict(self, X_test):
'''
Used trained LSTM model to predict test data arriving in batches
Args:
X_test (np array): numpy array of test inputs with dimensions [timesteps, l_s, input dimensions)
Returns:
y_hat (np array): predicted test values for each timestep in y_test
'''
print("Predicting by Patch")
y_hat = []#np.array([[[]]])
# print("y_hat intially",y_hat.shape)
num_batches = int((X_test.shape[0] - self.l_s) / self.batch_size)
print("number of batches",num_batches)
if num_batches < 0:
raise ValueError("l_s (%s) too large for stream with length %s." % (self.l_s, y_test.shape[0]))
# simulate data arriving in batches
for i in range(1, num_batches + 2):
#print("Inside the loop")
prior_idx = (i - 1) * self.batch_size
idx = i * self.batch_size
if i == num_batches + 1:
idx = X_test.shape[0] # remaining values won't necessarily equal batch size
X_test_period = X_test[prior_idx:idx]
#print("Predict for batch")
#print("X_test_period",type(X_test_period),len(X_test_period))
y_hat_period = self.model.predict(X_test_period)
#print("y_hat_period out",y_hat_period.shape)
#y_hat_period=np.array(y_hat_period)
#print("y_hat_period after reshape",y_hat_period.shape)
#print("y_hat now",y_hat_period.shape)
if i ==1:
y_hat =y_hat_period
#y_hat_period=np.array(y_hat_period)
#print("y_hat now",y_hat_period.shape)
else:
y_hat = np.append(y_hat, y_hat_period)
#print("y_hat", y_hat.shape)
print("Out of loop, final transformation")
y_hat = y_hat.reshape(X_test.shape[0], X_test.shape[2])
print("y_hat final", y_hat.shape)
# np.save(os.path.join("data", anom['run_id'], "y_hat", anom["chan_id"] + ".npy"), np.array(y_hat))
return y_hat
def predict_all(self, X_test):
'''
Used trained LSTM model to predict test data arriving in batches
Args:
y_test (np array): numpy array of test outputs corresponding to true values to be predicted at end of each sequence
X_test (np array): numpy array of test inputs with dimensions [timesteps, l_s, input dimensions)
Returns:
y_hat (np array): predicted test values for each timestep in y_test
'''
#print("Predicting All")
y_hat = self.model.predict(X_test)
#print("y_hat other",y_hat.shape)
return y_hat | 2.828125 | 3 |
tests/robotcode/jsonrpc/test_jsonrpcprotocol.py | d-biehl/robotcode | 21 | 12788730 | import asyncio
import json
from typing import Any, Dict, Generator, List, Optional, cast
import pytest
from robotcode.jsonrpc2.protocol import (
JsonRPCError,
JsonRPCErrorObject,
JsonRPCErrors,
JsonRPCMessage,
JsonRPCProtocol,
JsonRPCRequest,
JsonRPCResponse,
)
from robotcode.jsonrpc2.server import JsonRPCServer
from robotcode.language_server.common.types import MessageActionItem
class DummyJsonRPCProtocol(JsonRPCProtocol):
def __init__(self, server: Optional[JsonRPCServer["DummyJsonRPCProtocol"]]):
super().__init__()
self.handled_messages: List[JsonRPCMessage] = []
self.sended_message: Optional[JsonRPCMessage] = None
async def handle_message(self, message: JsonRPCMessage) -> None:
self.handled_messages.append(message)
return await super().handle_message(message)
def send_message(self, message: JsonRPCMessage) -> None:
self.sended_message = message
async def data_received_async(self, data: bytes) -> None:
self.data_received(data)
return await asyncio.sleep(0)
@pytest.fixture(scope="module")
def event_loop() -> Generator[asyncio.AbstractEventLoop, None, None]:
loop = asyncio.new_event_loop()
yield loop
loop.close()
@pytest.mark.asyncio
async def test_receive_a_request_message_should_work() -> None:
protocol = DummyJsonRPCProtocol(None)
message = JsonRPCRequest(id=1, method="doSomething", params={})
json_message = message.json().encode("utf-8")
header = f"Content-Length: {len(json_message)}\r\n\r\n".encode("ascii")
data = header + json_message
await protocol.data_received_async(data)
assert protocol.handled_messages == [message]
@pytest.mark.asyncio
async def test_receive_a_request_message_should_work_with_string_id() -> None:
protocol = DummyJsonRPCProtocol(None)
message = JsonRPCRequest(id="this is an id", method="doSomething", params={})
json_message = message.json().encode("utf-8")
header = f"Content-Length: {len(json_message)}\r\n\r\n".encode("ascii")
data = header + json_message
await protocol.data_received_async(data)
assert protocol.handled_messages == [message]
@pytest.mark.asyncio
async def test_receive_a_batch_request_should_work() -> None:
protocol = DummyJsonRPCProtocol(None)
message = [
JsonRPCRequest(id=1, method="doSomething", params={}),
JsonRPCRequest(id=2, method="doSomething", params={}),
JsonRPCRequest(id=3, method="doSomething", params={}),
]
json_message = json.dumps([e.dict() for e in message]).encode("utf-8")
header = f"Content-Length: {len(json_message)}\r\n\r\n".encode("ascii")
data = header + json_message
await protocol.data_received_async(data)
assert protocol.handled_messages == message
@pytest.mark.asyncio
async def test_receive_invalid_jsonmessage_should_throw_send_an_error() -> None:
protocol = DummyJsonRPCProtocol(None)
json_message = b"{"
header = f"Content-Length: {len(json_message)}\r\n\r\n".encode("ascii")
data = header + json_message
await protocol.data_received_async(data)
assert (
isinstance(protocol.sended_message, JsonRPCError)
and protocol.sended_message.error.code == JsonRPCErrors.PARSE_ERROR
)
@pytest.mark.asyncio
async def test_receive_a_request_with_invalid_protocol_version_should_send_an_error() -> None:
protocol = DummyJsonRPCProtocol(None)
message = JsonRPCRequest(id=1, method="doSomething", params={})
message.jsonrpc = "1.0"
json_message = message.json().encode("utf-8")
header = f"Content-Length: {len(json_message)}\r\n\r\n".encode("ascii")
data = header + json_message
await protocol.data_received_async(data)
assert (
isinstance(protocol.sended_message, JsonRPCError)
and protocol.sended_message.error.code == JsonRPCErrors.PARSE_ERROR
)
@pytest.mark.asyncio
async def test_receive_an_error_should_work() -> None:
protocol = DummyJsonRPCProtocol(None)
message = JsonRPCError(id=1, result=None, error=JsonRPCErrorObject(code=1, message="test", data="this is the data"))
json_message = message.json().encode("utf-8")
header = f"Content-Length: {len(json_message)}\r\n\r\n".encode("ascii")
data = header + json_message
await protocol.data_received_async(data)
assert protocol.handled_messages == [message]
@pytest.mark.asyncio
async def test_receive_response_should_work() -> None:
protocol = DummyJsonRPCProtocol(None)
r = protocol.send_request("dummy/method", ["dummy", "data"], list)
msg = JsonRPCResponse(id=cast(JsonRPCRequest, protocol.sended_message).id, result=["dummy", "data"])
json_message = msg.json().encode("utf-8")
header = f"Content-Length: {len(json_message)}\r\n\r\n".encode("ascii")
data = header + json_message
await protocol.data_received_async(data)
a = await asyncio.wait_for(r, 10)
assert a == ["dummy", "data"]
@pytest.mark.asyncio
async def test_receive_invalid_id_in_response_should_send_an_error() -> None:
protocol = DummyJsonRPCProtocol(None)
message = JsonRPCResponse(id=1, result=["dummy", "data"])
json_message = message.json().encode("utf-8")
header = f"Content-Length: {len(json_message)}\r\n\r\n".encode("ascii")
data = header + json_message
await protocol.data_received_async(data)
assert protocol.handled_messages == [message]
assert isinstance(protocol.sended_message, JsonRPCError)
@pytest.mark.asyncio
async def test_send_request_receive_response_should_work_without_param_type_work() -> None:
protocol = DummyJsonRPCProtocol(None)
r: Any = protocol.send_request("dummy/method", ["dummy", "data"])
msg = JsonRPCResponse(
id=cast(JsonRPCRequest, protocol.sended_message).id, result=MessageActionItem(title="hi there")
)
json_message = msg.json().encode("utf-8")
header = f"Content-Length: {len(json_message)}\r\n\r\n".encode("ascii")
data = header + json_message
await protocol.data_received_async(data)
a = await asyncio.wait_for(r, 10)
assert isinstance(a, dict)
assert a == {"title": "hi there"}
@pytest.mark.asyncio
async def test_receive_response_should_work_with_pydantic_model() -> None:
protocol = DummyJsonRPCProtocol(None)
r = protocol.send_request("dummy/method", ["dummy", "data"], MessageActionItem)
msg = JsonRPCResponse(
id=cast(JsonRPCRequest, protocol.sended_message).id, result=MessageActionItem(title="hi there")
)
json_message = msg.json().encode("utf-8")
header = f"Content-Length: {len(json_message)}\r\n\r\n".encode("ascii")
data = header + json_message
await protocol.data_received_async(data)
a = await asyncio.wait_for(r, 10)
assert a == MessageActionItem(title="hi there")
@pytest.mark.asyncio
async def test_receive_response_should_work_with_converter() -> None:
protocol = DummyJsonRPCProtocol(None)
r = protocol.send_request("dummy/method", ["dummy", "data"], lambda v: [MessageActionItem.parse_obj(e) for e in v])
msg = JsonRPCResponse(
id=cast(JsonRPCRequest, protocol.sended_message).id, result=[MessageActionItem(title="hi there")]
)
json_message = msg.json().encode("utf-8")
header = f"Content-Length: {len(json_message)}\r\n\r\n".encode("ascii")
data = header + json_message
await protocol.data_received_async(data)
a = await asyncio.wait_for(r, 10)
assert a == [MessageActionItem(title="hi there")]
@pytest.mark.asyncio
async def test_receive_response_should_work_with_generic_list() -> None:
protocol = DummyJsonRPCProtocol(None)
r = protocol.send_request("dummy/method", ["dummy", "data"], List[MessageActionItem])
msg = JsonRPCResponse(
id=cast(JsonRPCRequest, protocol.sended_message).id, result=[MessageActionItem(title="hi there")]
)
json_message = msg.json().encode("utf-8")
header = f"Content-Length: {len(json_message)}\r\n\r\n".encode("ascii")
data = header + json_message
await protocol.data_received_async(data)
a = await asyncio.wait_for(r, 10)
assert a == [MessageActionItem(title="hi there")]
@pytest.mark.asyncio
async def test_receive_response_with_generic_dict_should_return_unchanged() -> None:
protocol = DummyJsonRPCProtocol(None)
r = protocol.send_request("dummy/method", ["dummy", "data"], List[Dict[str, Any]])
msg = JsonRPCResponse(
id=cast(JsonRPCRequest, protocol.sended_message).id, result=[MessageActionItem(title="hi there")]
)
json_message = msg.json().encode("utf-8")
header = f"Content-Length: {len(json_message)}\r\n\r\n".encode("ascii")
data = header + json_message
await protocol.data_received_async(data)
a = await asyncio.wait_for(r, 10)
assert a == [MessageActionItem(title="hi there").dict()]
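# The Content-Length framing above is repeated in every test; a small helper like
# this sketch (not part of the original module; the name is ours) would collapse it:
def _frame(message: JsonRPCMessage) -> bytes:
    """Serialize a message and prepend its Content-Length header."""
    body = message.json().encode("utf-8")
    return f"Content-Length: {len(body)}\r\n\r\n".encode("ascii") + body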
| 2.203125 | 2 |
Lib/site-packages/miscutils/functions.py | fochoao/cpython | 0 | 12788731 | from __future__ import annotations
import ast
import inspect
import os
import sys
import traceback
from typing import Optional, Any, Callable
from collections.abc import Iterable
from subtypes import Str
from pathmagic import Dir
def is_running_in_ipython() -> bool:
"""Returns True if run from within a jupyter ipython interactive session, else False."""
try:
assert __IPYTHON__
return True
except (NameError, AttributeError):
return False
def executed_within_user_tree() -> bool:
"""Returns True if the '__main__' module is within the branches of the current user's filesystem tree, else False."""
main_dir = sys.modules["__main__"]._dh[0] if is_running_in_ipython() else sys.modules["__main__"].__file__
return Dir.from_home() > os.path.abspath(main_dir)
def issubclass_safe(candidate: Any, ancestor: Any) -> bool:
"""Returns True the candidate is a subclass of the ancestor, else False. Will return false instead of raising TypeError if the candidate is not a class."""
try:
return issubclass(candidate, ancestor)
except TypeError:
return False
def is_non_string_iterable(candidate: Any) -> bool:
return False if isinstance(candidate, (str, bytes)) else isinstance(candidate, Iterable)
def class_name(candidate: Any) -> str:
cls = candidate if isinstance(candidate, type) or issubclass_safe(candidate, type) else type(candidate)
try:
return cls.__name__
except AttributeError:
return Str(cls).slice.after_last("'").slice.before_first("'")
def traceback_from_exception(ex: Exception) -> str:
return "".join(traceback.format_exception(etype=type(ex), value=ex, tb=ex.__traceback__))
def beep() -> None:
"""Cross-platform implementation for producing a beeping sound. Only works on windows when used in an interactive IPython session (jupyter notebook)."""
if is_running_in_ipython():
import winsound
winsound.Beep(frequency=440, duration=2*1000)
else:
print("\a")
def get_short_lambda_source(lambda_func: Callable) -> Optional[str]:
"""Return the source of a (short) lambda function. If it's impossible to obtain, return None."""
try:
source_lines, _ = inspect.getsourcelines(lambda_func)
except (IOError, TypeError):
return None
if len(source_lines) != 1:
return None
source_text = os.linesep.join(source_lines).strip()
source_ast = ast.parse(source_text)
lambda_node = next((node for node in ast.walk(source_ast) if isinstance(node, ast.Lambda)), None)
if lambda_node is None:
return None
lambda_text = source_text[lambda_node.col_offset:]
lambda_body_text = source_text[lambda_node.body.col_offset:]
min_length = len('lambda:_')
while len(lambda_text) > min_length:
try:
code = compile(lambda_body_text, '<unused filename>', 'eval')
# noinspection PyUnresolvedReferences
if len(code.co_code) == len(lambda_func.__code__.co_code):
return lambda_text
except SyntaxError:
pass
lambda_text = lambda_text[:-1]
lambda_body_text = lambda_body_text[:-1]
return None
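# Quick demonstration of the lambda-source recovery above (returns None whenever
# the source cannot be pinned down, e.g. multi-line definitions):
#   double = lambda x: x * 2
#   get_short_lambda_source(double)  # -> 'lambda x: x * 2'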
| 2.40625 | 2 |
turbulenz_local/lib/__init__.py | turbulenz/turbulenz_local | 12 | 12788732 | <reponame>turbulenz/turbulenz_local
# Copyright (c) 2011,2013 Turbulenz Limited
| 0.785156 | 1 |
analysis/src/python/data_analysis/preprocessing/build_issues.py | eartser/hyperstyle-analyze | 1 | 12788733 | import argparse
import logging
import sys
from typing import Dict
import pandas as pd
from analysis.src.python.data_analysis.model.column_name import IssuesColumns, SubmissionColumns
from analysis.src.python.data_analysis.utils.df_utils import read_df, write_df
from analysis.src.python.data_analysis.utils.parsing_utils import str_to_dict
def get_issues(issues: str, issue_class_column: str, issue_type_column: str, issues_types: Dict[str, str]):
""" Extracts issues classes and types from list with issue reports. """
for issue in str_to_dict(issues):
issues_types[issue[issue_class_column]] = issue.get(issue_type_column, 'Issues type undefined')
def get_issues_classes(issue_column_name: str,
issue_class_column: str,
issue_type_column: str,
submissions_with_issues_path: str,
issues_path: str):
""" Extracts all issues classes and types from lists with issue reports in submissions with issues dataset. """
logging.info(f'Reading submissions with issues from: {submissions_with_issues_path}')
df_submissions_with_issues = read_df(submissions_with_issues_path)
issues_types = {}
logging.info('Getting issues class and type from submissions with issues dataset')
df_submissions_with_issues[issue_column_name].apply(
lambda d: get_issues(d, issue_class_column, issue_type_column, issues_types))
logging.info(f'Saving issues classes and types to: {issues_path}')
write_df(pd.DataFrame.from_dict({
IssuesColumns.CLASS: issues_types.keys(),
IssuesColumns.TYPE: issues_types.values(),
}), issues_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('issues_type', type=str, help='Type of issues to analyse (can be raw or qodana).',
choices=[SubmissionColumns.RAW_ISSUES, SubmissionColumns.QODANA_ISSUES])
parser.add_argument('submissions_path', type=str, help='Path to .csv file with submissions with issues.')
parser.add_argument('issues_path', type=str, help='Path to .csv file where issues info will be saved')
args = parser.parse_args(sys.argv[1:])
issues_type = SubmissionColumns(args.issues_type)
issue_type_column_name = SubmissionColumns.ISSUE_TYPE
if issues_type == SubmissionColumns.QODANA_ISSUES:
issue_class_column_name = SubmissionColumns.QODANA_ISSUE_CLASS
else:
issue_class_column_name = SubmissionColumns.RAW_ISSUE_CLASS
get_issues_classes(issues_type,
issue_class_column_name,
issue_type_column_name,
args.submissions_path,
args.issues_path)
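# Example invocation (hypothetical paths; the issue-type choices are the values of
# SubmissionColumns.RAW_ISSUES / SubmissionColumns.QODANA_ISSUES, assumed here to
# be the strings 'raw_issues' and 'qodana_issues'):
#   python build_issues.py raw_issues submissions.csv issues.csv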
| 2.78125 | 3 |
histoprint/cli.py | mbhall88/histoprint | 0 | 12788734 | """Module containing the CLI programs for histoprint."""
import numpy as np
import click
from histoprint import *
import histoprint.formatter as formatter
@click.command()
@click.argument("infile", type=click.Path(exists=True, dir_okay=False, allow_dash=True))
@click.option(
"-b",
"--bins",
type=str,
default="10",
help="Number of bins or space-separated bin edges.",
)
@click.option("-t", "--title", type=str, default="", help="Title of the histogram.")
@click.option(
"--stack/--nostack", type=bool, default=False, help="Stack the histograms."
)
@click.option(
"-s/-S",
"--summary/--nosummary",
type=bool,
default=False,
help="Print summary statistics.",
)
@click.option(
"-l",
"--label",
"labels",
type=str,
multiple=True,
default=("",),
help="Labels for the data, one for each column.",
)
@click.option(
"--symbols",
type=str,
default=formatter.DEFAULT_SYMBOLS,
help="Symbol cycle for multiple histograms. Choices & default: '%s'"
% (formatter.DEFAULT_SYMBOLS,),
)
@click.option(
"--fg-colors",
type=str,
default=formatter.DEFAULT_FG_COLORS,
help="Colour cycle for foreground colours. Default: '%s', Choices: '0rgbcmykwRGBCMYKW'"
% (formatter.DEFAULT_FG_COLORS,),
)
@click.option(
"--bg-colors",
type=str,
default=formatter.DEFAULT_BG_COLORS,
help="Colour cycle for background colours. Default: '%s', Choices: '0rgbcmykwRGBCMYKW'"
% (formatter.DEFAULT_BG_COLORS,),
)
@click.option(
"-f",
"--field",
"fields",
type=str,
multiple=True,
help="Which fields to histogram. Interpretation of the fields depends on "
"the file format. TXT files only support integers for column numbers "
"starting at 0. For CSV files, the fields must be the names of the columns "
"as specified in the first line of the file. When plotting from ROOT files, "
"at least one field must be specified. This can either be the path to a "
"single TH1, or one or more paths to TTree branches.",
)
@click.version_option()
def histoprint(infile, **kwargs):
"""Read INFILE and print a histogram of the contained columns.
INFILE can be '-', in which case the data is read from STDIN.
"""
# Try to interpret file as textfile
try:
_histoprint_txt(infile, **kwargs)
exit(0)
except ValueError:
pass
# Try to interpret file as CSV file
try:
_histoprint_csv(infile, **kwargs)
exit(0)
except ImportError:
click.echo("Cannot try CSV file format. Pandas module not found.", err=True)
except UnicodeDecodeError:
pass
# Try to interpret file as ROOT file
try:
_histoprint_root(infile, **kwargs)
exit(0)
except ImportError:
click.echo("Cannot try ROOT file format. Uproot module not found.", err=True)
click.echo("Could not interpret file format.", err=True)
exit(1)
def _bin_edges(kwargs, data):
"""Get the desired bin edges."""
bins = kwargs.pop("bins", "10")
bins = np.fromiter(bins.split(), dtype=float)
if len(bins) == 1:
bins = int(bins[0])
if isinstance(bins, int):
minval = np.inf
maxval = -np.inf
for d in data:
minval = min(minval, np.nanmin(d))
maxval = max(maxval, np.nanmax(d))
bins = np.linspace(minval, maxval, bins + 1)
return bins
def _histoprint_txt(infile, **kwargs):
"""Interpret file as as simple whitespace separated table."""
# Read the data
data = np.loadtxt(click.open_file(infile), ndmin=2)
data = data.T
# Interpret field numbers
fields = kwargs.pop("fields", [])
if len(fields) > 0:
try:
fields = [int(f) for f in fields]
except ValueError:
click.echo("Fields for a TXT file must be integers.", err=True)
exit(1)
try:
data = data[fields]
except KeyError:
click.echo("Field out of bounds.", err=True)
exit(1)
# Interpret bins
bins = _bin_edges(kwargs, data)
# Create the histogram(s)
hist = [[], bins]
for d in data:
hist[0].append(np.histogram(d, bins=bins)[0])
# Print the histogram
print_hist(hist, **kwargs)
def _histoprint_csv(infile, **kwargs):
"""Interpret file as as CSV file."""
import pandas as pd
# Read the data
data = pd.read_csv(click.open_file(infile))
# Interpret field numbers/names
fields = list(kwargs.pop("fields", []))
if len(fields) > 0:
try:
data = data[fields]
except KeyError:
click.echo("Unknown column name.", err=True)
exit(1)
# Get default columns labels
if kwargs.get("labels", ("",)) == ("",):
kwargs["labels"] = data.columns
# Convert to array
data = data.to_numpy().T
# Interpret bins
bins = _bin_edges(kwargs, data)
# Create the histogram(s)
hist = [[], bins]
for d in data:
hist[0].append(np.histogram(d, bins=bins)[0])
# Print the histogram
print_hist(hist, **kwargs)
def _histoprint_root(infile, **kwargs):
"""Interpret file as as ROOT file."""
import uproot as up
# Open root file
F = up.open(infile)
# Interpret field names
fields = list(kwargs.pop("fields", []))
if len(fields) == 0:
click.echo("Must specify at least on field for ROOT files.", err=True)
click.echo(F.keys())
exit(1)
# Get default columns labels
if kwargs.get("labels", ("",)) == ("",):
kwargs["labels"] = [field.split("/")[-1] for field in fields]
# Read the data
if len(fields) == 1:
# Possible a single histogram
try:
hist = F[fields[0]].numpy()
except (AttributeError, KeyError):
pass
else:
kwargs.pop("bins", None) # Get rid of useless parameter
print_hist(hist, **kwargs)
return
data = []
for field in fields:
branch = F
for key in field.split("/"):
try:
branch = branch[key]
except KeyError:
click.echo(
"Could not find key '%s'. Possible values: %s"
% (key, branch.keys())
)
exit(1)
try:
d = np.array(branch.array().flatten())
except ValueError:
click.echo(
"Could not interpret root object '%s'. Possible child branches: %s"
% (key, branch.keys())
)
exit(1)
data.append(d)
# Interpret bins
bins = _bin_edges(kwargs, data)
# Create the histogram(s)
hist = [[], bins]
for d in data:
hist[0].append(np.histogram(d, bins=bins)[0])
# Print the histogram
print_hist(hist, **kwargs)
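# Example invocations (hypothetical files), using only the options defined above:
#   histoprint data.txt -b 20 -t "column histogram" -f 0 -f 1
#   histoprint events.root -f tree/branch --summary
#   cat values.txt | histoprint - -b "0 1 2 5 10"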
| 2.8125 | 3 |
aranyapythonpb/rpcpb/edgedevice_pb2_grpc.py | arhat-dev/aranya-proto | 0 | 12788735 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import proto_pb2 as proto__pb2
class EdgeDeviceStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Sync = channel.stream_stream(
'/aranya.EdgeDevice/Sync',
request_serializer=proto__pb2.Msg.SerializeToString,
response_deserializer=proto__pb2.Cmd.FromString,
)
class EdgeDeviceServicer(object):
"""Missing associated documentation comment in .proto file."""
def Sync(self, request_iterator, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_EdgeDeviceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Sync': grpc.stream_stream_rpc_method_handler(
servicer.Sync,
request_deserializer=proto__pb2.Msg.FromString,
response_serializer=proto__pb2.Cmd.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'aranya.EdgeDevice', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class EdgeDevice(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def Sync(request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.stream_stream(request_iterator, target, '/aranya.EdgeDevice/Sync',
proto__pb2.Msg.SerializeToString,
proto__pb2.Cmd.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 1.789063 | 2 |
Source/game_data.py | JFiedler23/PyInvaders | 0 | 12788736 | <filename>Source/game_data.py
class GameData:
def __init__(self, score=0, curr_level=1, alien_speed=850, alien_y=40):
self.score = score
self.curr_level = curr_level
self.alien_speed = alien_speed
self.alien_y = alien_y
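# Illustrative usage (values are arbitrary):
#
#     data = GameData()
#     data.score += 100      # player shot an alien
#     data.curr_level += 1   # advance to the next level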
| 2.34375 | 2 |
my_ip/urls.py | OscarMugendi/Django-week3-IP | 0 | 12788737 | from django.conf.urls import url
from django.urls import path, include
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url('^$', views.home, name='home'),
path('account/', include('django.contrib.auth.urls')),
path('profile/<id>/', views.profile, name='profile'),
path('profile/<id>/update/', views.update_profile, name='update_profile'),
path('project/new/', views.new_project, name='new_project'),
path('project/<title>/reviews/', views.single_project, name='single_project'),
path('project/<title>/', views.single_project, name='project'),
path('project/<id>/review/', views.add_review, name='review'),
url('search/',views.search_projects,name="search"),
url(r'^api/profiles/$', views.ProfileView.as_view(), name='api_profiles'),
url(r'^api/projects/$', views.ProjectView.as_view(), name='api_projects')
]
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | 1.992188 | 2 |
msghandle/logger.py | RaenonX/Jelly-Bot-API | 5 | 12788738 | from extutils.logger import LoggerSkeleton
logger = LoggerSkeleton("sys.handle", logger_name_env="EVT_HANDLER")
| 1.5 | 2 |
src/injecta/service/resolved/NamedArgumentsResolver.py | DataSentics/injecta | 3 | 12788739 | from typing import List, Dict
from injecta.service.argument.ArgumentInterface import ArgumentInterface
from injecta.service.class_.InspectedArgument import InspectedArgument
from injecta.service.resolved.ResolvedArgument import ResolvedArgument
from injecta.service.argument.validator.ArgumentsValidator import ArgumentsValidator
from injecta.service.class_.InspectedArgumentsResolver import InspectedArgumentsResolver
class NamedArgumentsResolver:
def __init__(self):
self.__inspected_arguments_resolver = InspectedArgumentsResolver()
self.__arguments_validator = ArgumentsValidator()
def resolve(self, arguments: List[ArgumentInterface], inspected_arguments: List[InspectedArgument], service_name: str):
inspected_arguments = [inspected_argument for inspected_argument in inspected_arguments if inspected_argument.name != "args"]
inspected_arguments_indexed = {inspected_argument.name: inspected_argument for inspected_argument in inspected_arguments}
arguments_indexed = {argument.name: argument for argument in arguments}
if self.__contains_kwargs(inspected_arguments):
return self.__resolve_arguments_kwargs(arguments_indexed, inspected_arguments_indexed)
for argument_name, argument in arguments_indexed.items():
if argument_name not in inspected_arguments_indexed:
raise Exception(f'Unknown argument "{argument_name}" in service "{service_name}"')
return self.__resolve_arguments(arguments_indexed, inspected_arguments_indexed)
def __resolve_arguments_kwargs(
self, arguments_indexed: Dict[str, ArgumentInterface], inspected_arguments_indexed: Dict[str, InspectedArgument]
):
del inspected_arguments_indexed["kwargs"]
resolved_arguments = self.__resolve_arguments(arguments_indexed, inspected_arguments_indexed)
for resolved_argument in resolved_arguments:
del arguments_indexed[resolved_argument.name]
for _, argument in arguments_indexed.items():
resolved_arguments.append(ResolvedArgument(argument.name, argument, None))
return resolved_arguments
def __resolve_arguments(
self, arguments_indexed: Dict[str, ArgumentInterface], inspected_arguments_indexed: Dict[str, InspectedArgument]
):
resolved_arguments = []
for argument_name, inspected_argument in inspected_arguments_indexed.items():
argument = arguments_indexed[argument_name] if argument_name in arguments_indexed else None
# argument with default value, no value defined in service configuration
if inspected_argument.has_default_value() and argument is None:
continue
resolved_argument = ResolvedArgument(inspected_argument.name, argument, inspected_argument)
resolved_arguments.append(resolved_argument)
return resolved_arguments
def __contains_kwargs(self, inspected_arguments: List[InspectedArgument]):
return inspected_arguments and inspected_arguments[-1].name == "kwargs"
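# A rough usage sketch (the argument and inspected-argument lists are
# illustrative; in practice they come from the service configuration and
# from class inspection, respectively):
#
#     resolver = NamedArgumentsResolver()
#     resolved = resolver.resolve(arguments, inspected_arguments, "my.Service")
#     for resolved_argument in resolved:
#         print(resolved_argument.name, resolved_argument.argument)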
| 2.34375 | 2 |
setup.py | markmuetz/cosmic | 0 | 12788740 | <gh_stars>0
#!/usr/bin/env python
import os
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
    except OSError:
return ''
setup(
name='cosmic',
version='0.3.0',
description='COSMIC package containing tools and analysis',
long_description=read('README.md'),
author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
packages=[
'cosmic.datasets.UM_N1280',
'cosmic.datasets.cmorph',
'cosmic.processing',
],
scripts=[
'bin/cosmic-rsync-jasmin-data',
'bin/cosmic-retrieve-from-mass',
'bin/cosmic-bsub-submit',
'bin/cosmic-bsub-task-submit',
'bin/cosmic-remake-slurm-submit',
],
python_requires='>=3.6',
# These should all be met if you use the conda_env in envs.
install_requires=[
# Commented out for now.
# 'basmati',
# 'remake',
# 'cartopy',
# 'geopandas',
# Causes problems with pip -e . installation and running scripts in bin.
# 'iris',
# 'numpy',
# 'matplotlib',
# 'pandas',
# 'scipy',
],
# url='https://github.com/markmuetz/cosmic',
classifiers=[
'Environment :: Console',
'Intended Audience :: Science/Research',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Atmospheric Science',
],
keywords=[''],
)
| 1.757813 | 2 |
c10_tools/inspect.py | atac-bham/c10-tools | 0 | 12788741 | <filename>c10_tools/inspect.py
from collections import OrderedDict
import asyncio
import csv
import os
import sys
from chapter10 import C10
from termcolor import colored
import click
from .common import fmt_number, FileProgress, walk_packets
class Inspect:
KEYS = {
'Channel': 'channel_id',
'Type': 'data_type',
'Sequence': 'sequence_number',
'Size': 'packet_length',
}
# Pairs of (name, width)
# TODO: selectable columns
COLUMNS = OrderedDict((
('Channel', 7),
('Type', 4),
('Sequence', 8),
('Size', 7),
('Time', 27),
('Valid', 5),
('Offset', 15),
))
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
self.cols = self.COLUMNS
def get_size(self):
"""Get total byte size for all files."""
return sum(os.stat(f).st_size for f in self.infile)
def write_header(self):
"""Write out header row for CSV or ASCII."""
if self.writer:
self.writer.writerow(self.cols.keys())
return ''
else:
s = ' | '.join([f'{key:<{width}}'
for key, width in self.cols.items()])
line = '-' * (len(s) + 4)
return f'{line}\n| {s} |\n{line}'
def write_row(self, packet, offset):
"""Pull values from a packet and write output row."""
row = []
for col, width in self.cols.items():
if col == 'Time':
if packet.data_type == 1:
val = 'N/A'
else:
if packet.data_type == 0x11:
self.date_format = packet.date_format
# Julian day or month/year
fmt = '%j %H:%M:%S.%f'
if getattr(self, "date_format", True):
fmt = '%Y-%m-%d %H:%M:%S.%f'
val = packet.get_time().strftime(fmt)
elif col == 'Valid':
val = packet.validate(True) and 'Yes' or 'No'
elif col == 'Offset':
val = offset
elif col in self.KEYS:
val = getattr(packet, self.KEYS[col])
if not self.writer and isinstance(val, (float, int)):
val = fmt_number(val).rjust(width)
row.append(val)
s = ''
if self.writer:
self.writer.writerow(row)
else:
widths = list(self.cols.values())
s = '|'
for i, col in enumerate(row):
s += f' {col:<{widths[i]}} |'
return s
async def get_packet(self, c10):
"""Read and return the next packet from a file or raise
StopAsyncIteration.
"""
try:
packet = next(c10)
assert packet.packet_length == len(bytes(packet)), \
'Packet length incorrect'
except StopIteration:
raise StopAsyncIteration
return packet
def find_sync(self, f):
"""Seek forward in a file to the next sync pattern (eb25)."""
while True:
offset = f.tell()
buffer = f.read(100000)
if not buffer:
raise EOFError
if b'\x25\xeb' in buffer:
f.seek(offset + buffer.find(b'\x25\xeb'))
return f.tell()
def parse_file(self, f, progress):
"""Walk a file and read header information."""
offset = 0
args = {
'--channel': self.channel,
'--exclude': self.exclude,
'--type': self.type,
}
c10 = walk_packets(C10(f), args, include_time=False)
while True:
# Try to read a packet.
try:
packet = asyncio.run(
asyncio.wait_for(self.get_packet(c10), timeout=.1))
progress.write(self.write_row(packet, offset))
progress.update(packet.packet_length)
offset += packet.packet_length
# Report error and retry at the next sync pattern.
except Exception as err:
# Exit if we've read the whole file.
if offset >= os.stat(f.name).st_size:
break
if not isinstance(err, StopAsyncIteration):
msg = f'{err} at {fmt_number(offset)}'
if self.writer is None:
progress.write(colored(msg, 'red'))
else:
progress.write(f'"{msg}"')
try:
f.seek(offset + 1, 1)
sync = self.find_sync(f)
except EOFError:
break
progress.update(sync - offset)
offset = sync
def main(self):
# Use CSV if stdout is redirected
self.writer = None
if sys.stdout == sys.stderr:
pass
elif not sys.stdout.isatty():
self.writer = csv.writer(sys.stdout, lineterminator='')
progress = FileProgress(total=self.get_size(),
disable=self.quiet or self.writer)
header = self.write_header()
progress.write(header)
for f in self.infile:
with open(f, 'rb') as f:
self.parse_file(f, progress)
# Closing line if we're in ASCII mode.
if header:
progress.write(header.split('\n', 1)[0])
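# Example invocation through the click entry point (the exact console-script
# name depends on how the package is installed; the file name is hypothetical):
#
#     c10-tools inspect flight.c10 --channel 1,2 --type 0x11
#
# Redirecting stdout switches the report from the ASCII table to CSV, since
# main() picks a csv.writer whenever stdout is not a TTY.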
@click.command()
@click.argument('infile', nargs=-1)
@click.option('-c', '--channel', type=str, help='Specify channels (comma-separated) to include')
@click.option('-e', '--exclude', type=str, help='Specify channels (comma-separated) to exclude')
@click.option('-t', '--type', type=str, help='Specify datatypes (comma-separated) to include')
@click.pass_context
def inspect(ctx, infile, channel, exclude, type):
"""Report on packets found in a file."""
ctx.ensure_object(dict)
Inspect(
infile=infile,
channel=channel,
exclude=exclude,
type=type,
verbose=ctx.obj.get('verbose'),
quiet=ctx.obj.get('quiet'),
).main() | 2.34375 | 2 |
tests/unittests/analysis/test_lpi.py | obilaniu/orion | 177 | 12788742 | <filename>tests/unittests/analysis/test_lpi.py
# -*- coding: utf-8 -*-
"""Tests :func:`orion.analysis.lpi`"""
import copy
import numpy
import pandas as pd
import pytest
from orion.analysis.base import to_numpy, train_regressor
from orion.analysis.lpi_utils import compute_variances, lpi, make_grid
from orion.core.io.space_builder import SpaceBuilder
data = pd.DataFrame(
data={
"id": ["a", "b", "c", "d"],
"x": [0, 1, 2, 3],
"y": [1, 2, 0, 3],
"objective": [0.1, 0.2, 0.3, 0.5],
}
)
space = SpaceBuilder().build({"x": "uniform(0, 6)", "y": "uniform(0, 3)"})
def test_accept_empty():
"""Tests an empty dataframe is returned if you give an empty dataframe"""
empty_frame = pd.DataFrame()
results = lpi(empty_frame, space)
assert results.columns.tolist() == ["LPI"]
assert results.index.tolist() == list(space.keys())
assert results["LPI"].tolist() == [0, 0]
empty_frame = pd.DataFrame(columns=["x", "y", "objective"])
results = lpi(empty_frame, space)
assert results.columns.tolist() == ["LPI"]
assert results.index.tolist() == list(space.keys())
assert results["LPI"].tolist() == [0, 0]
def test_parameter_not_modified():
"""Tests the original dataframe is not modified"""
original = copy.deepcopy(data)
lpi(data, space)
pd.testing.assert_frame_equal(data, original)
def test_make_grid():
"""Test grid has correct format"""
trials = to_numpy(data, space)
model = train_regressor("RandomForestRegressor", trials)
best_point = trials[numpy.argmin(trials[:, -1])]
grid = make_grid(best_point, space, model, 4)
# Are fixed to anchor value
numpy.testing.assert_equal(grid[0][:, 1], best_point[1])
numpy.testing.assert_equal(grid[1][:, 0], best_point[0])
# Is a grid in search space
numpy.testing.assert_equal(grid[0][:, 0], [0, 2, 4, 6])
numpy.testing.assert_equal(grid[1][:, 1], [0, 1, 2, 3])
def test_make_grid_predictor(monkeypatch):
"""Test grid contains corresponding predictions from the model"""
trials = to_numpy(data, space)
model = train_regressor("RandomForestRegressor", trials)
best_point = trials[numpy.argmin(trials[:, -1])]
# Make sure model is not predicting exactly the original objective
with numpy.testing.assert_raises(AssertionError):
numpy.testing.assert_equal(
best_point[-1], model.predict(best_point[:-1].reshape(1, -1))
)
grid = make_grid(best_point, space, model, 4)
# Verify that grid predictions are those of the model
numpy.testing.assert_equal(grid[0][:, -1], model.predict(grid[0][:, :-1]))
numpy.testing.assert_equal(grid[1][:, -1], model.predict(grid[1][:, :-1]))
# Verify model predictions differ on different points
with numpy.testing.assert_raises(AssertionError):
numpy.testing.assert_equal(grid[0][:, -1], grid[1][:, -1])
def test_compute_variance():
"""Test variance computation over the grid"""
grid = numpy.arange(3 * 5 * 4).reshape(3, 5, 4)
grid[0, :, -1] = 10
grid[1, :, -1] = [0, 1, 2, 3, 4]
grid[2, :, -1] = [0, 10, 20, 30, 40]
variances = compute_variances(grid)
assert variances.shape == (3,)
assert variances[0] == 0
assert variances[1] == numpy.var([0, 1, 2, 3, 4])
assert variances[2] == numpy.var([0, 10, 20, 30, 40])
def test_lpi_results():
"""Verify LPI results in DataFrame"""
results = lpi(data, space, random_state=1)
assert results.columns.tolist() == ["LPI", "STD"]
assert results.index.tolist() == list(space.keys())
# The data is made such that x correlates more strongly with objective than y
assert results["LPI"].loc["x"] > results["LPI"].loc["y"]
def test_lpi_with_categorical_data():
"""Verify LPI can be computed on categorical dimensions"""
data = pd.DataFrame(
data={
"id": ["a", "b", "c", "d"],
"x": [0, 1, 2, 3],
"y": ["b", "c", "a", "d"],
"objective": [0.1, 0.2, 0.3, 0.5],
}
)
space = SpaceBuilder().build(
{"x": "uniform(0, 6)", "y": 'choices(["a", "b", "c", "d"])'}
)
results = lpi(data, space, random_state=1)
assert results.columns.tolist() == ["LPI", "STD"]
assert results.index.tolist() == ["x", "y"]
# The data is made such that x correlates more strongly with objective than y
assert results["LPI"].loc["x"] > results["LPI"].loc["y"]
def test_lpi_with_multidim_data():
"""Verify LPI can be computed on categorical dimensions"""
data = pd.DataFrame(
data={
"id": ["a", "b", "c", "d"],
"x": [[0, 2, 4], [1, 1, 3], [2, 2, 2], [3, 0, 3]],
"y": [["b", "b"], ["c", "b"], ["a", "a"], ["d", "c"]],
"objective": [0.1, 0.2, 0.3, 0.5],
}
)
space = SpaceBuilder().build(
{"x": "uniform(0, 6, shape=3)", "y": 'choices(["a", "b", "c", "d"], shape=2)'}
)
results = lpi(data, space, random_state=1)
assert results.columns.tolist() == ["LPI", "STD"]
assert results.index.tolist() == ["x[0]", "x[1]", "x[2]", "y[0]", "y[1]"]
# The data is made such some x correlates more strongly with objective than other x and most y
assert results["LPI"].loc["x[0]"] > results["LPI"].loc["x[1]"]
assert results["LPI"].loc["x[1]"] > results["LPI"].loc["x[2]"]
assert results["LPI"].loc["x[0]"] > results["LPI"].loc["y[0]"]
assert results["LPI"].loc["x[0]"] > results["LPI"].loc["y[1]"]
def test_lpi_n_points(monkeypatch):
"""Verify given number of points is used"""
N_POINTS = numpy.random.randint(2, 50)
def mock_make_grid(*args, **kwargs):
grid = make_grid(*args, **kwargs)
assert grid.shape == (len(space), N_POINTS, len(space) + 1)
return grid
monkeypatch.setattr("orion.analysis.lpi_utils.make_grid", mock_make_grid)
lpi(data, space, random_state=1, n_points=N_POINTS)
def test_lpi_n_runs(monkeypatch):
"""Verify number of runs"""
N_RUNS = 5
seeds = set()
n_runs = 0
def mock_train_regressor(*args, **kwargs):
nonlocal n_runs
n_runs += 1
seeds.add(kwargs["random_state"])
return train_regressor(*args, **kwargs)
monkeypatch.setattr(
"orion.analysis.lpi_utils.train_regressor", mock_train_regressor
)
lpi(data, space, random_state=1, n_runs=N_RUNS)
assert n_runs == N_RUNS
assert len(seeds) > 0
| 2.390625 | 2 |
src/oaipmh/interfaces.py | unt-libraries/pyoai | 58 | 12788743 | <filename>src/oaipmh/interfaces.py
class IOAI:
def getRecord(metadataPrefix, identifier):
"""Get a record for a metadataPrefix and identifier.
metadataPrefix - identifies metadata set to retrieve
identifier - repository-unique identifier of record
Should raise error.CannotDisseminateFormatError if
metadataPrefix is unknown or not supported by identifier.
Should raise error.IdDoesNotExistError if identifier is
unknown or illegal.
Returns a header, metadata, about tuple describing the record.
"""
def identify():
"""Retrieve information about the repository.
Returns an Identify object describing the repository.
"""
def listIdentifiers(metadataPrefix, set=None, from_=None, until=None):
"""Get a list of header information on records.
metadataPrefix - identifies metadata set to retrieve
set - set identifier; only return headers in set (optional)
from_ - only retrieve headers from from_ date forward (optional)
until - only retrieve headers with dates up to and including
until date (optional)
Should raise error.CannotDisseminateFormatError if metadataPrefix
is not supported by the repository.
Should raise error.NoSetHierarchyError if the repository does not
support sets.
Returns an iterable of headers.
"""
def listMetadataFormats(identifier=None):
"""List metadata formats supported by repository or record.
identifier - identify record for which we want to know all
supported metadata formats. if absent, list all metadata
formats supported by repository. (optional)
Should raise error.IdDoesNotExistError if record with
identifier does not exist.
Should raise error.NoMetadataFormatsError if no formats are
available for the indicated record.
Returns an iterable of metadataPrefix, schema, metadataNamespace
tuples (each entry in the tuple is a string).
"""
def listRecords(metadataPrefix, set=None, from_=None, until=None):
"""Get a list of header, metadata and about information on records.
metadataPrefix - identifies metadata set to retrieve
set - set identifier; only return records in set (optional)
from_ - only retrieve records from from_ date forward (optional)
until - only retrieve records with dates up to and including
until date (optional)
Should raise error.CannotDisseminateFormatError if metadataPrefix
is not supported by the repository.
Should raise error.NoSetHierarchyError if the repository does not
support sets.
Returns an iterable of header, metadata, about tuples.
"""
def listSets():
"""Get a list of sets in the repository.
Should raise error.NoSetHierarchyError if the repository does not
support sets.
Returns an iterable of setSpec, setName tuples (strings).
"""
class IBatchingOAI:
"""Very similar to IOAI, but the implementation can be batch-aware.
Methods that support resumption will get two extra arguments,
cursor and batch_size, which indicate the batch currently being
requested.
"""
def getRecord(metadataPrefix, identifier):
pass
def identify():
pass
def listIdentifiers(metadataPrefix, set=None, from_=None, until=None,
cursor=0, batch_size=10):
pass
def listMetadataFormats(identifier=None):
pass
def listRecords(metadataPrefix, set=None, from_=None, until=None,
cursor=0, batch_size=10):
pass
def listSets():
pass
class IIdentify:
def repositoryName():
"""Name of repository.
"""
def baseURL():
"""Base URL for OAI-PMH requests.
"""
def protocolVersion():
"""OAI-PMH protocol version (should always be '2.0')
"""
def adminEmails():
"""List of email addresses of repository administrators.
"""
def earliestDateStamp():
"""The datetime (datestamp) of the earliest record in repository.
"""
def deletedRecord():
"""Way the repository handles deleted records.
Either 'no', 'transient' or 'persistent'.
"""
def granularity():
"""Datetime granularity of datestamps in repository.
Either YYYY-MM-DD or YYYY-MM-DDThh:mm:ssZ
"""
def compression():
"""List of types of compression schemes supported by repository.
'identity' is the 'do-nothing' scheme.
"""
class IHeader:
def identifier():
"""Repository-unique identifier of this record.
"""
def datestamp():
"""Datetime of creation, last modification or deletion of the record.
This can be used for selective harvesting.
"""
def setSpec():
"""A list of sets this record is a member of.
"""
def isDeleted():
"""If true, record has been deleted.
"""
| 2.578125 | 3 |
esda/tests/test_local_geary_mv.py | jeffcsauer/esda | 145 | 12788744 | import unittest
import libpysal
from libpysal.common import pandas, RTOL, ATOL
from esda.geary_local_mv import Geary_Local_MV
import numpy as np
PANDAS_EXTINCT = pandas is None
class Geary_Local_MV_Tester(unittest.TestCase):
def setUp(self):
np.random.seed(100)
self.w = libpysal.io.open(libpysal.examples.get_path("stl.gal")).read()
f = libpysal.io.open(libpysal.examples.get_path("stl_hom.txt"))
self.y1 = np.array(f.by_col['HR8893'])
self.y2 = np.array(f.by_col['HC8488'])
def test_local_geary_mv(self):
lG_mv = Geary_Local_MV(connectivity=self.w).fit([self.y1, self.y2])
print(lG_mv.p_sim[0])
self.assertAlmostEqual(lG_mv.localG[0], 0.4096931479581422)
self.assertAlmostEqual(lG_mv.p_sim[0], 0.211)
suite = unittest.TestSuite()
test_classes = [
Geary_Local_MV_Tester
]
for i in test_classes:
a = unittest.TestLoader().loadTestsFromTestCase(i)
suite.addTest(a)
if __name__ == "__main__":
runner = unittest.TextTestRunner()
runner.run(suite)
| 2.34375 | 2 |
packages/pyright-scip/snapshots/output/nested_items/src/__init__.py | sourcegraph/pyright | 0 | 12788745 | <reponame>sourcegraph/pyright
# < definition scip-python pypi snapshot-util 0.1 src/__init__:
#documentation (module) src
| 0.726563 | 1 |
strYa/raw_data_timeline.py | karyna-volokhatiuk/strYa | 3 | 12788746 | '''
Module that makes timeline graphs from csv data.
'''
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def plot_timeline(file_name):
'''
Makes timeline graphs from csv data.
'''
# data frame from rounded data file
df = pd.read_csv(file_name)
    # pull the shared time axis used by all of the graphs
time = df['computer_time']
# plotting
    fig, axes = plt.subplots(8, 1)
    columns = ['x_acc_1', 'y_acc_1', 'x_gyro_1', 'y_gyro_1',
               'x_acc_2', 'y_acc_2', 'x_gyro_2', 'y_gyro_2']
    # one timeline subplot per sensor column
    for axis, column in zip(axes, columns):
        axis.plot(time, df[column].tolist())
        axis.set_title(column)
fig.subplots_adjust(hspace=0.5)
plt.show()
# plt.savefig(new)
# if __name__ == "__main__":
# plot_timeline('walking.csv')
| 3.59375 | 4 |
profile_app/urls.py | saif-ali5589/Profile-rest-api | 0 | 12788747 | <gh_stars>0
from django.urls import path , include
from rest_framework.routers import DefaultRouter
from profile_app import views
router = DefaultRouter()
router.register('hello-viewset',views.HelloViewSet,base_name='hello-viewset')
router.register('profile',views.UserProfileViewSet)
router.register('feed',views.UserProfileFeedViewSet)
# We don't need to specify the basename because the viewset already defines a
# queryset, which lets the router derive the basename automatically.
# as_view() turns an APIView class into a view callable for the URLconf.
urlpatterns = [
path('hello-view/',views.HelloApiViews.as_view()),
path('login/',views.UserLoginApiView.as_view()),
path('',include(router.urls)),
]
| 2.125 | 2 |
genomics_gans/lit_modules.py | Unique-Divine/GANs-for-Genomics | 1 | 12788748 | import pytorch_lightning as pl
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
from typing import List, Tuple, Dict, Any, Union, Iterable
try:
import genomics_gans
except ImportError:
exec(open('__init__.py').read())
import genomics_gans
from genomics_gans.prepare_data.data_modules import TabularDataset
class LitFFNN(pl.LightningModule):
# ----------------------------------
# Initialize constants and NN architecture
# ----------------------------------
def __init__(self, network: nn.Module, train_set: TabularDataset,
val_set: TabularDataset, test_set: TabularDataset):
""" Feed-Forward Neural Network System
Args:
X (np.ndarray): Feature matrix
"""
super().__init__()
        # TODO: train-val-test splits
        self.network = network
        self.train_set = train_set
        self.val_set = val_set
        self.test_set = test_set
# Hard-coded constants
self.loss_fn = nn.NLLLoss()
self.lr = 1e-2
self.N_CLASSES = 3
self.epoch = 0
self.epoch_train_losses = []
self.epoch_val_losses = []
self.best_val_epoch = 0
def forward(self, x):
logits = self.network(x)
return logits
def configure_optimizers(self):
optimizer = torch.optim.Adam(
params = self.parameters(), lr = self.lr)
return optimizer
# ----------------------------------
# Training, validation, and test steps
# ----------------------------------
def training_step(self, batch, batch_idx):
x, y = batch
y = y.flatten().long()
logits = self(x)
loss = self.loss_fn(logits, y)
self.log('train_loss', loss, on_step=True, on_epoch=True,
prog_bar=True)
return loss
def validation_step(self, batch, batch_idx, val=True):
x, y = batch
y = y.flatten().long()
# compute loss
logits = self(x)
loss = self.loss_fn(logits, y)
self.log('val_loss', loss, on_step=True, on_epoch=True,
prog_bar=True) # self.log interacts with TensorBoard
return loss
def test_step(self, batch, batch_idx):
x, y = batch
y = y.flatten().long()
# compute loss
logits = self(x)
loss = self.loss_fn(logits, y)
self.log('test_loss', loss, on_step=True, on_epoch=True,
prog_bar=False)
return loss
def training_epoch_end(self, outputs: List[Any]):
outputs: List[torch.Tensor] = [list(d.values())[0] for d in outputs]
        total = torch.zeros(1, dtype=float).to(self.device)
        for batch_loss in outputs:
            total += batch_loss.to(self.device)
        # average over the number of batches (avoids the off-by-one from
        # dividing by the last batch index)
        avg_batch_loss = total / len(outputs)
        self.epoch_train_losses.append(avg_batch_loss[0].item())
def validation_epoch_end(self, outputs: List[Any]):
        total = torch.zeros(1, dtype=float).to(self.device)
        for batch_loss in outputs:
            total += batch_loss.to(self.device)
        avg_batch_loss = total / len(outputs)
        self.epoch_val_losses.append(avg_batch_loss[0].item())
# ---------------------------------------------------------------
# Custom training for evolutionary algorithm
# --------------------------------------------------------------
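    # NOTE: the custom_* methods below assume that self.train_dl,
    # self.test_dl, self.optimizer and self.n_epochs have been attached to
    # the instance by external setup code before they are called.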
def custom_training_step(self, verbose=False):
self.network.train()
train_loader = self.train_dl
train_loss: float = 0
for idx, batch in enumerate(train_loader):
self.optimizer.zero_grad() # clears paramter gradient buffers
inputs, targets = batch
# transfer batch data to computation device
inputs, targets = [
tensor.to(self.device) for tensor in [inputs, targets]]
targets = targets.long() # converts dtype to Long
output = self.network(inputs)
loss = self.loss_fn(output, targets.flatten())
loss.backward() # back propagation
self.optimizer.step() # update model weights
train_loss += loss.data.item()
if (idx % 10 == 0) and verbose:
print(f"epoch {self.epoch+1}/{self.n_epochs}, "
+ f"batch {idx}.")
train_loss = train_loss / len(train_loader)
return train_loss
def custom_validation_step(self):
val_loader = self.test_dl
val_loss = 0.0
self.network.eval()
for batch in val_loader:
inputs, targets = batch
inputs, targets = [tensor.to(self.device) for tensor in batch]
targets = targets.long() # converts dtype to Long
output = self.network(inputs)
loss = self.loss_fn(output, targets.flatten())
val_loss += loss.data.item()
val_loss = val_loss / len(val_loader)
return val_loss
def custom_train(self, n_epochs, plot=True, verbose=False, plot_train=False):
train_loader = self.train_dl
val_loader = self.test_dl
device=self.device
self.network.to(self.device)
train_losses, val_losses = [], []
best_val_loss = np.infty
best_val_epoch = 0
early_stopping_buffer = 10
epoch = 0
best_params = None
for epoch in range(n_epochs):
# Training
train_loss = self.custom_training_step()
train_losses.append(train_loss)
# Validation
val_loss = self.custom_validation_step()
val_losses.append(val_loss)
if val_loss < best_val_loss:
best_params = self.network.parameters()
best_val_loss = val_loss
best_val_epoch = epoch
# If validation loss fails to decrease for some number of epochs
# end training
if np.abs(epoch - best_val_epoch) > early_stopping_buffer:
break
print(f"Epoch: {epoch}, Training Loss: {train_loss:.3f}, "
+f"Validation loss: {val_loss:.3f}")
#self.network.parameters = best_params
self.best_val_loss = best_val_loss
self.best_val_epoch = best_val_epoch
if plot:
skip_frames = 3
fig, ax = plt.subplots()
fig.tight_layout()
if plot_train:
ax.plot(np.arange(epoch + 1)[skip_frames:],
train_losses[skip_frames:], '-', label="training set")
ax.plot(np.arange(epoch + 1)[skip_frames:],
val_losses[skip_frames:], '-', label="test set")
ax.set(xlabel="Epoch", ylabel="Loss")
ax.legend()
plt.show()
# ----------------------------------
# Helper functions - Use post-training
# ----------------------------------
def predict(self, x: torch.Tensor) -> torch.Tensor:
self.eval()
        x = x.to(self.device)
logits = self.network(x)
preds = torch.argmax(input = logits, dim=1)
return preds
def accuracy(self, pred: torch.Tensor, target: torch.Tensor):
self.eval()
if isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor):
pred, target = [t.to(self.device) for t in [pred, target]]
elif isinstance(pred, np.ndarray) and isinstance(target, np.ndarray):
tensors = [torch.Tensor(t).to(self.device) for t in [pred, target]]
pred, target = tensors
else:
raise ValueError("The types of `pred` and `target` must match. "
+ "These can be np.ndarrays or torch.Tensors.")
accuracy = pl.metrics.functional.accuracy(pred, target)
return accuracy
def f1(self, pred: torch.Tensor, target: torch.Tensor):
self.eval()
pred, target = [t.flatten() for t in [pred, target]]
if isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor):
pred, target = [t.to(self.device) for t in [pred, target]]
elif isinstance(pred, np.ndarray) and isinstance(target, np.ndarray):
tensors = [torch.Tensor(t).to(self.device) for t in [pred, target]]
pred, target = tensors
else:
raise ValueError("The types of `pred` and `target` must match. "
+ "These can be np.ndarrays or torch.Tensors.")
f1 = pl.metrics.functional.f1(
preds = pred, target = target, num_classes = 3, multilabel = True)
return f1
def multiclass_aucroc(self, pred: torch.Tensor, target: torch.Tensor):
self.eval()
if isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor):
pred, target = [t.to(self.device) for t in [pred, target]]
elif isinstance(pred, np.ndarray) and isinstance(target, np.ndarray):
tensors = [torch.Tensor(t).to(self.device) for t in [pred, target]]
pred, target = tensors
else:
raise ValueError("The types of `pred` and `target` must match. "
+ "These can be np.ndarrays or torch.Tensors.")
auc_roc = pl.metrics.functional.classification.multiclass_auroc(
pred = pred, target = target)
return auc_roc
def plot_losses(self, plot_train=True):
skip_frames = 1
fig, ax = plt.subplots()
fig.tight_layout()
n_epochs = len(self.epoch_val_losses)
if plot_train:
n_epochs = len(self.epoch_train_losses)
ax.plot(np.arange(n_epochs)[skip_frames:],
self.epoch_train_losses[skip_frames:], label="train")
ax.plot(np.arange(n_epochs)[skip_frames:],
self.epoch_val_losses[1:][skip_frames:], label="val")
ax.set(xlabel="Epoch", ylabel="Loss")
ax.legend()
plt.show()
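# A minimal wiring sketch (assumes the TabularDataset splits already exist
# and that `net` emits log-probabilities, since the loss is NLLLoss):
#
#     model = LitFFNN(net, train_set, val_set, test_set)
#     trainer = pl.Trainer(max_epochs=20)
#     trainer.fit(model, train_dataloader, val_dataloader)
#     model.plot_losses()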
| 2.515625 | 3 |
pybaseball/cache/file_utils.py | reddigari/pybaseball | 650 | 12788749 | import json
import os
import pathlib
from typing import Any, Dict, List, Union, cast
JSONData = Union[List[Any], Dict[str, Any]]
# Splitting this out for testing with no side effects
def mkdir(directory: str) -> None:
return pathlib.Path(directory).mkdir(parents=True, exist_ok=True)
# Splitting this out for testing with no side effects
def remove(filename: str) -> None:
return os.remove(filename)
def safe_jsonify(directory: str, filename: str, data: JSONData) -> None:
mkdir(directory)
fname = os.path.join(directory, filename)
with open(fname, 'w') as json_file:
json.dump(data, json_file)
def load_json(filename: str) -> JSONData:
with open(filename) as json_file:
return cast(JSONData, json.load(json_file))
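# Example round trip (hypothetical cache directory):
#
#     safe_jsonify("/tmp/pybaseball_cache", "meta.json", {"version": 1})
#     data = load_json("/tmp/pybaseball_cache/meta.json")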
| 3.03125 | 3 |
src/runner.py | tandriamil/bcs-aes256-ctr | 0 | 12788750 | #!/usr/bin/python3
"""
Script for generating the data set (128b, 256b, 1kB, 1MB, 100MB, 1GB).
Context : Projet BCS - Master 2 SSI - Istic (Univ. Rennes1)
Authors : <NAME> and <NAME>
This script also executes the time measurement into 4 contexts
=> Sequential encryption
=> Sequential decryption
=> Parallel encryption
=> Parallel decryption
"""
import os
import time
import subprocess
from collections import OrderedDict
import pygal
B_NUMBER = 1024
B_SIZE = 16
SIXTEEN_B = None
DATASET_DIR = './dataset/'
REPORT_DIR = './report/'
DATASET_EXTENSION = '.i'
CIPHERED_EXTENSION = '.encrypted'
UNCIPHERED_EXTENSION = '.decrypted'
EXEC_NAME = './myAE.exe'
FILESIZES = OrderedDict([
('128b', 16),
('256b', 32),
('1kB', 1000),
('1MB', 1000000),
('100MB', 100000000),
('1GB', 1000000000)
])
def generate_file(name, size):
"""Generate an input file containing random bits."""
print('=> Generating %s file' % name)
with open(DATASET_DIR+name+DATASET_EXTENSION, 'wb+') as fout:
fout.write(os.urandom(size))
def generate_dataset():
"""Generate dataset files."""
print(" ### Generating the dataset files ### ")
print('/!\\ This function can take a lot of time /!\\')
# For every filesize, generate the file
for key, value in FILESIZES.items():
generate_file(key, value)
def cipher_execution(op, input, output, password):
"""Launch the encryption and measure the time it took."""
command = [
EXEC_NAME,
op,
input,
'-o',
output,
'-k',
password
]
start_time = time.time()
    subprocess.call(command)
end_time = time.time() - start_time
print("%s took %f seconds" % (input, end_time))
return end_time
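# For example, cipher_execution('-c', 'dataset/1kB.i', 'dataset/1kB.encrypted',
# 'password') builds and times the command:
#
#     ./myAE.exe -c dataset/1kB.i -o dataset/1kB.encrypted -k password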
def generate_encryption_statistics():
"""Generate the figure of encryption time given the input size."""
print("\nGeneration of the encryption statistics:")
# Password
password = 'password'
# The table of the results
results = []
    # For every file size, measure the encryption time
for key in FILESIZES:
results.append(
cipher_execution(
'-c',
DATASET_DIR+key+DATASET_EXTENSION,
DATASET_DIR+key+CIPHERED_EXTENSION,
password
)
)
line_chart = pygal.Line()
line_chart.title = 'Execution time of encryption in sequential mode'
line_chart.x_title = 'Size of input file'
line_chart.x_labels = FILESIZES
line_chart.y_title = 'Execution time in seconds'
line_chart.add('Time', results)
line_chart.render_to_png(REPORT_DIR+'encryption_sequential.png')
def generate_decryption_statistics():
"""Generate the figure of decryption time given the input size."""
print("\nGeneration of the decryption statistics:")
# Password
password = 'password'
# The table of the results
results = []
    # For every file size, measure the decryption time
for key in FILESIZES:
results.append(
cipher_execution(
'-d',
DATASET_DIR+key+CIPHERED_EXTENSION,
DATASET_DIR+key+UNCIPHERED_EXTENSION,
password
)
)
line_chart = pygal.Line()
line_chart.title = 'Execution time of decryption in sequential mode'
line_chart.x_title = 'Size of input file'
line_chart.x_labels = FILESIZES
line_chart.y_title = 'Execution time in seconds'
line_chart.add('Time', results)
line_chart.render_to_png(REPORT_DIR+'decryption_sequential.png')
# Main function to be launched when this script is called
if __name__ == '__main__':
# Generation of the dataset
gen = input("Do you want to generate dataset? [y/n] ")
if gen == 'y':
generate_dataset()
# Process statistics on it
generate_encryption_statistics()
generate_decryption_statistics()
| 2.578125 | 3 |