id stringlengths 1-7 | text stringlengths 6-1.03M | dataset_id stringclasses 1 value
---|---|---|
1663522
|
"""
Test that we can get a reasonable list of tags from the
data source of /tags
"""
import shutil
from tiddlyweb.model.bag import Bag
from tiddlyweb.model.tiddler import Tiddler
from tiddlyweb.config import config
from tiddlywebplugins.utils import get_store
from tiddlywebplugins.tank.search import get_indexed_tags
from tiddlywebplugins.tank import init
def setup_module(module):
for dir in ('store', 'indexdir'):
try:
shutil.rmtree(dir)
        except OSError:  # ignore missing directories from previous runs
pass
init(config)
store = get_store(config)
store.put(Bag('bagone'))
store.put(Bag('bagtwo'))
module.store = store
def test_for_tags():
tiddler1 = Tiddler('one', 'bagone')
tiddler2 = Tiddler('two', 'bagtwo')
tiddler1.tags = ['alpha', 'beta']
tiddler2.tags = ['alpha', 'gamma']
store.put(tiddler1)
store.put(tiddler2)
tags = get_indexed_tags(config, None)
assert len(tags) == 3
assert sorted(tags) == ['alpha', 'beta', 'gamma']
tags = get_indexed_tags(config, 'bag:"bagone"')
assert len(tags) == 2
assert sorted(tags) == ['alpha', 'beta']
|
StarcoderdataPython
|
1671173
|
# Use the third-party fake_useragent library to create User-Agent strings
from fake_useragent import UserAgent
ua = UserAgent()
print(ua.chrome)
print('='*100)
for i in range(10):
print(ua.random)
|
StarcoderdataPython
|
3288408
|
#!/usr/bin/python
###############################################################################
# enbackup-archive.py - worker script to store backups on external disks
#
# August 2008, <NAME>
#
# Copyright (c) 2008-2012 by Ensoft Ltd. All rights reserved
#
# Version 1.0 - all working, does all it should apart from postponing final
# "cp" until off peak hours.
# Version 1.1 - now copy is postponed until 18:00
# Version 1.2 - only open log in child process!
# - check we are root (only important when run manually)
# - don't touch the log file until we have the lock (to avoid
# scribbling over it if duplicate processes were started!)
# - send a mail from parent process, just saying to expect a mail
# from child in the next day (ie. just confirm that udev started
# us correctly)
# Version 1.3 - will now postpone final cp until between set of times
# (so if run late in the day it should still complete the same
# day)
# - allows deleting of quarterly backups as long as a certain
# number of backups still remain (this avoids the most frequent
# need for user intervention)
# Version 1.4 - fix bug where lock file is released even if we didn't get it
# Version 1.5 - build on 1.3 fix to allow deletion of weekly/monthly backups as
# long as we have the minimum number of remaining sets.
# Version 1.6 - report the time taken by the copy - and calculate copy rate.
# Version 1.7 - everything is postponed to 21:00
# - reduce the number of backup sets kept due to increasing backup
# size.
# Version 1.8 - Parallelized the 'rm' and 'du' to achieve a faster run time.
# Version 1.9 - Reduced 'rm' and 'du' parallelization to reduce disk load
#               (a suspected trigger for kernel crashes).
# - Added default config file support.
# - Misc fixes to log messages to tidy up unused arguments.
# Version 1.10 - Migrate to lockfile-based locking
# - Use enbackup Logger support for debug/error messages
# - Use 'udevadm info' for more robust detection of USB vs. eSATA
# - Add handling to suspend USB, as well as eSATA, devices,
# before unplugging them.
###############################################################################
#
# Overview: this is the script that the udev infra will call when a disk
# insertion has occurred. We register for these events using the config file
# eg. /etc/udev/rules.d/10-enbackups.rules.
#
# In broad strokes, this script will be run when a backup disk is inserted, the
# disk will be mounted, backup sets will be copied to the external disk
# (deleting older backups as required), before finally unmounting the disk and
# sending an email to the administrator.
#
# As this has been triggered off a udev event and might take a considerable
# amount of time to run, we should fork immediately to return control to udev.
#
#
# Work To do:
#
# - would be nice if the output of enbackup-rdiff-to-mirror could be displayed,
# especially useful if it could be shown as it progresses. Probably not too
# hard to do - just need to read lines rather than wait for all input.
#
# - should we include today's date in list passed to "filter_dates"? Probably
# as this counts towards our allowance, though we need to make sure that this
# is given preference over any backup in the last week (especially as it
# doesn't exist, so any attempt to delete it will fail!).
#
import os
import sys
import re
import pwd
import time
import datetime
import subprocess
import traceback
import shutil
from enbackup.utils import DirLock, GlobalLock
import enbackup.utils
import enbackup.log
import argparse
#
# Directory where backup scripts are located.
#
script_dir = "/usr/bin/"
#
# Directory to which backup logs are written.
#
log_output_dir = "/var/log/enbackup/"
log_file_name = os.path.join(log_output_dir, "enbackup-archive.log")
operation_success = False
logfile_email_to = None
logfile_email_to_default = "root@localhost"
#
# Time until which to postpone the start of archiving.  By default, don't
# wait at all (i.e. start any time from 00:00 until the end of the day).
#
start_at_default = "00:00"
start_at_fmt = "%H:%M"
#
# Definitions of where backups are stored or need copying to.
#
mount_dir = None
mount_dir_default = "/media/usbdisk"
target_dir = None
target_dir_default = "backups"
mirror_dir = "mirror-only"
source_dir = None
source_dir_default = "/enbackup/"
#
# Config file location
#
config_dir = "/etc/enbackup.d/"
#
# Minimum number of "keep" backup sets we need, before we delete
# "keep_optional" ones
#
minimum_backup_sets_required = 1
#
# Logging stuff - debug just writes to file, error raises exception (which we
# catch and display nicely before cleaning up).
#
logger = enbackup.log.Logger("archive")
def write_to_debugfile(str):
if not debugfile.closed:
debugfile.write("%s\n" % (str))
debugfile.flush()
def debug(str):
logger.debug(str)
write_to_debugfile(str)
def error(str):
logger.error(str)
write_to_debugfile(str)
raise Exception(str)
#
# Locks to protect directories we're reading from, as well as a whole
# server-level lock to stop multiple instances of this or other enbackup
# scripts interfering with each other.
#
dir_locks = None
server_lock = GlobalLock("Process {0}, PID {1}".format(sys.argv[0],
os.getpid()))
#
# Acquire the lock to prevent any other rdiff process writing to the output
# directory. We are very heavy handed with the locking to avoid anyone
# managing to somehow screw things up accidentally - one lock per machine.
#
# Note: if this raises exception this will be caught!
#
def get_backup_locks():
global dir_locks
global server_lock
debug("About to acquire global lock")
#
# First, grab the global lock. This stops us interfering with
# other enbackup scripts (and stops other enbackup scripts
# interfering with us).
#
server_lock.lock()
#
# Also grab a lock for all the directories we're about
# to read from, to make sure no other scripts are
# going to be writing to them. We find the set of directories
# by listing the contents of the parent dir. This is a bit
# hacky -- it would be nice to do this based on some notion of
# configured backup sets instead -- but is sufficient for now.
#
# Hack: actually, the above logic isn't quite enough because
# our config has multiple levels of directories for aged vs.
# unaged data. Grab locks for each of those subdirectories
# too: more than we need, but this should be safe.
#
dir_contents = [os.path.join(source_dir, x)
for x in os.listdir(source_dir)]
lock_dirs = [x for x in dir_contents if os.path.isdir(x)]
lock_dirs_aged = [ os.path.join(x, "aged") for x in lock_dirs ]
lock_dirs_unaged = [ os.path.join(x, "unaged") for x in lock_dirs ]
lock_dirs = lock_dirs + lock_dirs_aged + lock_dirs_unaged
debug("About to lock directories {0}".format(lock_dirs))
lock_details = "Time %s, PID %s\n" % (time.ctime(), os.getpid())
dir_locks = [DirLock(lock_details, x) for x in lock_dirs]
for lock in dir_locks:
lock.lock()
debug("Acquired all locks")
return
#
# Release the backup locks - obviously only if this process acquired them.
#
def release_backup_locks():
global dir_locks
global server_lock
unlock_count = 0
if dir_locks:
for lock in dir_locks:
if lock.locked:
lock.unlock()
unlock_count = unlock_count + 1
debug("Released {0} directory locks".format(unlock_count))
if server_lock.locked:
server_lock.unlock()
debug("Lock file released")
else:
debug("Lock file not aquired - do not release")
def sleep_until_between_times(from_time, to_time):
#
# Want to sleep until some time between from_time and to_time - ie. if
# before from_time we sleep until then, if between the two we just go
# for it, if after to_time we sleep until from_time again.
#
    # Calculate how long from now until the time specified - and then sleep
# for that long
#
# Obviously just overwriting the time we want in the current time won't
# guarantee 'then' is in the future - so cope with wrapping.
#
from_hour = from_time[0]
from_min = from_time[1]
to_hour = to_time[0]
to_min = to_time[1]
now = datetime.datetime.today()
debug("Sleeping until between %2.2d:%2.2d and %2.2d:%2.2d "
"(now %2.2d:%2.2d:%2.2d)" %
(from_hour, from_min, to_hour, to_min,
now.hour, now.minute, now.second))
then_start = now.replace(hour=from_hour, minute=from_min, second=0)
then_end = now.replace(hour=to_hour, minute=to_min, second=0)
diff = then_start - now
if (now > then_start) and (now < then_end):
debug("Current time betwen start and end - run now")
else:
if diff.days == -1:
debug("End time specified is in the past - set for tomorrow")
diff += datetime.timedelta(days=1)
if diff.days != 0:
#
# The diff (day, second) should only ever have a "second"
# element, as obviously it won't be more than a day in advance.
#
error("ERROR: time diff is unexpected: %s, %s, %s" %
(diff.days, diff.seconds, diff))
debug("Sleeping for %u seconds" % (diff.seconds))
time.sleep(diff.seconds)
def month_to_quarter(month):
month = int(month)
    quarter = (month + 2) // 3
return quarter
def date_to_ages(date, basedate, max_dates):
#
    # Convert the date we have to an age, using the basedate as a reference.
# In normal operation basedate is today - but want this controllable
# for testing purposes.
#
# "Age" here not just simply calculated from the number of days between
# two dates, but instead from the week/month/quarter of the year.
#
# The age in months can be categorised simply by inspection of the month
    # of the year, the age in quarters can similarly be done if we divide the
    # year up (months 1-3, 4-6, 7-9, 10-12). The age in weeks cannot be done
# by inspection of the date alone - but conversion to iso format lets us
# calculate the difference between the ISO week of the two dates (this
# ensures that, for example, if a backup was run on Thursday one week and
# Tuesday the next the difference is a week, even though it is less than
# this).
#
if basedate.toordinal() < date.toordinal():
raise "Invalid basedate - must be more recent than all backup dates"
age_days = basedate.toordinal() - date.toordinal()
if age_days > max_dates["age_days"]:
max_dates["age_days"] = age_days
age_weeks = basedate.isocalendar()[1] - date.isocalendar()[1]
age_weeks += 52 * (basedate.isocalendar()[0] - date.isocalendar()[0])
if age_weeks > max_dates["age_weeks"]:
max_dates["age_weeks"] = age_weeks
age_months = basedate.month - date.month
age_months += 12 * (basedate.year - date.year)
if age_months > max_dates["age_months"]:
max_dates["age_months"] = age_months
age_quarters = (month_to_quarter(basedate.month) -
month_to_quarter(date.month))
age_quarters += 4 * (basedate.year - date.year)
if age_quarters > max_dates["age_quarters"]:
max_dates["age_quarters"] = age_quarters
return {"age_days": age_days,
"age_weeks": age_weeks,
"age_months": age_months,
"age_quarters": age_quarters}
def parse_iso_date(datestr):
#
# Take string in ISO format (YYYY-MM-DD) and return [year, month, day]
# tuple.
#
res = re.match("([0-9]{4})-([0-9]{2})-([0-9]{2})", datestr)
year = int(res.groups()[0])
month = int(res.groups()[1])
day = int(res.groups()[2])
return([year, month, day])
def is_iso_date(datestr):
#
# Return TRUE/FALSE if is/isn't ISO date string
#
is_date = True
try:
parse_iso_date(datestr)
except AttributeError:
is_date = False
return is_date
def filter_dates(dates):
#
    # Instead of having a rolling calendar, let's make things simple.
#
    # Keep:
    #  - 1 a week for the last 'weekly_history' weeks
    #  - 1 a month for the last 'monthly_history' months (where month
    #    boundaries are as on the calendar, not rolling)
    #  - 1 a quarter beyond that (again, with static quarter boundaries,
    #    ie. months 1-3, 4-6, 7-9, and 10-12).
#
weekly_history = 4
monthly_history = 3 # ie. a quarter!
keep = []
keep_optional = []
#
# Sort the list - newest first
#
dates.sort()
dates.reverse()
debug("Incoming (sorted) dates: %s" % (dates))
basedate = datetime.date.today()
#
# Sort the dates into buckets based on the week, month and year they fall
# in to.
#
# Dictionary version of data array..
#
dates_d = {}
max_dates = {"age_days": 0, "age_weeks": 0,
"age_months": 0, "age_quarters": 0}
for datestr in dates:
year, month, day = parse_iso_date(datestr)
date = datetime.date(year, month, day)
ages = date_to_ages(date, basedate, max_dates)
debug("Date %s is age %s days, %s weeks, %s months, %s quarters" %
(datestr, ages["age_days"], ages["age_weeks"],
ages["age_months"], ages["age_quarters"]))
dates_d[datestr] = ages
#
# We now know the age in each of the various measures. Need 'buckets' for
# every week in that range, every month in that range, and every quarter
# in that range
#
by_week = []
by_month = []
by_quarter = []
for i in range(0, max_dates["age_weeks"] + 1):
by_week.append([])
for i in range(0, max_dates["age_months"] + 1):
by_month.append([])
for i in range(0, max_dates["age_quarters"] + 1):
by_quarter.append([])
#
# For each category we want the buckets to be such that the zeroth element
    # is the newest, and within each bucket we want the items to be sorted
# oldest first. This means we typically keep the first item from the top
# N buckets.
#
# Using age to calculate the bucket, and then inserting items (with dates
# already sorted oldest first) does this for us.
#
for date in dates:
by_week[dates_d[date]["age_weeks"]].insert(0, date)
by_month[dates_d[date]["age_months"]].insert(0, date)
by_quarter[dates_d[date]["age_quarters"]].insert(0, date)
#
# Walk through the weekly buckets, decide what to keep..
#
found = 0
for i in range(0, len(by_week)):
if len(by_week[i]) == 0:
debug("Week %s: skipping empty bucket" % (i))
elif found < weekly_history:
debug("Week %s: keeping %s, discarding %s" %
(i, by_week[i][0], by_week[i][1:]))
keep.append(by_week[i][0])
found += 1
else:
debug("Week %s: discarding all: %s" % (i, by_week[i]))
delete = [x for x in dates if x not in keep]
debug(" Keep: %s" % (keep))
debug(" Delete: %s" % (delete))
#
# We now have our weekly backups. When calculating the monthly backups
# we want to include anything we already have marked for keeping..
#
found = 0
for i in range(0, len(by_month)):
if len(by_month[i]) == 0:
debug("Month %s: skipping empty bucket" % (i))
elif found < monthly_history:
#
# First check if we've already got an item from this monthly
# bucket. If so, we can discard all, if not we keep the
# first (oldest).
#
found += 1
overlap = [x for x in keep if x in by_month[i]]
if overlap:
debug("Month %s: skipping - some dates already marked to "
"keep: %s" % (i, ", ".join(overlap)))
else:
debug("Month %s: keeping %s, discarding %s" %
(i, by_month[i][0], by_month[i][1:]))
keep.append(by_month[i][0])
else:
debug("Month %s: discarding all: %s" % (i, by_month[i]))
delete = [x for x in dates if x not in keep]
debug(" Keep: %s" % (keep))
debug(" Delete: %s" % (delete))
#
# We now have our monthly backups too. When calculating the quarterly
# backups we want to include anything we already have marked for keeping.
#
for i in range(0, len(by_quarter)):
if len(by_quarter[i]) == 0:
debug("Quarter %s: skipping empty bucket" % (i))
else:
#
# First check if we've already got an item from this quarterly
# bucket. If so, we can discard all, if not we keep the
# first (oldest).
#
overlap = [x for x in keep if x in by_quarter[i]]
if overlap:
debug("Quarter %s: skipping - some dates already marked to "
"keep: %s" % (i, ", ".join(overlap)))
else:
debug("Quarter %s: keeping %s, discarding %s" %
(i, by_quarter[i][0], by_quarter[i][1:]))
keep_optional.append(by_quarter[i][0])
delete = [x for x in dates if x not in (keep + keep_optional)]
debug(" RESULT: Keep: %s" % (keep))
debug(" RESULT: Keep optional: %s" % (keep_optional))
debug(" RESULT: Delete: %s" % (delete))
debug("")
    return {"keep": keep, "keep_optional": keep_optional, "delete": delete}
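#
# Illustrative sketch (not called by the script, and assuming the module-level
# debugfile has already been opened, since filter_dates() logs via debug()):
# filter_dates() applied to a handful of hypothetical backup dates.  Because
# ages are measured relative to datetime.date.today(), the exact split depends
# on when this is run.
#
def _example_filter_dates():
    dates = ["2012-06-25", "2012-06-18", "2012-06-11", "2012-06-04",
             "2012-05-07", "2012-04-02", "2012-01-02"]
    res = filter_dates(dates)
    # res["keep"] holds the weekly/monthly sets, res["keep_optional"] the
    # quarterly sets, and res["delete"] everything else.
    return res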
def normalise_directory(dir):
#
# Normalise a directory by ensuring it has a trailing '/'.
#
if dir[-1] == '/':
return dir
else:
return dir + '/'
def run_cmd(cmd):
#
    # Spawn a subprocess and wait for it to complete, returning
# [retcode, stderr, stdout]
#
debug("Running '%s'" % (" ".join(cmd)))
subproc = subprocess.Popen(cmd,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = subproc.communicate()
return([subproc.returncode, stderr, stdout])
def pipe(cmd1, cmd2):
#
# Pipe the output from cmd1 into cmd2 and return the output. We buffer the
# output of the first process to allow it to complete before the second
# process has necessarily dealt with all the output.
#
p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE, bufsize=-1)
p2 = subprocess.Popen(cmd2, stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close()
stdout, stderr = p2.communicate()
if p2.returncode != 0:
error("ERROR: failed to run the command '%s | %s': %s\n%s\n" %
((" ".join(cmd1)), (" ".join(cmd2)), p2.returncode, stderr))
return p2.returncode, stdout
def calculate_total_size(cmd):
#
# Given a 'find' command, calculate the total size of all files found
# by that command.
#
cmd.append("-print0")
du_cmd = "xargs -0P2 -n1 du -s".split()
rc, output = pipe(cmd, du_cmd)
#
# Split the output of 'du' on newlines. We don't check the last element
# as that is a blank line.
#
total_size = 0
if rc == 0:
a = output.split("\n")
for line in a[0:-1]:
size = re.match('[0-9]+', line)
total_size += int(size.group())
return rc, total_size
def calculate_directory_size(dir):
#
# The directory size is calculated by:
# a) summing the size of directories 4 deep,
    # b) summing the size of all files up to 4 deep,
    # c) counting the directories up to 3 deep and allowing 4 KB for each
    #    directory,
# d) adding all the above.
#
# This is not necessarily accurate as it assumes that the default block
# size is 4k and that all directories are 4k.
#
dir = normalise_directory(dir)
path_regexp = "%s*/*/*/*" % dir
cmd = ["find", dir, "-maxdepth", "4", "-type", "d", "-path", path_regexp]
rc, size1 = calculate_total_size(cmd)
if rc == 0:
cmd = ["find", dir, "-maxdepth", "4", "-type", "f"]
rc, size2 = calculate_total_size(cmd)
if rc == 0:
path_regexp = "%s*" % dir
cmd = ["find", dir, "-maxdepth", "3", "-type", "d",
"-path", path_regexp]
rc, err, out = run_cmd(cmd)
size3 = 4 * (len(out.split("\n")) - 1)
#
# Return the size in Mb and round up.
#
if rc == 0:
        size = (size1 + size2 + size3 + 1023) // 1024
else:
size = 0
return rc, size
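#
# Illustrative arithmetic (hypothetical numbers, not called by the script):
# how the three partial sizes combine into a rounded-up figure in Mb.
#
def _example_directory_size_arithmetic():
    size1 = 1000000   # KB: directories 4 deep
    size2 = 23500     # KB: files up to 4 deep
    size3 = 1000      # KB: 250 shallow directories at 4 KB each
    size = (size1 + size2 + size3 + 1023) // 1024
    assert size == 1001   # ~1000.5 Mb, rounded up
    return size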
def get_free_space_from_df(dir):
rc, err, out = run_cmd(["df", "-m", "-P", dir])
if rc != 0:
error("ERROR: failed to determine available free space: %s\n%s\n" %
(rc, err))
free_space = int(out.split("\n")[1].split()[3])
return free_space
def check_sufficient_free_space(dir, size_needed, is_nfs):
free_space_known = False
while not free_space_known:
free_space = get_free_space_from_df(dir)
if is_nfs:
# On nfs, df sometimes updates slowly in the background
# after deleting a file. In case that's happening, wait
# a few seconds, check df again, and if the reported value
# is different repeat until it's stable.
time.sleep(10)
new_free_space = get_free_space_from_df(dir)
if new_free_space == free_space:
free_space_known = True
else:
debug("Free space on NFS dropped from {}MB to {}MB "
"while waiting, trying again".format(free_space,
new_free_space))
else:
free_space_known = True
debug("Free space available on disk: %u Mb (%u Mb needed)" %
(free_space, size_needed))
return (free_space >= size_needed)
def remove_old_backup(old_backup):
#
# Removes a directory by spawning multiple copies of 'rm'.
# Arg can be either a directory or a tar file.
#
rc = 0
if old_backup.endswith(".tar"):
debug("Removing tar file '%s'" % old_backup)
os.remove(old_backup)
else:
dir = normalise_directory(old_backup)
debug("Removing directory '%s'" % dir)
path_regexp = "%s*/*/*/*" % dir
find_cmd = ["find", dir, "-maxdepth", "4", "-type", "d",
"-path", path_regexp, "-print0"]
rm_cmd = "xargs -0P2 -n1 rm -rf".split()
rc, stdout = pipe(find_cmd, rm_cmd)
shutil.rmtree(dir)
return rc
def device_is_usb(devname):
#
# Check whether the device is USB by using 'udevadm info' to check which
# bus it's attached to.
#
is_usb = False
rc, err, out = run_cmd(["/sbin/udevadm",
"info",
"-a",
"-n", "/dev/{0}".format(devname)])
if rc != 0:
error("ERROR: unable to check type of device {0}: {1}\n{2}\n".
format(devname, rc, err))
else:
for line in out.rsplit("\n"):
if re.match(r".*looking at device .*/usb[0-9]/.*", line):
is_usb = True
return is_usb
def switch_user(new_gid, new_uid):
os.setegid(new_gid)
os.seteuid(new_uid)
def main(device, is_nfs, is_tar):
#
# This script can hammer our servers at a time when people notice.
# Be nice. By default this process runs at a negative nice value,
# presumably because udev starts us.
#
debug("Reniced to %u" % (os.nice(40)))
#
    # First we need to know the device that has been inserted - this is
    # passed in an environment variable.
#
if device is None:
        if "DEVPATH" not in os.environ:
debug(" argv: %s" % (sys.argv))
debug(" environ:")
for item in os.environ:
debug(" %s: %s" % (item, os.environ[item]))
error("No 'DEVPATH' item - cannot mount harddisk")
dev = os.environ["DEVPATH"]
else:
dev = device
if not is_nfs:
dev = dev.split("/")[-1]
debug("Got device %s" % (dev))
#
# Would ideally identify the eSATA explicitly, but without plugging it in
    # I can't easily do that - so for now infer that we aren't eSATA if we
# are USB :)
#
if not is_nfs:
is_usb = device_is_usb(dev)
if not is_usb:
debug("Device is eSATA")
esata_disk = True
else:
debug("Device is USB")
esata_disk = False
else:
debug("Device is NFS")
#
# Need some locking here - ideally working with main backup script locking
# to stop them interfering with each other.
#
get_backup_locks()
#
# Next check that no other disk is already mounted, and mount this
# disk if not
#
enbackup.utils.makedirs(mount_dir, logger, False)
if os.path.ismount(mount_dir):
error("Unexpected - disk is already mounted. "
"Give up to avoid clashing")
if is_nfs:
mount_args = [dev, mount_dir]
else:
mount_args = ["-text3", "/dev/%s" % dev, mount_dir]
rc, err, out = run_cmd(["/bin/mount"] + mount_args)
if rc != 0:
error("ERROR: failed to mount device %s (%s): %d\n%s\n" % (dev,
mount_args,
rc,
err))
# Drop down to the enbackup user's uid/gid, while writing files.
# Switch to Enbackup's home directory, so that subprocesses (e.g. 'find')
# don't hit errors if they try to switch back to root's home dir
# after this point.
new_user = enbackup.utils.get_username()
new_uid = pwd.getpwnam(new_user).pw_uid
new_gid = pwd.getpwnam(new_user).pw_gid
switch_user(new_gid, new_uid)
new_home_dir = os.path.expanduser("~{}".format(new_user))
debug("Switching cwd to {}".format(new_home_dir))
os.chdir(new_home_dir)
#
# Need to know the size of the backup directory
#
start_time = time.time()
rc, backup_size = calculate_directory_size(source_dir)
elapsed_time = time.time() + 1 - start_time
elapsed_time_str = str(datetime.timedelta(0, elapsed_time, 0)).split(".")[0]
if rc != 0:
error("ERROR: failed to get backup directory size %s: %d\n%s\n" %
(source_dir, rc, err))
debug("'du' completed in %s, backup directory size is %u Mb" %
(elapsed_time_str, backup_size))
#
# We only keep a single mirror+increment - so move the old one to
# mirror-only directory, and delete the increment part.
#
# Obviously exclude anything that isn't a backup (ie. doesn't fit
# the YYYY-MM-DD format).
#
debug("Looking for mirror/increments to make into mirror-only")
start_time = time.time()
rdiff_to_mirror_cmd = os.path.join(script_dir,
"enbackup-rdiff-to-mirror.py")
backup_dir_full = os.path.join(mount_dir, target_dir)
mirror_dir_full = os.path.join(backup_dir_full, mirror_dir)
enbackup.utils.makedirs(backup_dir_full, logger, False)
enbackup.utils.makedirs(mirror_dir_full, logger, False)
to_move = os.listdir(backup_dir_full)
for backup in to_move:
backup_date = backup
old_backup_is_tar = False
# Check whether the old backup is a tar file, since that affects how
# we remove files.
if backup_date.endswith(".tar"):
backup_date = backup_date[:-4]
old_backup_is_tar = True
if is_iso_date(backup_date):
backup_old = os.path.join(backup_dir_full, backup)
backup_new = os.path.join(mirror_dir_full, backup)
if old_backup_is_tar:
rc, err, out = run_cmd(["tar", "-f", backup_old,
"--delete", "--wildcards",
"*/rdiff-backup-data"])
else:
rc, err, out = run_cmd([rdiff_to_mirror_cmd, backup_old])
if rc != 0:
error("ERROR: failed to shrink old backup %s: %d\n%s\n" %
(backup_old, rc, err))
os.rename(backup_old, backup_new)
elapsed_time = time.time() + 1 - start_time
elapsed_time_str = str(datetime.timedelta(0, elapsed_time, 0)).split(".")[0]
debug("Mirror/increment to mirror-only conversion completed in %s" % elapsed_time_str)
#
# Work out which backup sets we should keep and delete.
#
res = filter_dates(os.listdir(mirror_dir_full))
#
# Delete all backups marked for deletion, keep those for optional
# deletion (ie. quarterlies).
#
debug("Deleting old backups")
start_time = time.time()
for directory in res["delete"]:
full_directory = os.path.join(mirror_dir_full, directory)
debug("Deleting old backup directory: %s" % (full_directory))
rc = remove_old_backup(full_directory)
if rc != 0:
error("ERROR: failed to delete %s as part of the old backups" %
full_directory)
#
# See if we have enough free space without deleting quarterlies, if not
# enough then delete quarterlies as long as we have a minimum number of
    # non-quarterly backup sets.  If even that doesn't give us enough space
    # then delete the older "keep" sets, as long as we retain the configured
    # minimum number of sets.
#
if check_sufficient_free_space(backup_dir_full, backup_size, is_nfs):
debug("Got enough free space without deleting quarterlies")
elif len(res["keep"]) < minimum_backup_sets_required:
error("Insufficient disk space, and not enough 'keep' directories to "
"zap quarterlies (have %s, need %s)" %
(len(res["keep"]), minimum_backup_sets_required))
else:
possible_deletes = (res["keep"][minimum_backup_sets_required:] +
res["keep_optional"])
while True:
if len(possible_deletes) == 0:
error("Not enough space for backup, and no (more) backups to "
"delete (needed %s Mb)" % (backup_size))
directory = possible_deletes.pop()
if directory in res["keep_optional"]:
dir_type = "optional"
else:
dir_type = "weekly/monthly"
full_directory = os.path.join(mirror_dir_full, directory)
debug("Deleting old backup directory (%s): %s" %
(dir_type, full_directory))
rc = remove_old_backup(full_directory)
if rc != 0:
error("ERROR: failed to delete %s as part of the old backups" %
full_directory)
if check_sufficient_free_space(backup_dir_full, backup_size, is_nfs):
debug("Deleting backup sets has created enough free space")
break
elapsed_time = time.time() + 1 - start_time
elapsed_time_str = str(datetime.timedelta(0, elapsed_time, 0)).split(".")[0]
debug("Deleting old backups completed in %s" % elapsed_time_str)
#
# OK - we have the space we need, so copy over the current
# mirror+increment
#
output_dir = os.path.join(backup_dir_full,
datetime.date.today().isoformat())
debug("Copying current backup mirror/increment %s to %s" %
(source_dir, output_dir))
start_time = time.time()
#rc, err, out = run_cmd(["cp", "-a", source_dir, output_dir])
# @@@ Tar test, to preserve ACLs and also speed up copy to NFS
output_tar = output_dir + ".tar"
rc, err, out = run_cmd(["tar", "--acls", "-cpf", output_tar, source_dir])
#
# Add one to elapsed time to avoid any chance of divide by zero and
# discard the sub-second part of the elapsed time
# (eg. "0:00:08.839987" -> 0:00:08)
#
elapsed_time = time.time() + 1 - start_time
elapsed_time_str = str(datetime.timedelta(0, elapsed_time, 0)).split(".")[0]
if rc != 0:
error("ERROR: failed to copy %s to %s (elapsed time %s): %d\n%s\n%s\n" %
(source_dir, output_dir, elapsed_time_str, rc, out, err))
else:
debug("Copy completed: %u Mb, time taken %s, rate %u Mb/s" %
(backup_size, elapsed_time_str,
round(float(backup_size) / elapsed_time)))
# Switch back to root
switch_user(0, 0)
#
# Unmount the disk
#
rc, err, out = run_cmd(["umount", mount_dir])
if rc != 0:
error("ERROR: failed to unmount disk: %d\n%s\n" % (rc, err))
if not is_nfs:
#
# Remove the device using the appropriate helper script, depending on
# whether it's USB or eSATA:
#
if esata_disk:
remove_cmd = os.path.join(script_dir, "enbackup-sata-remove.sh")
else:
remove_cmd = os.path.join(script_dir, "enbackup-suspend-usb-device.sh")
m = re.match(r"(sd[a-z])[0-9]", dev)
        if m is None:
error("ERROR: failed to extract base device name from {0}".
format(dev))
basedev = m.group(1)
remove_dev = "/dev/{0}".format(basedev)
rc, err, out = run_cmd([remove_cmd, remove_dev])
if rc != 0:
error("ERROR: failed to remove device {0}: {1}\n{2}\n".format(
remove_dev, rc, err))
debug("Successfully completed!!")
operation_success = True
if __name__ == "__main__":
#
# Check we are root - if not barf. Should only hit this if being run
# manually, as udev will always be run as root.
#
if (os.geteuid() != 0):
print("ERROR: enbackup-archive.py must be run as root - aborting. "
"(EUID %s)" % (os.geteuid()))
sys.exit(-1)
parser = argparse.ArgumentParser(description="Run an external backup")
parser.add_argument("--device", action="store",
help="Specify the external device to back up to.")
parser.add_argument("--nfs", action="store_true",
help="Device is an NFS path.")
parser.add_argument("--tar", action="store_true",
help="Store archive as a tar file.")
args = parser.parse_args(sys.argv[1:])
#
# Read the configuration from the file (or fall back to defaults if the
# file does not exist, or the option isn't specified).
#
config = enbackup.utils.DefaultConfigParser(
{ "Logging" :
{ "log_email": logfile_email_to_default },
"Paths" :
{ "src_dir": source_dir_default,
"tgt_dir": target_dir_default,
"mount_point": mount_dir_default, },
"Options" :
{ "start_at": start_at_default }})
config.read(os.path.join(config_dir, "enbackup-archive.rc"))
logfile_email_to = config.get("Logging", "log_email")
source_dir = config.get("Paths", "src_dir")
target_dir = config.get("Paths", "tgt_dir")
mount_dir = config.get("Paths", "mount_point")
start_at_string = config.get("Options", "start_at")
try:
start_at_ts = datetime.datetime.strptime(start_at_string,
start_at_fmt)
start_at = [start_at_ts.hour, start_at_ts.minute]
except:
print("ERROR: start time is not in expected format, "
"expect {1}, given {0} - aborting".format(start_at_fmt,
start_at_string))
sys.exit(-1)
#
    # This script takes far too long to run for the caller to be blocked,
# so spawn a separate child process to do the work, with the parent
# process just exiting leaving the udev script free to carry on.
#
    # The fork call returns zero to the child process, and the child's PID is
    # returned to the parent process.
#
retval = os.fork()
if retval == 0:
#
# Only open the log file in the child process!
#
debugfile = open(log_file_name, "w")
try:
#
# We postpone all processing to save thrashing the disk with
# "du" and "cp".
#
sleep_until_between_times(start_at, [23,59])
main(args.device, args.nfs, args.tar)
except:
#
# Want to catch exceptions and print outputs, but not cause it to
# stop cleanup!
#
debug("Exception caught - cleaning up")
debug("".join(traceback.format_exc()))
# Switch back to root, in case we failed in main() before getting
# a chance to do so.
switch_user(0, 0)
else:
operation_success = True
release_backup_locks()
debugfile.close()
#
# Mail the logfile to the specified email address - appending
# something more descriptive than just the raw log!
#
        if logfile_email_to is not None:
if operation_success:
prefix = "SUCCESS"
body = "Good news! Copying of backup to external disk has "\
"completed.\n\n"
body += "Please disconnect the external disk, and if "\
"necessary connect the next disk.\n\n"
body += "Full log is below - this is for information only\n\n"
body += "\n\n\n\n\n\n\n\n"
body += "%s\n" % ("#" * 79)
else:
prefix = "FAILURE"
body = "INVESTIGATION REQUIRED!\n"
body += "An error occured when copying backups to "\
"external disk.\n\n"
body += "The full log is below, please investigate and "\
"fix this.\n\n"
body += "Before re-running the backup archiving you will "\
"need to unmount the disk.\n\n"
body += "\n\n%s\n" % ("#" * 79)
subject = "{0}: Backup archiving to external disk complete".format(
prefix)
tmpfile = open("/tmp/enbackup-archive.child", "w")
tmpfile.write(body)
for line in open(log_file_name).readlines():
tmpfile.write(line)
tmpfile.close()
retval = os.popen('mail -s "%s" %s < %s' %
(subject, logfile_email_to, tmpfile.name)).close()
if retval:
print("Failed to email results to (%s): %d\n" %
(logfile_email_to, retval))
else:
#
# Send a mail from the parent process just to say that the backup has
# been kicked off successfully.
#
subject = "Backup archiving to external disk started"
body = "Copying of backups to external disk has started - "
body += "process %d has been spawned.\n\n" % (retval)
body += "NOTE: if you do not get a mail tomorrow then something has "
body += "probably gone wrong. "
body += "In that case check out the log file at {0}\n\n".format(
log_file_name)
tmpfile = open("/tmp/enbackup-archive.parent", "w")
tmpfile.write(body)
tmpfile.close()
retval = os.popen('mail -s "%s" %s < %s' %
(subject, logfile_email_to, tmpfile.name)).close()
if retval:
print("Failed to email results to (%s): %d\n" %
(logfile_email_to, retval))
|
StarcoderdataPython
|
188507
|
import os
import numpy as np
import pytest
from config.stage import ConfigStage
from extract.stage import ExtractStage
from preprocess.stage import PreprocessStage
@pytest.mark.parametrize("action", ['train'])
def test_extract_stage(action):
path = os.path.abspath(os.path.join(__file__, "../../..", 'resources/config.json'))
config = ConfigStage().run(path)
images, labels = ExtractStage().run(config, action)
images, labels = PreprocessStage().run(images, labels, config, action)
assert isinstance(images, np.ndarray)
assert isinstance(labels, np.ndarray)
|
StarcoderdataPython
|
3278513
|
import plotly.graph_objects as go
def make_figure(df):
    fig = go.Figure()
    fig.update_layout(width=600, height=600)
    fig.add_trace(go.Scatter(x=df["x"].tolist(), y=df["y"].tolist()))
    fig.update_layout(
        title={
            'text': "Demo plotly title",
            'xanchor': 'left',
            'yanchor': 'top',
            'font': {'size': 25, 'color': 'black'},
        })
    return fig
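# Example usage - a minimal sketch.  It assumes a pandas DataFrame with "x"
# and "y" columns, which this module does not itself provide.
if __name__ == "__main__":
    import pandas as pd
    df = pd.DataFrame({"x": [0, 1, 2, 3], "y": [0, 1, 4, 9]})
    fig = make_figure(df)
    fig.show()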
|
StarcoderdataPython
|
1698184
|
# Source: ArlenCHEN/IntraDA, file ADVENT/advent/domain_adaptation/config.py
# --------------------------------------------------------
# Configurations for domain adaptation
# Copyright (c) 2019 valeo.ai
#
# Written by <NAME>
# Adapted from https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/fast_rcnn/config.py
# --------------------------------------------------------
import os.path as osp
import numpy as np
from easydict import EasyDict
from advent.utils import project_root
from advent.utils.serialization import yaml_load
cfg = EasyDict()
# cfg.label_class = 'city'
# cfg.SOURCE = 'GTA'
# cfg.TARGET = 'Cityscapes'
cfg.label_class = 'outdoor'
cfg.SOURCE = 'RUGD'
cfg.TARGET = 'RELLIS'
# Number of workers for dataloading
cfg.NUM_WORKERS = 4
# SOURCE
if cfg.SOURCE == 'GTA':
cfg.DATA_LIST_SOURCE = str(project_root / 'advent/dataset/gta5_list/{}.txt')
cfg.DATA_DIRECTORY_SOURCE = str(project_root / 'data/GTA5')
elif cfg.SOURCE == 'SYNTHIA':
cfg.DATA_LIST_SOURCE = str(project_root / 'advent/dataset/synthia_list/{}.txt')
cfg.DATA_DIRECTORY_SOURCE = str(project_root / 'data/SYNTHIA')
elif cfg.SOURCE == 'Cityscapes':
cfg.DATA_LIST_SOURCE = str(project_root / 'advent/dataset/cityscapes_list/{}.txt')
cfg.DATA_DIRECTORY_SOURCE = str(project_root / 'data/Cityscapes')
elif cfg.SOURCE == 'KITTI':
cfg.DATA_LIST_SOURCE = str(project_root / 'advent/dataset/kitti_list/{}.txt')
cfg.DATA_DIRECTORY_SOURCE = str(project_root / 'data/KITTI')
elif cfg.SOURCE == 'RUGD':
cfg.DATA_LIST_SOURCE = str(project_root / 'advent/dataset/rugd_list/{}.txt')
cfg.DATA_DIRECTORY_SOURCE = str(project_root / 'data/RUGD')
elif cfg.SOURCE == 'RELLIS':
cfg.DATA_LIST_SOURCE = str(project_root / 'advent/dataset/rellis_list/{}.txt')
cfg.DATA_DIRECTORY_SOURCE = str(project_root / 'data/RELLIS')
else:
raise NotImplementedError(f"Not yet supported for SOURCE {cfg.SOURCE}")
# TARGET
if cfg.TARGET == 'GTA':
cfg.DATA_LIST_TARGET = str(project_root / 'advent/dataset/gta5_list/{}.txt')
cfg.DATA_DIRECTORY_TARGET = str(project_root / 'data/GTA5')
elif cfg.TARGET == 'SYNTHIA':
cfg.DATA_LIST_TARGET = str(project_root / 'advent/dataset/synthia_list/{}.txt')
cfg.DATA_DIRECTORY_TARGET = str(project_root / 'data/SYNTHIA')
elif cfg.TARGET == 'Cityscapes':
cfg.DATA_LIST_TARGET = str(project_root / 'advent/dataset/cityscapes_list/{}.txt')
cfg.DATA_DIRECTORY_TARGET = str(project_root / 'data/Cityscapes')
elif cfg.TARGET == 'KITTI':
cfg.DATA_LIST_TARGET = str(project_root / 'advent/dataset/kitti_list/{}.txt')
cfg.DATA_DIRECTORY_TARGET = str(project_root / 'data/KITTI')
elif cfg.TARGET == 'RUGD':
cfg.DATA_LIST_TARGET = str(project_root / 'advent/dataset/rugd_list/{}.txt')
cfg.DATA_DIRECTORY_TARGET = str(project_root / 'data/RUGD')
elif cfg.TARGET == 'RELLIS':
cfg.DATA_LIST_TARGET = str(project_root / 'advent/dataset/rellis_list/{}.txt')
cfg.DATA_DIRECTORY_TARGET = str(project_root / 'data/RELLIS')
elif cfg.TARGET == 'SELF':
cfg.DATA_LIST_TARGET = str(project_root / 'advent/dataset/self_list/{}.txt')
cfg.DATA_DIRECTORY_TARGET = str(project_root / 'data/SELF')
else:
raise NotImplementedError(f"Not yet supported for TARGET {cfg.TARGET}")
if cfg.label_class == 'city':
cfg.NUM_CLASSES = 19
elif cfg.label_class == 'outdoor':
cfg.NUM_CLASSES = 24
else:
raise NotImplementedError("Not yet supported for label class {cfg.label_class}")
# Exp dirs
cfg.EXP_NAME = ''
cfg.EXP_ROOT = project_root / 'experiments'
cfg.EXP_ROOT_SNAPSHOT = osp.join(cfg.EXP_ROOT, 'snapshots')
cfg.EXP_ROOT_LOGS = osp.join(cfg.EXP_ROOT, 'logs')
# CUDA
cfg.GPU_ID = 0
# TRAIN CONFIGS
cfg.TRAIN = EasyDict()
# cfg.TRAIN.SET_SOURCE = 'all'
if cfg.SOURCE == 'GTA':
cfg.TRAIN.SET_SOURCE = 'only_1' # for GTA 5
else:
    cfg.TRAIN.SET_SOURCE = 'train'  # for all other sources
cfg.TRAIN.SET_TARGET = 'train'
cfg.TRAIN.BATCH_SIZE_SOURCE = 1
cfg.TRAIN.BATCH_SIZE_TARGET = 1
cfg.TRAIN.IGNORE_LABEL = 255
# SOURCE IMAGE SIZE
if cfg.SOURCE == 'GTA':
cfg.TRAIN.INPUT_SIZE_SOURCE = (640, 360) # Half of original GTA 5 size
elif cfg.SOURCE == 'SYNTHIA':
pass
elif cfg.SOURCE == 'Cityscapes':
cfg.TRAIN.INPUT_SIZE_SOURCE = (512, 256) # Half of original Cityscapes size
elif cfg.SOURCE == 'KITTI':
pass
elif cfg.SOURCE == 'RUGD':
cfg.TRAIN.INPUT_SIZE_SOURCE = (688, 550) # Original RUGD size
elif cfg.SOURCE == 'RELLIS':
cfg.TRAIN.INPUT_SIZE_SOURCE = (960, 600) # Half of original RELLIS size
else:
raise NotImplementedError(f"Not yet supported for SOURCE {cfg.SOURCE}")
# TARGET IMAGE SIZE
if cfg.TARGET == 'GTA':
cfg.TRAIN.INPUT_SIZE_TARGET = (640, 360) # Half of original GTA 5 size
elif cfg.TARGET == 'SYNTHIA':
pass
elif cfg.TARGET == 'Cityscapes':
cfg.TRAIN.INPUT_SIZE_TARGET = (512, 256) # Half of original Cityscapes size
elif cfg.TARGET == 'KITTI':
pass
elif cfg.TARGET == 'RUGD':
cfg.TRAIN.INPUT_SIZE_TARGET = (688, 550) # Original RUGD size
elif cfg.TARGET == 'RELLIS':
cfg.TRAIN.INPUT_SIZE_TARGET = (960, 600) # Half of original RELLIS size
elif cfg.TARGET == 'SELF':
pass
else:
raise NotImplementedError(f"Not yet supported for TARGET {cfg.TARGET}")
# Class info
cfg.TRAIN.INFO_SOURCE = ''
if cfg.label_class == 'city':
cfg.TRAIN.INFO_TARGET = str(project_root / 'advent/dataset/cityscapes_list/info.json')
elif cfg.label_class == 'outdoor':
cfg.TRAIN.INFO_TARGET = str(project_root / 'advent/dataset/rugd_list/info_out.json')
else:
raise NotImplementedError(f"Not yet supported for label class {cfg.label_class}")
# Segmentation network params
cfg.TRAIN.MODEL = 'DeepLabv2'
cfg.TRAIN.MULTI_LEVEL = True
cfg.TRAIN.RESTORE_FROM = ''
if cfg.SOURCE == 'GTA':
cfg.TRAIN.IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434), dtype=np.float32)
elif cfg.SOURCE == 'SYNTHIA':
pass
elif cfg.SOURCE == 'Cityscapes':
pass
elif cfg.SOURCE == 'KITTI':
pass
elif cfg.SOURCE == 'RUGD':
cfg.TRAIN.IMG_MEAN = np.array((123.675, 116.28, 103.53), dtype=np.float32)
elif cfg.SOURCE == 'RELLIS':
cfg.TRAIN.IMG_MEAN = np.array((123.675, 116.28, 103.53), dtype=np.float32)
else:
raise NotImplementedError(f"Not yet supported for SOURCE {cfg.SOURCE}")
cfg.TRAIN.LEARNING_RATE = 2.5e-4
cfg.TRAIN.MOMENTUM = 0.9
cfg.TRAIN.WEIGHT_DECAY = 0.0005
cfg.TRAIN.POWER = 0.9
cfg.TRAIN.LAMBDA_SEG_MAIN = 1.0
cfg.TRAIN.LAMBDA_SEG_AUX = 0.1 # weight of conv4 prediction. Used in multi-level setting.
# Domain adaptation
cfg.TRAIN.DA_METHOD = 'AdvEnt'
# Adversarial training params
cfg.TRAIN.LEARNING_RATE_D = 1e-4
cfg.TRAIN.LAMBDA_ADV_MAIN = 0.001
cfg.TRAIN.LAMBDA_ADV_AUX = 0.0002
# MinEnt params
cfg.TRAIN.LAMBDA_ENT_MAIN = 0.001
cfg.TRAIN.LAMBDA_ENT_AUX = 0.0002
# Other params
cfg.TRAIN.MAX_ITERS = 250000
cfg.TRAIN.EARLY_STOP = 120000
cfg.TRAIN.SAVE_PRED_EVERY = 2000
cfg.TRAIN.SNAPSHOT_DIR = ''
cfg.TRAIN.RANDOM_SEED = 1234
cfg.TRAIN.TENSORBOARD_LOGDIR = ''
cfg.TRAIN.TENSORBOARD_VIZRATE = 100
# TEST CONFIGS
cfg.TEST = EasyDict()
cfg.TEST.MODE = 'best' # {'single', 'best'}
# model
cfg.TEST.MODEL = ('DeepLabv2',)
cfg.TEST.MODEL_WEIGHT = (1.0,)
cfg.TEST.MULTI_LEVEL = (True,)
if cfg.SOURCE == 'GTA':
cfg.TEST.IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434), dtype=np.float32)
elif cfg.SOURCE == 'SYNTHIA':
pass
elif cfg.SOURCE == 'Cityscapes':
pass
elif cfg.SOURCE == 'KITTI':
pass
elif cfg.SOURCE == 'RUGD':
cfg.TEST.IMG_MEAN = np.array((123.675, 116.28, 103.53), dtype=np.float32)
elif cfg.SOURCE == 'RELLIS':
cfg.TEST.IMG_MEAN = np.array((123.675, 116.28, 103.53), dtype=np.float32)
else:
raise NotImplementedError(f"Not yet supported for SOURCE {cfg.SOURCE}")
cfg.TEST.RESTORE_FROM = ('',)
cfg.TEST.SNAPSHOT_DIR = ('',) # used in 'best' mode
cfg.TEST.SNAPSHOT_STEP = 2000 # used in 'best' mode
cfg.TEST.SNAPSHOT_MAXITER = 120000 # used in 'best' mode
# Test sets
cfg.TEST.SET_TARGET = 'val'
cfg.TEST.BATCH_SIZE_TARGET = 1
if cfg.TARGET == 'Cityscapes':
cfg.TEST.INPUT_SIZE_TARGET = (512, 256)
cfg.TEST.OUTPUT_SIZE_TARGET = (2048, 1024)
cfg.TEST.INFO_TARGET = str(project_root / 'advent/dataset/cityscapes_list/info.json')
elif cfg.TARGET == 'RUGD':
cfg.TEST.INPUT_SIZE_TARGET = (688, 550) # This should be the same as TRAIN.INPUT_SIZE_TARGET
cfg.TEST.OUTPUT_SIZE_TARGET = (688, 550) # ? This should be the same as the original label image?
cfg.TEST.INFO_TARGET = str(project_root / 'advent/dataset/rugd_list/info_out.json')
else:
pass
if cfg.TARGET == 'GTA':
pass
elif cfg.TARGET == 'SYNTHIA':
pass
elif cfg.TARGET == 'Cityscapes':
cfg.TEST.INPUT_SIZE_TARGET = (512, 256)
cfg.TEST.OUTPUT_SIZE_TARGET = (2048, 1024)
cfg.TEST.INFO_TARGET = str(project_root / 'advent/dataset/cityscapes_list/info.json')
elif cfg.TARGET == 'KITTI':
pass
elif cfg.TARGET == 'RUGD':
cfg.TEST.INPUT_SIZE_TARGET = (688, 550) # This should be the same as TRAIN.INPUT_SIZE_TARGET
cfg.TEST.OUTPUT_SIZE_TARGET = (688, 550) # ? This should be the same as the original label image?
cfg.TEST.INFO_TARGET = str(project_root / 'advent/dataset/rugd_list/info_out.json')
elif cfg.TARGET == 'RELLIS':
cfg.TEST.INPUT_SIZE_TARGET = (960, 600) # This should be the same as TRAIN.INPUT_SIZE_TARGET
cfg.TEST.OUTPUT_SIZE_TARGET = (1920, 1200) # ? This should be the same as the original label image?
cfg.TEST.INFO_TARGET = str(project_root / 'advent/dataset/rugd_list/info_out.json')
elif cfg.TARGET == 'SELF':
pass
else:
raise NotImplementedError(f"Not yet supported for TARGET {cfg.TARGET}")
cfg.TEST.WAIT_MODEL = True
def _merge_a_into_b(a, b):
"""Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a.
"""
if type(a) is not EasyDict:
return
for k, v in a.items():
# a must specify keys that are in b
# if not b.has_key(k):
if k not in b:
raise KeyError(f'{k} is not a valid config key')
# the types must match, too
old_type = type(b[k])
if old_type is not type(v):
if isinstance(b[k], np.ndarray):
v = np.array(v, dtype=b[k].dtype)
else:
raise ValueError(f'Type mismatch ({type(b[k])} vs. {type(v)}) '
f'for config key: {k}')
# recursively merge dicts
if type(v) is EasyDict:
try:
_merge_a_into_b(a[k], b[k])
except Exception:
print(f'Error under config key: {k}')
raise
else:
b[k] = v
def cfg_from_file(filename):
"""Load a config file and merge it into the default options.
"""
yaml_cfg = EasyDict(yaml_load(filename))
_merge_a_into_b(yaml_cfg, cfg)
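# Minimal usage sketch (illustrative, not part of the original module): merge
# an external experiment YAML into the defaults defined above and inspect a
# few of the resulting settings.  The file path below is hypothetical.
if __name__ == "__main__":
    cfg_from_file('configs/advent_rugd2rellis.yml')
    print(cfg.TRAIN.MODEL, cfg.NUM_CLASSES, cfg.TRAIN.INPUT_SIZE_SOURCE)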
|
StarcoderdataPython
|
3248008
|
"""Provide an interface to data, parameters and results
A :class:`DataHandle` is passed in to a :class:`Model` at runtime, to provide
transparent access to the relevant data and parameters for the current
:class:`ModelRun` and iteration. It gives read access to parameters and input
data (at any computed or pre-computed timestep) and write access to output data
(at the current timestep).
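A minimal usage sketch from within a model's ``simulate`` method (the input,
output and parameter names here are illustrative)::
    def simulate(self, data_handle):
        price = data_handle.get_parameter('unit_price')
        demand = data_handle.get_data('energy_demand')
        cost = demand.as_ndarray() * price.as_ndarray()
        data_handle.set_results('cost', cost)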
"""
from copy import copy
from logging import getLogger
from types import MappingProxyType
from typing import Dict, List, Optional, Union
import numpy as np # type: ignore
from smif.data_layer.data_array import DataArray
from smif.data_layer.store import Store
from smif.exception import SmifDataError
from smif.metadata import RelativeTimestep
class DataHandle(object):
"""Get/set model parameters and data
"""
def __init__(self, store: Store, modelrun_name, current_timestep, timesteps, model,
decision_iteration=None):
"""Create a DataHandle for a Model to access data, parameters and state, and to
communicate results.
Parameters
----------
store : Store
Backing store for inputs, parameters, results
modelrun_name : str
Name of the current modelrun
current_timestep : str
timesteps : list
model : Model
Model which will use this DataHandle
decision_iteration : int, default=None
ID of the current Decision iteration
"""
self.logger = getLogger(__name__)
self._store = store
self._modelrun_name = modelrun_name
self._current_timestep = current_timestep
self._timesteps = timesteps
self._decision_iteration = decision_iteration
self._model_name = model.name
self._inputs = model.inputs
self._outputs = model.outputs
self._model = model
modelrun = self._store.read_model_run(self._modelrun_name)
sos_model = self._store.read_sos_model(modelrun['sos_model'])
self._scenario_dependencies = {} # type: Dict[str, Dict]
self._model_dependencies = {} # type: Dict[str, Dict]
scenario_variants = modelrun['scenarios']
self._load_dependencies(sos_model, scenario_variants)
self.logger.debug(
"Create with %s model, %s scenario dependencies",
len(self._scenario_dependencies),
len(self._model_dependencies))
self._parameters = {} # type: Dict[str, DataArray]
self._load_parameters(sos_model, modelrun['narratives'])
def _load_dependencies(self, sos_model, scenario_variants):
"""Load Model dependencies as a dict with {input_name: list[Dependency]}
"""
for dep in sos_model['model_dependencies']:
if dep['sink'] == self._model_name:
input_name = dep['sink_input']
self._model_dependencies[input_name] = {
'source_model_name': dep['source'],
'source_output_name': dep['source_output'],
'type': 'model'
}
for dep in sos_model['scenario_dependencies']:
if dep['sink'] == self._model_name:
input_name = dep['sink_input']
self._scenario_dependencies[input_name] = {
'source_model_name': dep['source'],
'source_output_name': dep['source_output'],
'type': 'scenario',
'variant': scenario_variants[dep['source']]
}
def _load_parameters(self, sos_model, concrete_narratives):
"""Load parameter values for model run
Parameters for each of the contained sector models are loaded
into memory as a data_handle is initialised.
Firstly, default values for the parameters are loaded from the parameter
specs contained within each of the sector models
Then, the data from the list of narrative variants linked to the current
        model run are loaded into the parameters contained within the model.
Arguments
---------
sos_model : dict
A configuration dictionary of a system-of-systems model
concrete_narratives: dict of list
Links narrative names to a list of variants to furnish parameters
with values {narrative_name: [variant_name, ...]}
"""
# Populate the parameters with their default values
for parameter in self._model.parameters.values():
self._parameters[parameter.name] = \
self._store.read_model_parameter_default(self._model.name, parameter.name)
# Load in the concrete narrative and selected variants from the model run
for narrative_name, variant_names in concrete_narratives.items():
# Load the narrative
try:
narrative = [x for x in sos_model['narratives']
if x['name'] == narrative_name][0]
except IndexError:
msg = "Couldn't find a match for {} in {}"
raise IndexError(msg.format(narrative_name, sos_model['name']))
self.logger.debug("Loaded narrative: %s", narrative)
self.logger.debug("Considering variants: %s", variant_names)
# Read parameter data from each variant, later variants overriding
# previous parameter values
for variant_name in variant_names:
try:
parameter_list = narrative['provides'][self._model.name]
except KeyError:
parameter_list = []
for parameter in parameter_list:
da = self._store.read_narrative_variant_data(
sos_model['name'],
narrative_name, variant_name, parameter
)
self._parameters[parameter].update(da)
def derive_for(self, model):
"""Derive a new DataHandle configured for the given Model
Parameters
----------
model : Model
Model which will use this DataHandle
"""
return DataHandle(
store=self._store,
modelrun_name=self._modelrun_name,
current_timestep=self._current_timestep,
timesteps=list(self.timesteps),
model=model,
decision_iteration=self._decision_iteration
)
def __getitem__(self, key):
if key in self._parameters:
return self.get_parameter(key)
elif key in self._inputs:
return self.get_data(key)
elif key in self._outputs:
return self.get_results(key)
else:
raise KeyError(
"'%s' not recognised as input, output or parameter for '%s'" %
(key, self._model_name)
)
def __setitem__(self, key, value):
if hasattr(value, 'as_ndarray'):
raise TypeError("Pass in a numpy array")
self.set_results(key, value)
@property
def current_timestep(self):
"""Current timestep
"""
return self._current_timestep
@property
def previous_timestep(self):
"""Previous timestep
"""
return RelativeTimestep.PREVIOUS.resolve_relative_to(
self._current_timestep,
self._timesteps
)
@property
def base_timestep(self):
"""Base timestep
"""
return RelativeTimestep.BASE.resolve_relative_to(
self._current_timestep,
self._timesteps
)
@property
def timesteps(self):
"""All timesteps (as tuple)
"""
return tuple(self._timesteps)
@property
def decision_iteration(self):
return self._decision_iteration
def get_state(self):
"""The current state of the model
If the DataHandle instance has a timestep, then state is
established from the state file.
Returns
-------
list of tuple
A list of (intervention name, build_year) installed in the current timestep
Raises
------
ValueError
If self._current_timestep is None an error is raised.
"""
if self._current_timestep is None:
raise ValueError("You must pass a timestep value to get state")
else:
sos_state = self._store.read_state(
self._modelrun_name,
self._current_timestep,
self._decision_iteration
)
return sos_state
def get_current_interventions(self):
"""Get the interventions that exist in the current state
Returns
-------
dict of dicts
A dict of intervention dicts with build_year attribute keyed by name
"""
state = self.get_state()
current_interventions = {}
all_interventions = self._store.read_interventions(self._model_name)
for decision in state:
name = decision['name']
build_year = decision['build_year']
try:
serialised = all_interventions[name]
serialised['build_year'] = build_year
current_interventions[name] = serialised
except KeyError:
# ignore if intervention is not in current set
pass
msg = "State matched with %s interventions"
self.logger.info(msg, len(current_interventions))
return current_interventions
def get_data(self, input_name: str, timestep=None) -> DataArray:
"""Get data required for model inputs
Parameters
----------
input_name : str
timestep : RelativeTimestep or int, optional
defaults to RelativeTimestep.CURRENT
Returns
-------
smif.data_layer.data_array.DataArray
Contains data annotated with the metadata and provides utility methods
to access the data in different ways
Raises
------
SmifDataError
If any data reading error occurs below this method, the error is
handled and reraised within the context of the current call
"""
if input_name not in self._inputs:
raise KeyError(
"'{}' not recognised as input for '{}'".format(input_name, self._model_name))
timestep = self._resolve_timestep(timestep)
dep = self._resolve_source(input_name)
self.logger.debug(
"Read %s %s %s", dep['source_model_name'], dep['source_output_name'],
timestep)
if dep['type'] == 'scenario':
data = self._get_scenario(dep, timestep, input_name)
else:
input_spec = self._inputs[input_name]
data = self._get_result(dep, timestep, input_spec)
return data
def _resolve_timestep(self, timestep):
"""Resolves a relative timestep to an absolute timestep
Arguments
---------
timestep : RelativeTimestep or int
Returns
-------
int
"""
if self._current_timestep is None:
if timestep is None:
raise ValueError("You must provide a timestep to obtain data")
elif hasattr(timestep, "resolve_relative_to"):
timestep = timestep.resolve_relative_to(self._timesteps[0], self._timesteps)
else:
assert isinstance(timestep, int) and timestep in self._timesteps
else:
if timestep is None:
timestep = self._current_timestep
elif hasattr(timestep, "resolve_relative_to"):
timestep = timestep.resolve_relative_to(self._current_timestep,
self._timesteps)
else:
assert isinstance(timestep, int) and timestep <= self._current_timestep
return timestep
def _get_result(self, dep, timestep, input_spec) -> DataArray:
"""Retrieves a model result for a dependency
"""
output_spec = copy(input_spec)
output_spec.name = dep['source_output_name']
self.logger.debug("Getting model result for %s via %s from %s",
input_spec, dep, output_spec)
try:
data = self._store.read_results(
self._modelrun_name,
dep['source_model_name'], # read from source model
output_spec, # using source model output spec
timestep,
self._decision_iteration
)
data.name = input_spec.name # ensure name matches input (as caller expects)
except SmifDataError as ex:
msg = "Could not read data for output '{}' from '{}' in {}, iteration {}"
raise SmifDataError(msg.format(
output_spec.name,
dep['source_model_name'],
timestep,
self._decision_iteration
)) from ex
return data
def _get_scenario(self, dep, timestep, input_name) -> DataArray:
"""Retrieves data from a scenario
Arguments
---------
dep : dict
A scenario dependency
timestep : int
Returns
-------
DataArray
"""
try:
data = self._store.read_scenario_variant_data(
dep['source_model_name'], # read from a given scenario model
dep['variant'], # with given scenario variant
dep['source_output_name'], # using output (variable) name
timestep
)
data.name = input_name # ensure name matches input (as caller expects)
except SmifDataError as ex:
msg = "Could not read data for output '{}' from '{}.{}' in {}"
raise SmifDataError(msg.format(
dep['source_output_name'],
dep['source_model_name'],
dep['variant'],
timestep
)) from ex
return data
def _resolve_source(self, input_name) -> Dict:
"""Find best dependency to provide input data
Returns
-------
dep : dict
A scenario or model dependency dictionary
"""
scenario_dep = None
try:
scenario_dep = self._scenario_dependencies[input_name]
except KeyError:
pass
model_dep = None
try:
model_dep = self._model_dependencies[input_name]
except KeyError:
pass
if scenario_dep is not None and model_dep is not None:
# if multiple dependencies, use scenario for timestep 0, model for
# subsequent timesteps
if self._current_timestep == self._timesteps[0]:
dep = scenario_dep
else:
dep = model_dep
elif scenario_dep is not None:
# else assume single dependency per input
dep = scenario_dep
elif model_dep is not None:
dep = model_dep
else:
raise SmifDataError("Dependency not defined for input '{}' in model '{}'".format(
input_name, self._model_name
))
return dep
def get_base_timestep_data(self, input_name):
"""Get data from the base timestep as required for model inputs
Parameters
----------
input_name : str
Returns
-------
smif.data_layer.data_array.DataArray
"""
return self.get_data(input_name, RelativeTimestep.BASE)
def get_previous_timestep_data(self, input_name):
"""Get data from the previous timestep as required for model inputs
Parameters
----------
input_name : str
Returns
-------
smif.data_layer.data_array.DataArray
"""
return self.get_data(input_name, RelativeTimestep.PREVIOUS)
def get_parameter(self, parameter_name):
"""Get the value for a parameter
Parameters
----------
parameter_name : str
Returns
-------
smif.data_layer.data_array.DataArray
Contains data annotated with the metadata and provides utility methods
to access the data in different ways
"""
if parameter_name not in self._parameters:
raise KeyError(
"'{}' not recognised as parameter for '{}'".format(
parameter_name, self._model_name))
return self._parameters[parameter_name]
def get_parameters(self):
"""Get all parameter values
Returns
-------
parameters : MappingProxyType
Read-only view of parameters (like a read-only dict)
"""
return MappingProxyType(self._parameters)
def set_results(self, output_name, data):
"""Set results values for model outputs
Parameters
----------
output_name : str
data : numpy.ndarray
"""
if hasattr(data, 'as_ndarray'):
raise TypeError("Pass in a numpy array")
if output_name not in self._outputs:
raise KeyError(
"'{}' not recognised as output for '{}'".format(output_name, self._model_name))
self.logger.debug(
"Write %s %s %s", self._model_name, output_name, self._current_timestep)
spec = self._outputs[output_name]
da = DataArray(spec, data)
self._store.write_results(
da,
self._modelrun_name,
self._model_name,
self._current_timestep,
self._decision_iteration
)
def get_results(self, output_name, decision_iteration=None,
timestep=None):
"""Get results values for model outputs
Parameters
----------
output_name : str
The name of an output for `model_name`
decision_iteration : int, default=None
timestep : int or RelativeTimestep, default=None
Returns
-------
smif.data_layer.data_array.DataArray
Contains data annotated with the metadata and provides utility methods
to access the data in different ways
Notes
-----
Access to model results is only granted to models contained
within self._model if self._model is a smif.model.model.CompositeModel
"""
model_name = self._model.name
# resolve timestep
if timestep is None:
timestep = self._current_timestep
elif isinstance(timestep, RelativeTimestep):
timestep = timestep.resolve_relative_to(self._current_timestep, self._timesteps)
else:
assert isinstance(timestep, int) and timestep <= self._current_timestep
# find output spec
try:
spec = self._model.outputs[output_name]
except KeyError:
msg = "'{}' not recognised as output for '{}'"
raise KeyError(msg.format(output_name, model_name))
if decision_iteration is None:
decision_iteration = self._decision_iteration
self.logger.debug(
"Read %s %s %s", model_name, output_name, timestep)
return self._store.read_results(
self._modelrun_name,
model_name,
spec,
timestep,
decision_iteration
)
def read_unit_definitions(self) -> List[str]:
"""Read unit definitions
Returns
-------
list[str]
"""
return self._store.read_unit_definitions()
def read_coefficients(self, source_dim: str, destination_dim: str) -> np.ndarray:
"""Reads coefficients from the store
Coefficients are uniquely identified by their source/destination dimensions.
This method and `write_coefficients` implement caching of conversion
coefficients between dimensions.
Parameters
----------
source_dim: str
Dimension name
destination_dim: str
Dimension name
Returns
-------
numpy.ndarray
"""
data = self._store.read_coefficients(source_dim, destination_dim)
return data
def write_coefficients(self, source_dim: str, destination_dim: str, data: np.ndarray):
"""Writes coefficients to the store
Coefficients are uniquely identified by their source/destination dimensions.
This method and `read_coefficients` implement caching of conversion
coefficients between dimensions.
Parameters
----------
source_dim: str
Dimension name
destination_dim: str
Dimension name
data : numpy.ndarray
"""
data = self._store.write_coefficients(source_dim, destination_dim, data)
return data
class ResultsHandle(object):
"""Results access for decision modules
"""
def __init__(self, store: Store, modelrun_name: str, sos_model,
current_timestep: int,
timesteps: Optional[List[int]] = None,
decision_iteration: Optional[int] = None):
self._store = store
self._modelrun_name = modelrun_name
self._sos_model = sos_model
self._current_timestep = current_timestep
self._timesteps = timesteps
self._decision_iteration = decision_iteration
@property
def base_timestep(self) -> int:
return self._timesteps[0]
@property
def current_timestep(self) -> int:
return self._current_timestep
@property
def previous_timestep(self) -> Union[None, int]:
rel = RelativeTimestep.PREVIOUS
return rel.resolve_relative_to(self._current_timestep, self._timesteps)
@property
def decision_iteration(self) -> int:
return self._decision_iteration
def get_results(self, model_name: str,
output_name: str,
timestep: Union[int, RelativeTimestep],
decision_iteration: int) -> DataArray:
"""Access model results
Parameters
----------
model_name : str
output_name : str
timestep : [int, RelativeTimestep]
decision_iteration : int
Returns
-------
smif.data_layer.data_array.DataArray
Contains data annotated with the metadata and provides utility methods
to access the data in different ways
"""
# resolve timestep
if hasattr(timestep, 'resolve_relative_to'):
timestep_value = \
timestep.resolve_relative_to(self._current_timestep,
self._timesteps) # type: Union[int, None]
else:
assert isinstance(timestep, int) and timestep <= self._current_timestep
timestep_value = timestep
if model_name in [model.name for model in self._sos_model.models]:
results_model = self._sos_model.get_model(model_name)
else:
msg = "Model '{}' is not contained in SosModel '{}'. Found {}."
raise KeyError(msg.format(model_name, self._sos_model.name,
self._sos_model.models)
)
try:
spec = results_model.outputs[output_name]
except KeyError:
msg = "'{}' not recognised as output for '{}'"
raise KeyError(msg.format(output_name, model_name))
results = self._store.read_results(self._modelrun_name,
model_name,
spec,
timestep_value,
decision_iteration)
return results
def get_state(self, timestep: int, decision_iteration: int) -> List[Dict]:
"""Retrieve the pre-decision state of the model
        State is read from the store for the given timestep and
        decision iteration.
Returns
-------
List[Dict]
A list of {'name', 'build_year'} dictionaries showing the history of
decisions made up to this point
"""
state = self._store.read_state(
self._modelrun_name,
timestep,
decision_iteration
)
return state
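# Usage sketch (illustrative only, not part of the module above): a model that
# receives a DataHandle can read inputs and parameters and write outputs with
# the methods defined here. The names 'population', 'per_capita_demand' and
# 'water_demand' are hypothetical and would need to exist in the model config.
def _example_simulate(data_handle):
    population = data_handle.get_data('population')  # DataArray for the current timestep
    per_capita = data_handle.get_parameter('per_capita_demand')  # DataArray
    demand = population.as_ndarray() * per_capita.as_ndarray()
    data_handle.set_results('water_demand', demand)  # expects a plain numpy array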
|
StarcoderdataPython
|
3301029
|
import os
import re
from typing import List
from box import Box
from pyspark.sql.session import SparkSession
from consolebundle.detector import is_running_in_console
from injecta.container.ContainerInterface import ContainerInterface
from injecta.dtype.DType import DType
from injecta.service.Service import Service
from injecta.service.ServiceAlias import ServiceAlias
from injecta.service.argument.ServiceArgument import ServiceArgument
from injecta.config.ConfigMerger import ConfigMerger
from pyfonybundles.Bundle import Bundle
from databricksbundle.notebook.NotebookErrorHandler import set_notebook_error_handler
from databricksbundle.detector import is_databricks, is_databricks_repo
from databricksbundle.notebook.GithubLinkGenerator import GithubLinkGenerator
from databricksbundle.notebook.helpers import get_notebook_path, is_notebook_environment
from databricksbundle.notebook.logger.NotebookLoggerFactory import NotebookLoggerFactory
class DatabricksBundle(Bundle):
DATABRICKS_NOTEBOOK = "databricks_notebook.yaml"
DATABRICKS_SCRIPT = "databricks_script.yaml"
DATABRICKS_CONNECT = "databricks_connect.yaml"
@staticmethod
def autodetect():
if is_databricks():
if is_notebook_environment():
return DatabricksBundle(DatabricksBundle.DATABRICKS_NOTEBOOK)
return DatabricksBundle(DatabricksBundle.DATABRICKS_SCRIPT)
return DatabricksBundle(DatabricksBundle.DATABRICKS_CONNECT)
def __init__(self, databricks_config: str):
self.__databricks_config = databricks_config
def get_config_files(self):
return ["config.yaml", "databricks/" + self.__databricks_config]
def modify_raw_config(self, raw_config: dict) -> dict:
project_root_filesystem_path = os.getcwd()
project_root_repo_path = "<not_databricks_repo>"
if is_databricks_repo() and project_root_filesystem_path.startswith("/Workspace/Repos"):
project_root_repo_path = project_root_filesystem_path.replace("/Workspace/Repos", "/Repos")
project_root_paths = {
"parameters": {
"databricksbundle": {
"project_root": {
"filesystem": {
"path": project_root_filesystem_path,
},
"repo": {
"path": project_root_repo_path,
},
},
}
}
}
return ConfigMerger().merge(raw_config, project_root_paths)
def modify_services(self, services: List[Service], aliases: List[ServiceAlias], parameters: Box):
if is_running_in_console():
aliases.append(ServiceAlias("databricksbundle.logger", "consolebundle.logger"))
else:
service = Service("databricksbundle.logger", DType("logging", "Logger"))
service.set_factory(ServiceArgument(NotebookLoggerFactory.__module__), "create")
services.append(service)
return services, aliases
def modify_parameters(self, parameters: Box) -> Box:
if parameters.daipecore.logger.type == "default":
parameters.daipecore.logger.type = "databricks"
if is_databricks():
parameters.pysparkbundle.dataframe.show_method = "databricks_display"
parameters.daipecore.pandas.dataframe.show_method = "databricks_display"
if parameters.pysparkbundle.filesystem is not None:
raise Exception(
"pysparkbundle.filesystem parameter must not be explicitly set as dbutils.fs must be used for Databricks-based projects"
)
parameters.pysparkbundle.filesystem = "dbutils.fs"
return parameters
def boot(self, container: ContainerInterface):
parameters = container.get_parameters()
if (
is_databricks()
and is_notebook_environment()
and parameters.databricksbundle.enable_notebook_error_handler is True
and not re.match("^/Users/", get_notebook_path())
):
logger = container.get("databricksbundle.logger")
set_notebook_error_handler(logger)
multiple_results_enabled = "spark.databricks.workspace.multipleResults.enabled"
spark = container.get(SparkSession)
if is_databricks_repo():
import IPython
link_generator = GithubLinkGenerator()
IPython.get_ipython().user_ns["daipe_help"] = link_generator.generate_link_from_module
if spark.conf.get(multiple_results_enabled) == "false":
logger.warning(f"{multiple_results_enabled} is set to false!")
logger.warning("Error messages will not show properly!")
|
StarcoderdataPython
|
92966
|
import numpy as np
import pandas as pd
import util
from othello import Othello
from constants import COLUMN_NAMES
class StartTables:
_start_tables = []
def _init_start_tables(self):
"""
read start tables from csv file 'start_moves.csv'
and store them in _start_tables
"""
csv = pd.read_csv('start_moves.csv')
self._start_tables = np.array(csv, dtype="str")
# print(self._start_tables)
# ########################################################
# CAUTION: Call only once or on change of start tables ! #
# self.calculate_missing_start_moves() #
# ########################################################
def get_available_moves_of_start_tables(self, game: Othello):
"""
        search self._start_tables for move sequences that start with the moves already taken in the game and collect the next move of each matching sequence
:return: list of available moves
"""
if len(self._start_tables) == 0:
self._init_start_tables()
turn_nr = game.get_turn_nr()
available_moves = []
taken_mv = game.get_taken_mvs_text()
for move_sequence in self._start_tables:
turn = 0
for move in move_sequence:
# move was played
if turn < turn_nr:
if taken_mv[turn] != move:
# move is different to start_table
break
# if start sequence is finished
elif move != "nan":
available_moves.append(move)
break
turn += 1
available_moves = list(dict.fromkeys(available_moves))
if "nan" in available_moves:
available_moves.remove("nan")
return available_moves
def calculate_missing_start_moves(self):
"""
The first version of the database contains no point symmetric move sequences.
This function calculates the point symmetric moves of the whole database.
Use with caution
"""
if len(self._start_tables) == 0:
self._init_start_tables()
new_moves = list()
# add first row in new start table
# first row == header = 0,1,2, ..
header_length = len(self._start_tables[0])
header = list()
for i in range(header_length):
header.append(str(i))
new_moves.append(header)
# calculate for all start sequences in start table
for move_sequence in self._start_tables:
# add move and opposite move to new start table
# |----------------------------------------------------------------|
# | WARNING: Only call each method once !!! |
# | If you use these functions do following: |
# | uncomment ..opposite..; => run code |
# | comment ..opposite..; uncomment ..diagonal..; run code |
# | comment ..diagonal.. ! |
# |----------------------------------------------------------------|
# | new_moves.append(self.calculate_opposite_move(move_sequence)) |
# | new_moves.append(self.calculate_diagonal_moves(move_sequence)) |
# |----------------------------------------------------------------|
new_moves.append(move_sequence)
# new_moves = self.remove_duplicates(new_moves)
# store new start table in file 'start_moves.csv'
with open('start_moves.csv', 'w') as f:
for row in new_moves:
csv_row = ""
for turn in row:
if turn == "nan":
break
if len(csv_row):
csv_row += "," + turn
else:
csv_row = turn
f.write("%s\n" % csv_row)
@staticmethod
def calculate_opposite_move(move_sequence):
"""
calculate the point symmetric moves of one given move sequence
"""
new_turns = list()
for move in move_sequence:
if move[0] not in {"a", "b", "c", "d", "e", "f", "g", "h"}:
break
            # move is a char and an int, e.g. 'd3'
# translate this move to a x and y coordinate
(row, column) = util.translate_move_to_pair(move)
if column < 8 and row < 7:
# mirror row and column at point 3.5,3.5 => middle of board
row -= 7
row = abs(row)
column -= 7
column = abs(column)
new_turns.append(COLUMN_NAMES[column] + str(row + 1))
print(f"old:{move_sequence}")
print(f"new:{new_turns}")
return new_turns
@staticmethod
def calculate_diagonal_moves(move_sequence):
"""
        calculate the diagonally mirrored moves of one given move sequence
"""
new_turns = list()
for move in move_sequence:
if move[0] not in {"a", "b", "c", "d", "e", "f", "g", "h"}:
break
            # move is a char and an int, e.g. 'd3'
# translate this move to a x and y coordinate
(row, column) = util.translate_move_to_pair(move)
if column < 8 and row < 7:
# mirror row and column at diagonal 0,0; 7,7 => middle of board
row_temp = row
row = column
column = row_temp
new_turns.append(COLUMN_NAMES[column] + str(row + 1))
print(f"old:{move_sequence}")
print(f"new:{new_turns}")
return new_turns
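# Illustration (not part of the original class): calculate_opposite_move mirrors
# each move through the board centre, i.e. (row, column) -> (7 - row, 7 - column)
# on the 8x8 board. The column letters below stand in for the COLUMN_NAMES
# constant assumed by the module.
def _mirror_point_symmetric(move):
    columns = "abcdefgh"
    column = columns.index(move[0])
    row = int(move[1]) - 1
    return columns[7 - column] + str((7 - row) + 1)
# e.g. _mirror_point_symmetric('d3') == 'e6'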
|
StarcoderdataPython
|
1710679
|
import flask
import flask_login
from flask.views import MethodView
from flask import request
from webapp import models
from webapp import api
from webapp.journal_plugins import extensions
from collections import Counter
import datetime
class IndexerPluginView(MethodView):
def get_summary(self, context):
objects = [
dict(entry=models.journal_entry_schema.dump(obj=e).data,
output=list(extensions.indexer_plugin._parse_entry_cached(e)))
for e in self.get_objects(context).items]
context['summary_objects'] = objects
seen_names = list()
for i in objects:
for name in i['output']:
try:
seen_names.append(name['label'])
except TypeError:
seen_names.append(name)
ctr = Counter(seen_names)
most_common = list(ctr.most_common())
context['summary'] = dict(most_common=most_common, objects=objects)
def get_context(self, request):
data = request.args
plugin = extensions.name_search
context = dict(args=data)
context['latest_date'] = models.JournalEntry.query.filter(
models.JournalEntry.owner_id == flask_login.current_user.id).order_by(
models.JournalEntry.create_date.desc()).first().date_string
end_date = data.get('end', context['latest_date'])
context['end'] = end_date
self.get_summary(context)
return context
def get_objects(self, context):
now = datetime.date(*map(int,context['end'].split('-')))
print('now',now)
owned_entries = models.JournalEntry.query.filter(models.JournalEntry.owner_id == flask_login.current_user.id)
then = now - datetime.timedelta(days=30)
print('then',then)
ordered_entries = owned_entries.order_by(models.JournalEntry.create_date.desc())
pagination = ordered_entries.filter(models.JournalEntry.create_date <= now).paginate(0, 30,
False)
print(pagination.items)
return pagination
# Flask-SQLalchemy pagination docs
# http://flask-sqlalchemy.pocoo.org/2.1/api/?highlight=pagination#flask.ext.sqlalchemy.Pagination
@flask_login.login_required
def get(self, **kwargs):
        if 'end' not in request.args:
return flask.redirect(flask.url_for('site.plugins-headings_indexer-index', end=models.JournalEntry.query.filter(
models.JournalEntry.owner_id == flask_login.current_user.id).order_by(
models.JournalEntry.create_date.desc()).first().date_string))
name = flask_login.current_user.first_name
context = self.get_context(request)
try:
return flask.render_template(f'indexer/index.html', context=context)
# return 'Name Recognizer {pagination.items[0].contents} {pagination.items}'.format(pagination=context['pagination'])
except IndexError:
flask.abort(400)
|
StarcoderdataPython
|
3320427
|
from ibeverage import ibeverage
class vanilla(ibeverage):
def __init__(self, bevObj):
self.beverage = bevObj
self.cost = bevObj.cost + 0.25
self.description = bevObj.description + ' ' + 'vanilla'
def printCost(self):
print('Vanilla cost = {0}'.format(self.cost))
def printDescription(self):
print('Vanilla description = {0}'.format(self.description))
|
StarcoderdataPython
|
35913
|
import magic
import os
import random
import string
from ahye.settings import LOCAL_UPLOADS_DIR
def generate_filename(image_data, detect_extension=True):
alphanum = string.ascii_letters + string.digits
retval = ''
while not retval or os.path.exists(os.path.join(LOCAL_UPLOADS_DIR, retval)):
retval = ''.join(random.sample(alphanum, 8))
if detect_extension:
retval += get_file_extension(image_data)
else:
retval += '.png'
return retval
def get_file_extension(image_data):
s = magic.from_buffer(image_data)
if s.startswith('JPEG'):
return '.jpg'
elif s.startswith('GIF'):
return '.gif'
elif s.startswith('PNG'):
return '.png'
def guess_file_extension(url):
""" Used by the image mirroring service """
url = url.lower()
if '.jpg' in url or '.jpeg' in url:
return '.jpg'
elif '.gif' in url:
return '.gif'
elif '.png' in url:
return '.png'
elif '.svg' in url:
return '.svg'
else:
return '.jpg'
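if __name__ == '__main__':
    # Usage sketch (illustrative only): guess_file_extension works purely on the
    # URL string, so it can be exercised without the `magic` dependency or real files.
    assert guess_file_extension('https://example.com/images/photo.JPEG?size=large') == '.jpg'
    assert guess_file_extension('https://example.com/download?id=42') == '.jpg'  # fallback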
|
StarcoderdataPython
|
1786406
|
import os
import nox
from nox import options
PATH_TO_PROJECT = os.path.join(".", "duckari")
SCRIPT_PATHS = [
PATH_TO_PROJECT,
"noxfile.py",
]
options.sessions = ["format_fix", "mypy"]
@nox.session()
def format_fix(session):
session.install("-Ur", "nox-requirements.txt")
session.run("python", "-m", "black", *SCRIPT_PATHS)
session.run("python", "-m", "isort", *SCRIPT_PATHS)
# noinspection PyShadowingBuiltins
@nox.session()
def format(session):
session.install("-Ur", "nox-requirements.txt")
session.run("python", "-m", "black", *SCRIPT_PATHS, "--check")
@nox.session()
def mypy(session):
session.install("-Ur", "nox-requirements.txt")
session.install("-Ur", "requirements.txt")
session.install("-U", "mypy")
session.run("python", "-m", "mypy", "duckari")
|
StarcoderdataPython
|
132595
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import io
import logging
import contextlib
import os
import datetime
import json
import numpy as np
import cv2
import math
import torch
from PIL import Image
from fvcore.common.timer import Timer
from detectron2.structures import BoxMode, PolygonMasks, Boxes
from fvcore.common.file_io import PathManager, file_lock
from detectron2.data.catalog import MetadataCatalog, DatasetCatalog
"""
This file contains functions to parse COCO-format annotations into dicts in "Detectron2 format".
"""
DOTA_CATEGORIES = [
{"color": [220, 20, 60], "isthing": 1, "id": 0, "name": "small-vehicle"},
{"color": [119, 11, 32], "isthing": 1, "id": 1, "name": 'large-vehicle'},
{"color": [0, 0, 142], "isthing": 1, "id": 2, "name": 'ship'},
{"color": [0, 0, 230], "isthing": 1, "id": 3, "name": 'container-crane'},
{"color": [106, 0, 228], "isthing": 1, "id": 4, "name": 'storage-tank'},
{"color": [0, 60, 100], "isthing": 1, "id": 5, "name": 'plane'},
{"color": [0, 80, 100], "isthing": 1, "id": 6, "name": 'tennis-court'},
{"color": [0, 0, 70], "isthing": 1, "id": 7, "name": 'harbor'},
{"color": [0, 0, 192], "isthing": 1, "id": 8, "name": 'bridge'},
{"color": [250, 170, 30], "isthing": 1, "id": 9, "name": 'baseball-diamond'},
{"color": [100, 170, 30], "isthing": 1, "id": 10, "name": 'roundabout'},
{"color": [220, 220, 0], "isthing": 1, "id": 11, "name": 'basketball-court'},
{"color": [175, 116, 175], "isthing": 1, "id": 12, "name": 'swimming-pool'},
{"color": [250, 0, 30], "isthing": 1, "id": 13, "name": 'soccer-ball-field'},
{"color": [165, 42, 42], "isthing": 1, "id": 14, "name": 'ground-track-field'},
{"color": [0, 82, 0], "isthing": 1, "id": 15, "name": "helicopter"},
]
class DotaAPI:
def __init__(self, json_file):
with open(json_file) as f:
data = json.load(f)
self.features = data['features']
@staticmethod
def cvt_dota_to_detectron(dota_bbox: list, patch_size: tuple) -> list:
""" Processes a coordinate array from a geojson into (cy, cx, height, width, theta) format
:param (list) coords: an array of shape (N, 8) with 4 corner points of boxes
:return: (numpy.ndarray) an array of shape (N, 5) with coordinates in proper format
"""
coord = np.asarray(dota_bbox)
pts = np.reshape(coord, (-1, 5)).astype(dtype=np.float32)
cx = pts[:, 0] * patch_size[0]
cy = pts[:, 1] * patch_size[1]
width = pts[:, 2] * patch_size[0]
height = pts[:, 3] * patch_size[1]
theta = pts[:, 4] * 180 / math.pi
if width < height:
width, height = height, width
theta += 90.0
arr = [cx, cy, width, height, theta]
arr = np.asarray(arr).reshape(-1, 5)
arr = torch.tensor(arr)
original_dtype = arr.dtype
arr = arr.double()
w = arr[:, 2]
h = arr[:, 3]
a = arr[:, 4]
c = torch.abs(torch.cos(a * math.pi / 180.0))
s = torch.abs(torch.sin(a * math.pi / 180.0))
# This basically computes the horizontal bounding rectangle of the rotated box
new_w = c * w + s * h
new_h = c * h + s * w
# convert center to top-left corner
arr[:, 0] -= new_w / 2.0
arr[:, 1] -= new_h / 2.0
# bottom-right corner
arr[:, 2] = arr[:, 0] + new_w
arr[:, 3] = arr[:, 1] + new_h
arr = arr[:, :4].to(dtype=original_dtype)
arr = arr.numpy()
return arr
@staticmethod
def cvt_dota_to_detectron_rotated(dota_bbox: list, patch_size: tuple) -> list:
""" Processes a coordinate array from a geojson into (cy, cx, height, width, theta) format
:param (list) coords: an array of shape (N, 8) with 4 corner points of boxes
:return: (numpy.ndarray) an array of shape (N, 5) with coordinates in proper format
"""
coord = np.asarray(dota_bbox)
pts = np.reshape(coord, (-1, 5)).astype(dtype=np.float32)
cx = pts[:, 0] * patch_size[0]
cy = pts[:, 1] * patch_size[1]
width = pts[:, 2] * patch_size[0]
height = pts[:, 3] * patch_size[1]
theta = pts[:, 4] * 180 / math.pi
if width < height:
width, height = height, width
theta += 90.0
detectron_bbox = [cx, cy, width, height, theta]
return detectron_bbox
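def _example_rotated_conversion():
    # Worked example (illustrative only, not used by the converter below): a
    # normalized box in a hypothetical 512x512 patch becomes pixel-space
    # (cx, cy, w, h, theta in degrees).
    box = DotaAPI.cvt_dota_to_detectron_rotated([0.5, 0.25, 0.1, 0.05, 0.0], (512, 512))
    # box == [cx, cy, w, h, theta] with values ~[256.0, 128.0, 51.2, 25.6, 0.0],
    # each a length-1 numpy array because the converter reshapes to (-1, 5)
    return box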
def main():
"""
Load a json file with DACON's instances annotation format.
Currently supports instance detection, instance segmentation,
and person keypoints annotations.
Args:
json_file (str): full path to the json file in dota instances annotation format.
image_root (str): the directory where the images in this json file exists.
dataset_name (str): the name of the dataset (e.g., coco_2017_train).
If provided, this function will also put "thing_classes" into
the metadata associated with this dataset.
extra_annotation_keys (list[str]): list of per-annotation keys that should also be
loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints",
"category_id", "segmentation"). The values for these keys will be returned as-is.
For example, the densepose annotations are loaded in this way.
Returns:
list[dict]: a list of dicts in Detectron2 standard format. (See
`Using Custom Datasets </tutorials/datasets.html>`_ )
Notes:
1. This function does not read the image files.
The results do not have the "image" field.
"""
data_path = "/ws/data/open_datasets/detection/dota/dota_patch_512_256/train"
json_file = os.path.join(data_path, 'labels.json')
json_file = PathManager.get_local_path(json_file)
with contextlib.redirect_stdout(io.StringIO()):
dota_api = DotaAPI(json_file)
anns = dota_api.features
dataset_dicts = []
for ann in anns:
record = {}
record["file_name"] = ann['image_id']
record["height"] = ann['height']
record["width"] = ann['width']
patch_size = (ann['width'], ann['height'])
objs = []
properties = ann['properties']
count = 0
for p in properties:
# Check that the image_id in this annotation is the same as
# the image_id we're looking at.
# This fails only when the data parsing logic or the annotation file is buggy.
# The original COCO valminusminival2014 & minival2014 annotation files
# actually contains bugs that, together with certain ways of using COCO API,
# can trigger this assertion.
# if int(p["type_id"]) > 5:
# continue
count += 1
obj = {}
obj["bbox"] = dota_api.cvt_dota_to_detectron_rotated(p["bounds_imcoords"].split(","), patch_size)
obj["bbox_mode"] = BoxMode.XYWHA_ABS
obj["category_id"] = int(p["type_id"])
objs.append(obj)
if count == 0:
continue
record["annotations"] = objs
dataset_dicts.append(record)
output_dict = {"type":"instances","images":[],"annotations":[],
"categories": [
{
"supercategory": "none",
"name": d["name"],
"id": d["id"]
} for d in DOTA_CATEGORIES
]}
for record in dataset_dicts:
image = {}
image["file_name"] = record["file_name"]
image["height"] = record["height"]
image["width"] = record["width"]
f, b = os.path.splitext(os.path.split(record["file_name"])[1])[0].split('_')
f = int(''.join(i for i in f if i.isdigit()))
b = int(''.join(i for i in b if i.isdigit()))
image_id = f * 1000 + b
image["id"] = image_id
output_dict["images"].append(image)
count = 0
for obj in record["annotations"]:
annotation = {}
annotation["id"] = image_id * 10000 + count
bbox = [d.item() for d in obj["bbox"]]
annotation["bbox"] = bbox
annotation["image_id"] = image_id
annotation["ignore"] = 0
annotation["area"] = bbox[2] * bbox[3]
annotation["iscrowd"] = 0
annotation["category_id"] = obj["category_id"]
output_dict["annotations"].append(annotation)
count += 1
output_path = os.path.join(data_path, "coco_labels.json")
with open(output_path, 'w') as outfile:
json.dump(output_dict, outfile)
main()
|
StarcoderdataPython
|
198979
|
<reponame>kartben/MaixPy_scripts
import network, time
from machine import UART
from Maix import GPIO
from fpioa_manager import fm, board_info
fm.register(8, fm.fpioa.GPIOHS0, force=True)
wifi_en=GPIO(GPIO.GPIOHS0, GPIO.OUT)
fm.register(0, fm.fpioa.GPIOHS1, force=True)
wifi_io0_en=GPIO(GPIO.GPIOHS1, GPIO.OUT)
wifi_io0_en.value(0)
fm.register(board_info.WIFI_RX, fm.fpioa.UART2_TX, force=True)
fm.register(board_info.WIFI_TX, fm.fpioa.UART2_RX, force=True)
uart = UART(UART.UART2,115200,timeout=1000, read_buf_len=4096)
def wifi_enable(en):
global wifi_en
wifi_en.value(en)
def wifi_deal_ap_info(info):
res = []
for ap_str in info:
ap_str = ap_str.split(",")
info_one = []
for node in ap_str:
if node.startswith('"'):
info_one.append(node[1:-1])
else:
info_one.append(int(node))
res.append(info_one)
return res
wifi_enable(1)
time.sleep(2)
nic = network.ESP8285(uart)
ap_info = nic.scan()
ap_info = wifi_deal_ap_info(ap_info)
ap_info.sort(key=lambda x:x[2], reverse=True) # sort by rssi
for ap in ap_info:
print("SSID:{:^20}, RSSI:{:>5} , MAC:{:^20}".format(ap[1], ap[2], ap[3]) )
|
StarcoderdataPython
|
1632144
|
<reponame>nragon/vision
from multiprocessing import current_process
from os import devnull, kill
from signal import signal, SIGTERM, SIGINT
from socket import socket, SOCK_STREAM, AF_INET, SHUT_RDWR
from subprocess import Popen
from time import sleep
from core import common, logger
PROCESS_NAME = current_process().name
def start():
logger.info("starting recorder[pid=%s]" % common.PID)
config = common.load_config()
segment_dir = "%s/%s" % (config["output"], PROCESS_NAME)
config = config["cameras"][PROCESS_NAME]
logger.info("saving segments of camera %s in directory %s" % (PROCESS_NAME, segment_dir))
duration = int(config["duration"])
command = ["ffmpeg", "-rtsp_transport", "tcp", "-i", config["rtsp.url"], "-an", "-sn", "-b:v", "132k", "-bufsize",
"132k", "-c:v", "copy", "-r", str(config["fps"]), "-bsf:v", "h264_mp4toannexb", "-map", "0", "-shortest",
"-strftime", "1", "-f", "segment", "-segment_time", str(duration), "-segment_format", "mp4",
"%s/%s-%s.mp4" % (segment_dir, PROCESS_NAME, "%Y%m%d%H%M%S")]
url = (config["rtsp.ip"], config["rtsp.port"])
request_command = bytes(
"OPTIONS rtsp://%s:%s RTSP/1.0\\r\\nCSeq: 1\\r\\nUser-Agent: python\\r\\nAccept: application/sdp\\r\\n\\r\\n" % (
url[0], str(url[1])), "utf-8")
del config, segment_dir
process = None
try:
while 1:
if not is_reachable(url, request_command):
logger.warning("destination %s:%s is not reachable" % (url[0], str(url[1])))
logger.info("waiting for camera[%s:%s] to be available" % (url[0], str(url[1])))
while not is_reachable(url, request_command):
sleep(1)
close(process)
process = None
if not is_running(process):
close(process)
process = launch(command)
else:
sleep(duration)
finally:
logger.info("stopping recorder[pid=%s]" % common.PID)
close(process)
def launch(command):
attempts = 0
logger.info("launching recorder[%s]" % command)
while 1:
try:
with open(devnull, "wb") as dev_null:
process = Popen(command, stdout=dev_null, stderr=dev_null)
logger.info("process[%s] launched" % str(process.pid))
return process
except Exception as e:
if attempts >= 3:
logger.error("max of 3 launching attempts was reached when launching recorder process: %s" % e)
raise
logger.warning("error launching recorder process: %s" % e)
attempts += 1
logger.warning("reattempting launch (%s of 3)" % attempts)
sleep(1)
def is_running(process):
try:
return process.returncode is None and process.poll() is None
except:
return 0
def is_reachable(url, request_command):
try:
with socket(AF_INET, SOCK_STREAM) as s:
s.settimeout(1)
s.connect(url)
s.send(request_command)
index = s.recv(4096).decode("utf-8").find("RTSP/1.0 200 OK")
s.shutdown(SHUT_RDWR)
return index == 0
except:
return 0
def close(process):
if not process:
return
try:
process.terminate()
process.wait(3)
if process.returncode is None:
kill(process.pid, 9)
except:
kill(process.pid, 9)
def main():
signal(SIGTERM, common.stop)
signal(SIGINT, common.stop)
try:
start()
except KeyboardInterrupt:
pass
except Exception as e:
logger.error("An error occurred during recorder execution: %s" % e)
|
StarcoderdataPython
|
3323424
|
from bootstrap3.renderers import FieldRenderer, InlineFieldRenderer
from bootstrap3.text import text_value
from django.forms import CheckboxInput
from django.forms.utils import flatatt
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import pgettext
from i18nfield.forms import I18nFormField
def render_label(content, label_for=None, label_class=None, label_title='', optional=False):
"""
Render a label with content
"""
attrs = {}
if label_for:
attrs['for'] = label_for
if label_class:
attrs['class'] = label_class
if label_title:
attrs['title'] = label_title
if text_value(content) == ' ':
# Empty label, e.g. checkbox
attrs.setdefault('class', '')
attrs['class'] += ' label-empty'
builder = '<{tag}{attrs}>{content}{opt}</{tag}>'
return format_html(
builder,
tag='label',
attrs=mark_safe(flatatt(attrs)) if attrs else '',
opt=mark_safe('<br><span class="optional">{}</span>'.format(pgettext('form', 'Optional'))) if optional else '',
content=text_value(content),
)
class ControlFieldRenderer(FieldRenderer):
def __init__(self, *args, **kwargs):
kwargs['layout'] = 'horizontal'
super().__init__(*args, **kwargs)
def add_label(self, html):
label = self.get_label()
if hasattr(self.field.field, '_required'):
# e.g. payment settings forms where a field is only required if the payment provider is active
required = self.field.field._required
elif isinstance(self.field.field, I18nFormField):
required = self.field.field.one_required
else:
required = self.field.field.required
html = render_label(
label,
label_for=self.field.id_for_label,
label_class=self.get_label_class(),
optional=not required and not isinstance(self.widget, CheckboxInput)
) + html
return html
class BulkEditMixin:
def __init__(self, *args, **kwargs):
kwargs['layout'] = self.layout
super().__init__(*args, **kwargs)
def wrap_field(self, html):
field_class = self.get_field_class()
name = '{}{}'.format(self.field.form.prefix, self.field.name)
checked = self.field.form.data and name in self.field.form.data.getlist('_bulk')
html = (
'<div class="{klass} bulk-edit-field-group">'
'<label class="field-toggle">'
'<input type="checkbox" name="_bulk" value="{name}" {checked}> {label}'
'</label>'
'<div class="field-content">'
'{html}'
'</div>'
'</div>'
).format(
klass=field_class or '',
name=name,
label=pgettext('form_bulk', 'change'),
checked='checked' if checked else '',
html=html
)
return html
class BulkEditFieldRenderer(BulkEditMixin, FieldRenderer):
layout = 'horizontal'
class InlineBulkEditFieldRenderer(BulkEditMixin, InlineFieldRenderer):
layout = 'inline'
|
StarcoderdataPython
|
3299858
|
<gh_stars>1-10
from .measure import normalized_levenshtein, jaccard_word, jaccard_char
from gensim.models import KeyedVectors
import numpy as np
print('--------------load--------------------')
EMBEDDING_PATH = 'distance_module/zh.300.vec.gz'
EMBEDDING_DIM = 300
DEFAULT_KEYVEC = KeyedVectors.load_word2vec_format(EMBEDDING_PATH, limit=50000)
print('-------------finish-------------------')
def tokenize(text):
import jieba
return ' '.join(jieba.cut(text))
def doc2vec(tokenized):
tokens = tokenized.split(' ')
vec = np.full(EMBEDDING_DIM, 1e-10)
weight = 1e-8
for _token in tokens:
try:
vec += DEFAULT_KEYVEC.get_vector(_token)
weight += 1.0
except:
pass
return vec / weight
def batch_doc2vec(list_of_tokenized_text):
return [doc2vec(_text) for _text in list_of_tokenized_text]
class DistanceCalculator:
'''
Computes pair-wise distances between texts, using multiple metrics.
'''
def __init__(self):
pass
def __call__(self, docs_a, docs_b):
docs_a_cut = [tokenize(_doc) for _doc in docs_a]
docs_b_cut = [tokenize(_doc) for _doc in docs_b]
# further validating input
if not self.validate_input(docs_a, docs_b):
raise ValueError("distance module got invalid input")
# actual processing
num_elements = len(docs_a)
distances = dict()
distances['normalized_levenshtein'] = [normalized_levenshtein(docs_a[i], docs_b[i]) for i in range(num_elements)]
distances['jaccard_word'] = [jaccard_word(docs_a_cut[i], docs_b_cut[i]) for i in range(num_elements)]
distances['jaccard_char'] = [jaccard_char(docs_a[i], docs_b[i]) for i in range(num_elements)]
distances['embedding_cosine'] = self.batch_embedding_cosine_distance(docs_a_cut, docs_b_cut)
return distances
def validate_input(self, text_list_a, text_list_b):
'''
Determine whether two arguments are lists containing the same number of strings.
'''
if not (isinstance(text_list_a, list) and isinstance(text_list_b, list)):
return False
if not len(text_list_a) == len(text_list_b):
return False
for i in range(len(text_list_a)):
if not (isinstance(text_list_a[i], str) and isinstance(text_list_b[i], str)):
return False
return True
def batch_embedding_cosine_distance(self, text_list_a, text_list_b):
'''
Compute embedding cosine distances in batches.
'''
import numpy as np
embedding_array_a = np.array(batch_doc2vec(text_list_a))
embedding_array_b = np.array(batch_doc2vec(text_list_b))
norm_a = np.linalg.norm(embedding_array_a, axis=1)
norm_b = np.linalg.norm(embedding_array_b, axis=1)
cosine_numer = np.multiply(embedding_array_a, embedding_array_b).sum(axis=1)
cosine_denom = np.multiply(norm_a, norm_b)
cosine_dist = 1.0 - np.divide(cosine_numer, cosine_denom)
return cosine_dist.tolist()
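# Illustration (not part of the original module): the cosine distance used in
# batch_embedding_cosine_distance, shown on two toy vectors with plain numpy.
def _toy_cosine_distance():
    a = np.array([[1.0, 0.0, 1.0]])
    b = np.array([[1.0, 1.0, 0.0]])
    numer = np.multiply(a, b).sum(axis=1)
    denom = np.linalg.norm(a, axis=1) * np.linalg.norm(b, axis=1)
    return (1.0 - numer / denom).tolist()  # [0.5], since the cosine of the angle is 0.5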
|
StarcoderdataPython
|
3371850
|
<filename>Grammar/10Files and exceptions/division_caculator.py
print("Give me two numbers, and I`ll divide them.")
print("Enter `q` to quit.")
while True:
first_number = input("\nFirst Number: ")
if first_number == 'q':
break
second_number = input("\nSecond Number: ")
    if second_number == 'q':
break
try:
answer = int(first_number) / int(second_number)
except ZeroDivisionError:
print("You can`t divide by 0!")
else:
print(answer)
|
StarcoderdataPython
|
1653094
|
from dataclasses import dataclass, field
from typing import List
@dataclass
class A:
class Meta:
name = "a"
value: str = field(
init=False,
default="e1",
metadata={
"required": True,
}
)
@dataclass
class B:
class Meta:
name = "b"
value: str = field(
init=False,
default="e1",
metadata={
"required": True,
}
)
@dataclass
class Doc:
class Meta:
name = "doc"
a: List[str] = field(
default_factory=list,
metadata={
"type": "Element",
"max_occurs": 3,
"sequential": True,
}
)
other_element: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##other",
"max_occurs": 3,
"sequential": True,
}
)
b: List[str] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
"max_occurs": 3,
"sequential": True,
}
)
@dataclass
class E:
class Meta:
name = "e"
value: str = field(
init=False,
default="e1",
metadata={
"required": True,
}
)
|
StarcoderdataPython
|
141301
|
import json
from unittest import TestCase
import websockets
import asyncio
from .utils.rabbitmq import send_trigger
from .utils.wiremock import set_bootstrap_response
from settings import WS_URI
# from time import time
class TestSubscribe(TestCase):
"""
Simple test for setting up a websocket connection.
TODO: Somehow measure performance.
"""
def test_subscribe_to_room(self):
NUM_ROOMS = 1500
set_bootstrap_response({
"allowed_rooms": [{
"target": "message",
"customer": f"{i}"
} for i in range(NUM_ROOMS)]
})
async def run():
async with websockets.connect(WS_URI) as wsx:
await wsx.recv()
await wsx.send('{"type": "status"}')
status = json.loads(await wsx.recv())
self.assertEqual(0, status['num_rooms'])
self.assertEqual(1, status['open_connections'])
# start1 = time()
async with websockets.connect(WS_URI) as ws:
await ws.recv()
# start = time()
for i in range(NUM_ROOMS):
# print(f'++++ {i}')
await ws.send(json.dumps({
"type": "subscribe",
"room": {
"target": "message",
"customer": f"{i}"
},
"requestId": f"{i}"
}))
res = json.loads(await ws.recv())
self.assertEqual("success", res['code'])
# print(f'subscribe done in {time() - start}')
# start = time()
await wsx.send('{"type": "status"}')
status = json.loads(await wsx.recv())
self.assertEqual(NUM_ROOMS, status['num_rooms'])
self.assertEqual(2, status['open_connections'])
for i in range(NUM_ROOMS):
send_trigger({
"rooms": [
{
"target": "message",
"customer": f"{i}"
}
],
"data": {
"customer": f"{i}"
}
})
res = await ws.recv()
# print(f'trigger done in {time() - start}')
# print(f'user done in {time() - start1}')
await wsx.send('{"type": "status"}')
status = json.loads(await wsx.recv())
self.assertEqual(0, status['num_rooms'])
self.assertEqual(1, status['open_connections'])
asyncio.get_event_loop().run_until_complete(run())
|
StarcoderdataPython
|
1756990
|
import numpy as np
# Scalars
product = np.dot(5, 4)
print("Dot Product of scalar values : ", product)
# Complex numbers
vector_a = 2 + 3j
vector_b = 4 + 5j
product = np.dot(vector_a, vector_b)
print("Dot Product : ", product)
|
StarcoderdataPython
|
3235730
|
import os
import pytest
import configparser
from foxha.utils import Utils
@pytest.fixture(scope='module')
def utils():
return Utils()
@pytest.fixture(scope='module')
def cipher_suite(utils, test_key_path):
return utils.parse_key_file(keyfile=test_key_path)
@pytest.fixture(scope='module')
def config_files_dir_utils(test_dir):
return os.path.dirname(__file__) + '/config_files/'
@pytest.fixture(scope='module')
def connection_config_not_exist(config_files_dir_utils):
return config_files_dir_utils + '.file_does_not_exists'
@pytest.fixture(scope='module')
def connection_config_empty(config_files_dir_utils):
return config_files_dir_utils + '.test_empty_key'
@pytest.fixture(scope='module')
def connection_config_without_section(utils, config_files_dir_utils):
return config_files_dir_utils + '.config_file_with_no_section_error.ini'
@pytest.fixture(scope='module')
def connection_config_without_option(utils, config_files_dir_utils):
return config_files_dir_utils + '.config_file_with_no_option_error.ini'
@pytest.fixture(scope='module')
def connection_config_with_invalid_token(utils, config_files_dir_utils):
return config_files_dir_utils + '.config_file_with_invalid_token.ini'
@pytest.fixture(scope='module')
def connection_config_with_pading_token(utils, config_files_dir_utils):
return config_files_dir_utils + '.test_key_with_pading_error'
@pytest.fixture(scope='module')
def config_file_dict(test_connection_config_path):
config = configparser.ConfigParser()
config.read(test_connection_config_path)
return {
'host': config.get('repository', 'Host'),
'port': int(config.get('repository', 'Port')),
'database': config.get('repository', 'Database'),
'user': config.get('repository', 'User'),
'password': config.get('repository', 'Pass')
}
|
StarcoderdataPython
|
1620795
|
<filename>src/python/models/train_model.py<gh_stars>0
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
import numpy as np
from catboost import CatBoostClassifier
from dotenv import find_dotenv, load_dotenv
import pandas as pd
from sklearn.metrics import accuracy_score,roc_auc_score, f1_score, matthews_corrcoef
from data.KFoldTargetEncoderTrain import KFOLD_TARGET_ENC_COL_POSTFIX
from data.preprocessor import preprocess, get_categorical_cols
from data.utils import get_lbo_pools, gen_seeds, get_catboost_pools
# These are taken from HP
params = {
'loss_function': 'Logloss',
'iterations': 200,
'learning_rate': 0.3668666368559461,
'l2_leaf_reg': 2,
'custom_metric': ['Accuracy', 'Recall', 'F1','MCC'],
'eval_metric': 'AUC',
#'eval_metric': 'F1',
'random_seed': 42,
'logging_level': 'Silent',
'use_best_model': True,
'od_type': 'Iter',
'od_wait': 50,
#'class_weights': [1,2],
'depth': 7
}
@click.command()
@click.argument('train_file', type=click.Path())
@click.argument('model_file', type=click.Path())
def main(train_file, model_file):
""" Runs training
"""
logger = logging.getLogger(__name__)
logger.info('Starting training...')
logger.debug('Loading data...')
df_train = pd.read_csv(train_file)
#df_train.drop('card1'+KFOLD_TARGET_ENC_COL_POSTFIX, axis=1, inplace=True)
df_train = df_train.replace(np.nan, '', regex=True)
logger.debug('Data loaded. About to create LBO data set..')
X, y, X_valid, y_valid = get_lbo_pools(df_train)
train_pool, validate_pool = get_catboost_pools(X, y, X_valid, y_valid)
SEED = 42
gen_seeds(SEED)
model = CatBoostClassifier(**params)
logger.debug('Data created, about to fit model..')
model.fit(
train_pool,
eval_set=validate_pool,
logging_level='Info' #='Verbose',
#plot=False
)
logger.debug('Model fitted. About to check..')
preds_proba = model.predict_proba(X_valid)[:,1]
preds = model.predict(X_valid)
logger.info('Accuracy: ' + str(accuracy_score(y_valid, preds)) )
logger.info('AUC score: ' + str(roc_auc_score(y_valid, preds_proba)) )
logger.info('F1 score: ' + str(f1_score(y_valid, preds_proba.round())) )
logger.info('MCC score: ' + str(matthews_corrcoef(y_valid, preds_proba.round())) )
logger.debug('About to save model to file: '+model_file)
model.save_model(model_file)
logger.info('Completed training')
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.DEBUG, format=log_fmt)
# not used in this stub but often useful for finding various files
project_dir = Path(__file__).resolve().parents[2]
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
main()
|
StarcoderdataPython
|
198576
|
'''
Basic structures
'''
from struct import Struct
from .helpers import num, hexbyte
from .enums import GradientType
_MAGIC = b'4-tP'
_LENGTH = Struct('<i')
def _bare(fmt: str, mul=None) -> tuple:
fmt = Struct(fmt)
def packer(*data) -> bytes:
if mul is not None:
data = [x / mul for x in data]
return fmt.pack(*data)
def unpacker(data: bytes):
result = fmt.unpack(data)
if mul is not None:
result = [x * mul for x in result]
if len(result) == 1:
result = result[0]
return result
return packer, unpacker
def string_unpack(data: bytes) -> str:
length, = _LENGTH.unpack(data[:4])
return data[4:4+length].decode().replace('\x00', '')
def string_pack(data: str) -> bytes:
data = data.encode()
buffer_bytes = -len(data) % 4
return _LENGTH.pack(len(data)) + data + b'\x00' * buffer_bytes
def array_unpack(data: bytes) -> list:
length, = _LENGTH.unpack(data[4:8])
data = data[8:]
starts = data[:4*length]
data = data[4*length:]
blobs = []
for i in range(length):
i *= 4
start, = _LENGTH.unpack(starts[i:i+4])
end = starts[i+4:i+8]
if end:
end, = _LENGTH.unpack(end)
else:
end = None
blob = data[start:end]
blobs.append(blob)
return blobs
def array_pack(blobs: list) -> bytes:
pos = 0
starts = []
for blob in blobs:
starts.append(pos)
pos += len(blob)
return (
_LENGTH.pack(1) + _LENGTH.pack(len(blobs)) +
b''.join([_LENGTH.pack(i) for i in starts]) +
b''.join(blobs)
)
def kind_unpack(data: bytes) -> str:
return data[::-1].decode()
def kind_pack(data: str) -> bytes:
return data[::-1].encode()
_FORMATS = {
b'PTPt': _bare('>dd', mul=2),
b'PTSz': _bare('>dd', mul=2),
b'BDSz': _bare('<qq'),
b'PTFl': _bare('>d'),
b'Strn': (string_pack, string_unpack),
b'LOpc': _bare('<H'),
b'SI16': _bare('<hxx'),
b'Arry': (array_pack, array_unpack),
b'Guid': _bare('<hih'),
b'UI64': _bare('<Q'),
b'Blnd': (kind_pack, kind_unpack),
}
def blob(blob: bytes) -> object:
if not len(blob) > 12:
raise TypeError('Pixelmator blobs are more than 12 bytes! ')
magic = blob[:4]
if not magic == _MAGIC:
raise TypeError('Pixelmator blobs start with the magic number "4-tP".')
kind = blob[4:8][::-1]
if kind in _FORMATS:
packer, unpacker = _FORMATS[kind]
else:
raise TypeError(f'Unknown blob type {kind}.')
length, = _LENGTH.unpack(blob[8:12])
data = blob[12:12+length]
return unpacker(data)
def make_blob(kind: bytes, *data) -> bytes:
if kind not in _FORMATS:
raise TypeError(f'Unknown blob type {kind}.')
packer, unpacker = _FORMATS[kind]
data = packer(*data)
length = _LENGTH.pack(len(data))
return _MAGIC + kind[::-1] + length + data
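def _example_blob_roundtrip():
    # Usage sketch (illustrative only, not part of the original API): pack a
    # string into the '4-tP' container defined above and read it back.
    packed = make_blob(b'Strn', 'hello')
    return blob(packed)  # 'hello'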
def verb(data, version=1):
'''vercon, verstruct or verlist extractor'''
if isinstance(data, dict):
if 'version' in data:
ver = data['version']
con = data['versionSpecifiContainer']
else:
ver = data['structureVersion']
con = data['versionSpecificInfo']
else:
ver, con = data
if ver != version:
raise VersionError(
f"Data structure was version {ver}, expected {version}. "
"Most likely this is because a newer Pixelmator version exists. "
"Try updating `pxdlib`, or otherwise contact developer."
)
return con
class RGBA:
'''
RGBA color in [0, 255]-space.
'''
def __init__(self, r=0, g=0, b=0, a=255):
'''
Accepts RGBA values, tuple or hex string.
'''
if isinstance(r, (tuple, list)):
if len(r) == 3:
r, g, b = r
elif len(r) == 4:
r, g, b, a = r
else:
raise ValueError('Iterable must be length 3 or 4.')
elif isinstance(r, str):
string = r
if string.startswith('#'):
string = string[1:]
if not len(string) in (6, 8):
raise ValueError(
'String colors must be #RRGGBB or #RRGGBBAA.'
)
r = int(string[0:2], base=16)
g = int(string[2:4], base=16)
b = int(string[4:6], base=16)
if len(string) == 8:
a = int(string[6:8], base=16)
self.r = num(r)
self.g = num(g)
self.b = num(b)
self.a = num(a)
def __iter__(self):
tup = self.r, self.g, self.b, self.a
return iter(tup)
def __repr__(self):
val = hexbyte(self.r) + hexbyte(self.g) + hexbyte(self.b)
if self.a != 255:
val += hexbyte(self.a)
return f"RGBA('{val}')"
@classmethod
def _from_data(cls, data):
data = verb(data)
assert data['m'] == 2
assert data['csr'] == 0
r, g, b, a = data['c']
return cls(r*255, g*255, b*255, a*255)
def _to_data(self):
r, g, b, a = list(self)
return [1, {
'm': 2, 'csr': 0,
'c': [r/255, g/255, b/255, a/255]
}]
def __eq__(self, other):
return all([
round(a[0]) == round(a[1])
for a in zip(tuple(self), tuple(other))
])
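def _example_rgba():
    # Usage sketch (illustrative only): RGBA accepts component values, tuples or
    # hex strings, and equality compares rounded components.
    assert RGBA('48a0f8') == RGBA(72, 160, 248)
    assert RGBA('#48a0f880').a == 128
    return RGBA('48a0f8')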
class Gradient:
'''
Gradient of two or more colours.
Contains a list of (RGBA, x),
alongside a list of midpoints
and the gradient kind.
'''
_default_cols = [
(RGBA('48a0f8'), 0), (RGBA('48a0f800'), 1)
]
def __init__(self, colors=None, midpoints=None, kind=0):
self.kind = GradientType(kind)
self.colors = colors or self._default_cols
x0 = -1
for c, x in self.colors:
assert x0 < x
x0 = x
if midpoints is None:
midpoints = []
for i in range(len(self.colors) - 1):
c1, x1 = self.colors[i]
c2, x2 = self.colors[i+1]
midpoints.append((x1 + x2)/2)
self.midpoints = midpoints
def __repr__(self):
vals = []
if self.colors != self._default_cols:
vals.append(repr(self.colors))
midpoints_default = True
for i in range(len(self.colors) - 1):
c1, x1 = self.colors[i]
c2, x2 = self.colors[i+1]
m_apparent = (x1 + x2)/2
if self.midpoints[i] != m_apparent:
midpoints_default = False
break
if not midpoints_default:
vals.append(repr(self.midpoints))
if self.kind != 0:
vals.append(str(self.kind))
return f"Gradient({', '.join(vals)})"
@classmethod
def _from_data(cls, data):
data = verb(data)
assert data['csr'] == 0
colors = [verb(i) for i in data['s']]
colors = [
(RGBA(r*255, g*255, b*255, a*255), x)
for (r, g, b, a), x in colors
]
return cls(colors, data['m'], data['t'])
def _to_data(self):
data = {'csr': 0}
data['m'] = list(self.midpoints)
data['s'] = [
            [1, [[c.r/255, c.g/255, c.b/255, c.a/255], x]]
for c, x in self.colors
]
data['t'] = int(self.kind)
return [1, data]
|
StarcoderdataPython
|
80711
|
<filename>pyontutils/utils_extra.py
"""
Reused utilties that depend on packages outside the python standard library.
"""
import hashlib
import rdflib
rdflib.plugin.register('librdfxml', rdflib.parser.Parser,
'pyontutils.librdf', 'libRdfxmlParser')
rdflib.plugin.register('libttl', rdflib.parser.Parser,
'pyontutils.librdf', 'libTurtleParser')
def check_value(v):
if isinstance(v, rdflib.Literal) or isinstance(v, rdflib.URIRef):
return v
elif isinstance(v, str) and v.startswith('http'):
return rdflib.URIRef(v)
else:
return rdflib.Literal(v)
class OrderInvariantHash:
""" WARNING VERY BROKEN DO NOT USE """
def __init__(self, cypher=hashlib.sha256, encoding='utf-8'):
self.cypher = cypher
self.encoding = encoding
def convertToBytes(self, e):
if isinstance(e, rdflib.BNode):
raise TypeError('BNode detected, please convert bnodes to '
'ints in a deterministic manner first.')
elif isinstance(e, rdflib.URIRef):
return e.encode(self.encoding)
elif isinstance(e, rdflib.Literal):
return self.makeByteTuple((str(e), e.datatype, e.language))
elif isinstance(e, int):
return str(e).encode(self.encoding)
elif isinstance(e, bytes):
return e
elif isinstance(e, str):
return e.encode(self.encoding)
else:
raise TypeError(f'Unhandled type on {e!r} {type(e)}')
def makeByteTuple(self, t):
return b'(' + b' '.join(self.convertToBytes(e)
for e in t
if e is not None) + b')'
def __call__(self, iterable):
# convert all strings bytes
# keep existing bytes as bytes
        # join as follows b'(http://s http://p http://o)'
# bnodes local indexes are treated as strings and converted
# literals are treated as tuples of strings
# if lang is not present then the tuple is only 2 elements
# this is probably not the fastest way to do this but it works
#bytes_ = [makeByteTuple(t) for t in sorted(tuples)]
#embed()
m = self.cypher()
# when everything is replaced by an integer or a bytestring
# it is safe to sort last because identity is ensured
[m.update(b) for b in sorted(self.makeByteTuple(t) for t in iterable)]
return m.digest()
def currentVMSKb():
    import os
    import psutil
p = psutil.Process(os.getpid())
return p.memory_info().vms
def memoryCheck(vms_max_kb):
""" Lookup vms_max using getCurrentVMSKb """
import psutil
safety_factor = 1.2
vms_max = vms_max_kb
vms_gigs = vms_max / 1024 ** 2
buffer = safety_factor * vms_max
buffer_gigs = buffer / 1024 ** 2
vm = psutil.virtual_memory()
free_gigs = vm.available / 1024 ** 2
if vm.available < buffer:
raise MemoryError('Running this requires quite a bit of memory ~ '
f'{vms_gigs:.2f}, you have {free_gigs:.2f} of the '
f'{buffer_gigs:.2f} needed')
|
StarcoderdataPython
|
1698849
|
from . import pairwise
from . import losses
from . import objects
from .losses import gmm
from . import phantoms
from . import utils
|
StarcoderdataPython
|
1736644
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Memory leak detection utility."""
from tensorflow.python.framework.python_memory_checker import _PythonMemoryChecker
from tensorflow.python.profiler import trace
from tensorflow.python.util import tf_inspect
try:
from tensorflow.python.platform.cpp_memory_checker import _CppMemoryChecker as CppMemoryChecker # pylint:disable=g-import-not-at-top
except ImportError:
CppMemoryChecker = None
def _get_test_name_best_effort():
"""If available, return the current test name. Otherwise, `None`."""
for stack in tf_inspect.stack():
function_name = stack[3]
if function_name.startswith('test'):
try:
class_name = stack[0].f_locals['self'].__class__.__name__
return class_name + '.' + function_name
except: # pylint:disable=bare-except
pass
return None
# TODO(kkb): Also create decorator versions for convenience.
class MemoryChecker(object):
"""Memory leak detection class.
This is a utility class to detect Python and C++ memory leaks. It's intended
for both testing and debugging. Basic usage:
>>> # MemoryChecker() context manager tracks memory status inside its scope.
>>> with MemoryChecker() as memory_checker:
>>> tensors = []
>>> for _ in range(10):
>>> # Simulating `tf.constant(1)` object leak every iteration.
>>> tensors.append(tf.constant(1))
>>>
>>> # Take a memory snapshot for later analysis.
>>> memory_checker.record_snapshot()
>>>
>>> # `report()` generates a html graph file showing allocations over
>>> # snapshots per every stack trace.
>>> memory_checker.report()
>>>
>>> # This assertion will detect `tf.constant(1)` object leak.
>>> memory_checker.assert_no_leak_if_all_possibly_except_one()
`record_snapshot()` must be called once every iteration at the same location.
This is because the detection algorithm relies on the assumption that if there
is a leak, it's happening similarly on every snapshot.
"""
@trace.trace_wrapper
def __enter__(self):
self._python_memory_checker = _PythonMemoryChecker()
if CppMemoryChecker:
self._cpp_memory_checker = CppMemoryChecker(_get_test_name_best_effort())
return self
@trace.trace_wrapper
def __exit__(self, exc_type, exc_value, traceback):
if CppMemoryChecker:
self._cpp_memory_checker.stop()
# We do not enable trace_wrapper on this function to avoid contaminating
# the snapshot.
def record_snapshot(self):
"""Take a memory snapshot for later analysis.
`record_snapshot()` must be called once every iteration at the same
location. This is because the detection algorithm relies on the assumption
that if there is a leak, it's happening similarly on every snapshot.
    The recommended number of `record_snapshot()` calls depends on the testing
    code complexity and the allocation pattern.
"""
self._python_memory_checker.record_snapshot()
if CppMemoryChecker:
self._cpp_memory_checker.record_snapshot()
@trace.trace_wrapper
def report(self):
"""Generates a html graph file showing allocations over snapshots.
It create a temporary directory and put all the output files there.
If this is running under Google internal testing infra, it will use the
directory provided the infra instead.
"""
self._python_memory_checker.report()
if CppMemoryChecker:
self._cpp_memory_checker.report()
@trace.trace_wrapper
def assert_no_leak_if_all_possibly_except_one(self):
"""Raises an exception if a leak is detected.
This algorithm classifies a series of allocations as a leak if it's the same
type (Python) or it happens at the same stack trace (C++) at every snapshot,
except possibly one snapshot.
"""
self._python_memory_checker.assert_no_leak_if_all_possibly_except_one()
if CppMemoryChecker:
self._cpp_memory_checker.assert_no_leak_if_all_possibly_except_one()
@trace.trace_wrapper
def assert_no_new_python_objects(self, threshold=None):
"""Raises an exception if there are new Python objects created.
It computes the number of new Python objects per type using the first and
the last snapshots.
Args:
threshold: A dictionary of [Type name string], [count] pair. It won't
raise an exception if the new Python objects are under this threshold.
"""
self._python_memory_checker.assert_no_new_objects(threshold=threshold)
|
StarcoderdataPython
|
3302674
|
<reponame>NULLCT/LOMC
# coding: utf-8
import sys
import math
import io
from collections import Counter
from collections import deque
def i_input():
return int(input())
def i_map():
return map(int, input().split())
def i_list():
return list(i_map())
def main():
sys.setrecursionlimit(10**6)
n, q = i_map()
global R
R = [[] for _ in range(n + 1)]
global color
color = [0 for _ in range(n + 1)]
color[1] = 1
for _ in range(n - 1):
a, b = i_map()
R[a].append(b)
R[b].append(a)
#dfs(1)
que = deque()
que.append(1)
while que:
t = que.pop()
for r in R[t]:
if color[r] == 0:
color[r] = color[t] * -1
que.append(r)
for i in range(q):
c, d = i_map()
if color[c] == color[d]:
print("Town")
else:
print("Road")
def dfs(s):
for r in R[s]:
if color[r] == 0:
color[r] = color[s] * -1
dfs(r)
if __name__ == '__main__':
_INPUT = '''\
2 1
1 2
1 2
'''
if sys.platform == 'win32':
sys.stdin = io.StringIO(_INPUT)
main()
|
StarcoderdataPython
|
3327025
|
import re
def scene_names_key_func(scene_name):
"""
Key function for sorting scenes with the naming convention that was used
"""
m = re.search('FloorPlan[_]?([a-zA-Z\-]*)([0-9]+)_?([0-9]+)?.*$', scene_name)
last_val = m.group(3) if m.group(3) is not None else -1
return m.group(1), int(m.group(2)), int(last_val)
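# Hedged usage sketch (not part of the original module); the scene names below are
# illustrative assumptions, chosen only to show how the key orders names by prefix and
# then numerically rather than lexicographically:
if __name__ == '__main__':
    example_scenes = ['FloorPlan10', 'FloorPlan2', 'FloorPlan_Train1_2', 'FloorPlan_Train1_1']
    print(sorted(example_scenes, key=scene_names_key_func))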
|
StarcoderdataPython
|
1764551
|
<reponame>usc-psychsim/atomic_domain_definitions<filename>atomic/util/plot.py
import colorsys
import copy
import matplotlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.legend_handler import HandlerTuple
from matplotlib.lines import Line2D
from scipy.cluster.hierarchy import dendrogram
from sklearn.cluster import AgglomerativeClustering
from atomic.util.io import get_file_changed_extension
__author__ = '<NAME>'
__email__ = '<EMAIL>'
TITLE_FONT_SIZE = 10
def plot_bar(data, title, output_img=None, colors=None, plot_mean=True, plot_error=True, x_label='', y_label='',
show_legend=False, horiz_grid=True, show=False):
"""
Plots the given data as a bar-chart, assumed to be a collection of key-value pairs.
:param dict[str, float or (float,float)] data: the data to be plotted.
:param str title: the title of the plot.
:param str output_img: the path to the image on which to save the plot. None results in no image being saved.
:param np.ndarray or None colors: an array of shape (num_variables, 3) containing colors for each variable in the
[R, G, B] normalized format ([0-1]). If `None`, colors will be automatically generated.
:param bool plot_mean: whether to plot a horizontal line across the bar chart denoting the mean of the values.
:param bool plot_error: whether to plot error bars (requires input `data` to be 2-dimensional for each entry).
:param str x_label: the label of the X axis.
:param str y_label: the label of the Y axis.
:param bool show_legend: whether to show a legend. If `False`, data labels will be placed on tick marks.
:param bool horiz_grid: whether to show an horizontal grid.
:param bool show: whether to show the plot on the screen.
:return:
"""
data_size = len(data)
labels = list(data.keys())
values = np.array([data[key] if isinstance(data[key], tuple) or isinstance(data[key], list) else [data[key]]
for key in labels]).T
# save to csv (only when an output path was provided)
if output_img is not None:
    np.savetxt(get_file_changed_extension(output_img, 'csv'), values, '%s', ',', header=','.join(labels), comments='')
# automatically get colors
if colors is None:
colors = distinct_colors(data_size)
# create bar chart with mean and error-bars
plt.figure(figsize=(max(8., 0.4 * data_size), 6))
ax = plt.gca()
if plot_error and values.shape[0] > 1:
ax.bar(np.arange(data_size), values[0], yerr=values[1], capsize=2, error_kw={'elinewidth': .75},
color=colors, edgecolor='black', linewidth=0.7, zorder=100)
else:
ax.bar(np.arange(data_size), values[0], color=colors, edgecolor='black', linewidth=0.7, zorder=100)
if plot_mean:
ax.axhline(y=np.mean(values[0]), label='Mean', c='black', ls='--')
if show_legend:
# add custom legend on the side
plt.xticks([])
patches = []
for i, color in enumerate(colors):
patches.append(mpatches.Patch(color=color, label=labels[i]))
leg = plt.legend(handles=patches, loc='right', fancybox=False)
leg.get_frame().set_edgecolor('black')
leg.get_frame().set_linewidth(0.8)
else:
# show data labels in tick marks
short_labels = max(len(label) for label in labels) <= 8
rotation = 0 if short_labels else 45
align = 'center' if short_labels else 'right'
plt.xticks(np.arange(data_size), labels, rotation=rotation, horizontalalignment=align)
format_and_save_plot(ax, title, output_img, x_label, y_label, False, horiz_grid, show)
plt.close()
def plot_clustering_distances(clustering, file_path):
"""
Saves a plot with the clustering distances resulting from the given clustering algorithm.
:param AgglomerativeClustering clustering: the clustering algorithm with the resulting distances.
:param str file_path: the path to the file in which to save the plot.
:return:
"""
# saves csv with distances
num_clusters = np.flip(np.arange(len(clustering.distances_) + 1) + 1)
distances = np.hstack(([0], clustering.distances_))
np.savetxt(get_file_changed_extension(file_path, 'csv'), np.column_stack((num_clusters, distances)), '%s', ',',
header='Num. Clusters,Distance', comments='')
# plots distances
plt.figure()
plt.plot(num_clusters, distances)
plt.xlim(num_clusters[0], num_clusters[-1]) # invert for more natural view of hierarchical clustering
plt.ylim(ymin=0)
plt.axvline(x=clustering.n_clusters_, c='red', ls='--', lw=1)
format_and_save_plot(plt.gca(), 'Traces Clustering Distance', file_path,
x_label='Num. Clusters', show_legend=False)
def plot_clustering_dendrogram(clustering, file_path, labels=None):
"""
Saves a dendrogram plot with the clustering resulting from the given model.
:param AgglomerativeClustering clustering: the clustering algorithm with the resulting labels and distances.
:param str file_path: the path to the file in which to save the plot.
:param list[str] labels: a list containing a label for each clustering datapoint. If `None`, the cluster of each
datapoint is used as label.
:return:
"""
# saves linkage info to csv
linkage_matrix = get_linkage_matrix(clustering)
np.savetxt(get_file_changed_extension(file_path, 'csv'), linkage_matrix, '%s', ',',
header='Child 0, Child 1, Distance, Leaf Count', comments='')
# saves dendrogram plot
labels = [str(c) for c in clustering.labels_] if labels is None else labels
dendrogram(linkage_matrix, clustering.n_clusters_, 'level', clustering.distance_threshold,
labels=labels, leaf_rotation=45 if max(len(l) for l in labels) > 8 else 0, leaf_font_size=8)
dist_thresh = clustering.distances_[len(clustering.distances_) - clustering.n_clusters_ + 1] \
if clustering.distance_threshold is None else clustering.distance_threshold
plt.axhline(y=dist_thresh, c='red', ls='--', lw=1)
format_and_save_plot(plt.gca(), 'Traces Clustering Dendrogram', file_path, show_legend=False)
def plot_confusion_matrix(matrix, output_img, save_csv=True,
x_labels=None, y_labels=None, color_map=None, title='',
x_label='', y_label='', vmin=None, vmax=None, colorbar=True, rasterized=False):
"""
Plots the given confusion matrix.
:param np.ndarray matrix: the confusion matrix to be plotted.
:param str output_img: the path to the image on which to save the plot. None results in no image being saved.
:param bool save_csv: whether to save a CSV file with the confusion matrix.
:param list[str] x_labels: the labels for the elements in the X-axis.
:param list[str] y_labels: the labels for the elements in the Y-axis.
:param str or None color_map: the colormap to be used.
:param str title: the plot's title.
:param str x_label: the label of the X axis.
:param str y_label: the label of the Y axis.
:param float vmin: the colorbar minimal value. The true minimum will be used if set to `None`.
:param float vmax: the colorbar maximal value. The true maximum will be used if set to `None`.
:param bool colorbar: whether to plot colobar.
:param bool rasterized: whether to rasterize the pcolormesh when drawing vector graphics.
:return:
"""
# saves matrix to csv
if save_csv and output_img is not None:
pd.DataFrame(matrix, y_labels, x_labels).to_csv(get_file_changed_extension(output_img, 'csv'))
# save grid/heatmap plot
if x_labels is None or y_labels is None:
fig, ax = plt.subplots()
else:
fig, ax = plt.subplots(figsize=(max(8., len(x_labels) * .5), max(6., len(y_labels) * 6 / 16)))
color_map = copy.copy(matplotlib.cm.get_cmap(color_map))
color_map.set_under('w')
color_map.set_over('w')
plt.pcolormesh(matrix, cmap=color_map, edgecolors=None, linewidth=0.1, vmax=vmax, vmin=vmin, rasterized=rasterized)
if x_labels is not None:
tilt = max(map(len, x_labels)) > 10
plt.xticks(np.arange(len(x_labels)) + 0.5, x_labels,
rotation=45 if tilt else 0,
horizontalalignment='right' if tilt else 'center')
if y_labels is not None:
plt.yticks(np.arange(len(y_labels)) + 0.5, y_labels)
ax.invert_yaxis()
ax.xaxis.tick_top()
ax.set_aspect('equal')
if colorbar:
plt.colorbar()
format_and_save_plot(ax, title, output_img, x_label, y_label, False, False)
def get_linkage_matrix(clustering):
"""
Gets a linkage matrix from the `sklearn` clustering model.
See: https://scikit-learn.org/stable/auto_examples/cluster/plot_agglomerative_dendrogram.html
:param AgglomerativeClustering clustering: the clustering model.
:return:
"""
# create the counts of samples under each node
counts = np.zeros(clustering.children_.shape[0])
n_samples = len(clustering.labels_)
for i, merge in enumerate(clustering.children_):
current_count = 0
for child_idx in merge:
if child_idx < n_samples:
current_count += 1 # leaf node
else:
current_count += counts[child_idx - n_samples]
counts[i] = current_count
return np.column_stack([clustering.children_, clustering.distances_, counts]).astype(float)
def format_and_save_plot(ax, title, output_img=None, x_label='', y_label='',
show_legend=True, horiz_grid=True, show=False):
"""
Utility function that formats a plot and saves it to a file. Also closes the current plot.
This gives the generated plots a uniform look-and-feel across the library.
:param ax: the plot axes to be formatted.
:param str title: the plot's title.
:param str output_img: the path to the image on which to save the plot. None results in no image being saved.
:param str x_label: the label of the X axis.
:param str y_label: the label of the Y axis.
:param bool show_legend: whether to show the legend.
:param bool horiz_grid: whether to show an horizontal grid.
:param bool show: whether to show the plot on the screen.
:return:
"""
plt.title(title) # , fontweight='bold', fontsize=TITLE_FONT_SIZE)
ax.set_xlabel(x_label) # , fontweight='bold')
ax.set_ylabel(y_label) # , fontweight='bold')
if horiz_grid:
ax.yaxis.grid(True, which='both', linestyle='--', color='lightgrey')
if show_legend:
leg = plt.legend(fancybox=False)
leg.get_frame().set_edgecolor('black')
leg.get_frame().set_linewidth(0.8)
if output_img is not None:
plt.savefig(output_img, pad_inches=0, bbox_inches='tight', dpi=600)
if show:
plt.show()
plt.close()
def gradient_line_legend(color_maps, labels, num_points=10, handle_length=3):
"""
Creates a legend where each entry is a gradient color line.
:param list color_maps: the color maps used in the legend.
:param list[str] labels: the labels of the legend entries.
:param int num_points: the number of points used to create the gradient.
:param int handle_length: the length of the legend line entries.
"""
assert len(color_maps) == len(labels), 'Number of color maps has to be the same as that of labels!'
color_space = np.linspace(0, 1, num_points)
lines = []
for c_map in color_maps:
lines.append(tuple(Line2D([], [], marker='s', markersize=handle_length, c=c_map(c)) for c in color_space))
plt.legend(lines, labels, numpoints=1,
handler_map={tuple: HandlerTuple(ndivide=None)},
handlelength=handle_length)
def distinct_colors(n):
"""
Generates N visually-distinct colors.
:param int n: the number of colors to generate.
:rtype: np.ndarray
:return: an array of shape (n, 3) with colors in the [R, G, B] normalized format ([0-1]).
"""
return np.array([[x for x in colorsys.hls_to_rgb(i / n, .65, .9)] for i in range(n)])
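# Hedged usage sketch (not part of the original module): plot_bar expects a dict mapping
# labels to values or (mean, error) pairs; the data and file name below are illustrative only.
if __name__ == '__main__':
    example_data = {'run-a': (0.82, 0.03), 'run-b': (0.75, 0.05), 'run-c': (0.90, 0.02)}
    plot_bar(example_data, 'Example accuracy per run', output_img='example_bar.png')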
|
StarcoderdataPython
|
141096
|
""" Just a purple sphere """
from vapory import *
objects = [
# SUN
LightSource([1500,2500,-2500], 'color',1),
# SKY
Sphere( [0,0,0],1, 'hollow',
Texture(
Pigment( 'gradient', [0,1,0],
'color_map{[0 color White] [1 color Blue ]}'
'quick_color', 'White'
),
Finish( 'ambient', 1, 'diffuse', 0)
),
'scale', 10000
),
# GROUND
Plane( [0,1,0], 0 ,
Texture( Pigment( 'color', [1.1*e for e in [0.80,0.55,0.35]])),
Normal( 'bumps', 0.75, 'scale', 0.035),
Finish( 'phong', 0.1 )
),
# PAWN
Union( Sphere([0,1,0],0.35),
Cone([0,0,0],0.5,[0,1,0],0.0),
Texture( Pigment( 'color', [1,0.65,0])),
Finish( 'phong', 0.5)
)
]
scene = Scene( Camera( 'ultra_wide_angle',
'angle',45,
'location',[0.0 , 0.6 ,-3.0],
'look_at', [0.0 , 0.6 , 0.0]
),
objects= objects,
included=['colors.inc']
)
scene.render('pawn.png', remove_temp=False)
|
StarcoderdataPython
|
164737
|
<filename>pypet2bids/pypet2bids/dcm2niix4pet.py
import os
import sys
import warnings
from json_maj.main import JsonMAJ, load_json_or_dict
from pypet2bids.helper_functions import ParseKwargs, get_version, translate_metadata, expand_path
import subprocess
import pandas as pd
from os.path import join
from os import listdir, walk, makedirs
from pathlib import Path
import json
import pydicom
import re
from tempfile import TemporaryDirectory
import shutil
from dateutil import parser
from termcolor import colored
import argparse
import importlib
"""
This module acts as a simple wrapper around dcm2niix, it takes all of the same arguments as dcm2niix but does a little
bit of extra work to conform the output nifti and json from dcm2niix to the PET BIDS specification. Additionally, but
optionally, this module can collect blood or physiological data/metadata from spreadsheet files if the path of that
spreadsheet file as well as a python module/script written to interpret it are provided in addition to relevant dcm2niix
commands.
"""
# fields to check for
module_folder = Path(__file__).parent.resolve()
python_folder = module_folder.parent
pet2bids_folder = python_folder.parent
metadata_folder = join(pet2bids_folder, 'metadata')
try:
# collect metadata jsons in dev mode
metadata_jsons = \
[Path(join(metadata_folder, metadata_json)) for metadata_json
in listdir(metadata_folder) if '.json' in metadata_json]
except FileNotFoundError:
metadata_jsons = \
[Path(join(module_folder, 'metadata', metadata_json)) for metadata_json
in listdir(join(module_folder, 'metadata')) if '.json' in metadata_json]
# create a dictionary to house the PET metadata files
metadata_dictionaries = {}
for metadata_json in metadata_jsons:
try:
with open(metadata_json, 'r') as infile:
dictionary = json.load(infile)
metadata_dictionaries[metadata_json.name] = dictionary
except FileNotFoundError as err:
raise Exception(f"Missing pet metadata file {metadata_json} in {metadata_folder}, unable to validate metadata.")
except json.decoder.JSONDecodeError as err:
raise IOError(f"Unable to read from {metadata_json}")
def check_json(path_to_json, items_to_check=None, silent=False):
"""
This method opens a json and checks to see if a set of mandatory values is present within that json, optionally it
also checks for recommended key value pairs. If fields are not present a warning is raised to the user.
:param path_to_json: path to a json file e.g. a BIDS sidecar file created after running dcm2niix
:param items_to_check: a dictionary with items to check for within that json. If None is supplied defaults to the
PET_metadata.json contained in this repository
:param silent: suppresses warnings to stdout if this flag is set to True
:return: dictionary of items existence and value state, if key is True/False there exists/(does not exist) a
corresponding entry in the json the same can be said of value
"""
# check if path exists
path_to_json = Path(path_to_json)
if not path_to_json.exists():
raise FileNotFoundError(path_to_json)
# check for default argument for dictionary of items to check
if items_to_check is None:
items_to_check = metadata_dictionaries['PET_metadata.json']
# open the json
with open(path_to_json, 'r') as in_file:
json_to_check = json.load(in_file)
# initialize warning colors and warning storage dictionary
storage = {}
warning_color = {'mandatory': 'red',
'recommended': 'yellow',
'optional:': 'blue'}
for requirement in items_to_check.keys():
color = warning_color.get(requirement, 'yellow')
for item in items_to_check[requirement]:
if item in json_to_check.keys() and json_to_check.get(item, None):
# this json has both the key and a non blank value do nothing
pass
elif item in json_to_check.keys() and not json_to_check.get(item, None):
if not silent:
print(colored(f"WARNING {item} present but has null value.", "yellow"))
storage[item] = {'key': True, 'value': False}
else:
if not silent:
print(colored(f"WARNING!!!! {item} is not present in {path_to_json}. This will have to be "
f"corrected post conversion.", color))
storage[item] = {'key': False, 'value': False}
return storage
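# Hedged example (comments only): check_json('sub-01_pet.json', silent=True) might return
# something like {'TimeZero': {'key': False, 'value': False}} when that mandatory field is
# absent from the sidecar; the file name here is illustrative.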
def update_json_with_dicom_value(
path_to_json,
missing_values,
dicom_header,
dicom2bids_json=None
):
"""
We go through all of the missing values or keys that we find in the sidecar json and attempt to extract those
missing entities from the dicom source. This function relies on many heuristics a.k.a. many unique conditionals and
simply is what it is, hate the game not the player.
:param path_to_json: path to the sidecar json to check
:param missing_values: dictionary output from check_json indicating missing fields and/or values
:param dicom_header: the dicom or dicoms that may contain information not picked up by dcm2niix
:param dicom2bids_json: a json file that maps dicom header entities to their corresponding BIDS entities
:return: a dictionary of successfully updated (written to the json file) fields and values
"""
# load the sidecar json
sidecar_json = load_json_or_dict(str(path_to_json))
# purely to clean up the generated read the docs page from sphinx, otherwise the entire json appears in the
# read the docs page.
if dicom2bids_json is None:
dicom2bids_json = metadata_dictionaries['dicom2bids.json']
# Units gets written as Unit in older versions of dcm2niix here we check for missing Units and present Unit entity
units = missing_values.get('Units', None)
if units:
try:
# Units is missing, check to see if Unit is present
if sidecar_json.get('Unit', None):
temp = JsonMAJ(path_to_json, {'Units': sidecar_json.get('Unit')})
temp.remove('Unit')
else: # we source the Units value from the dicom header and update the json
JsonMAJ(path_to_json, {'Units': dicom_header.Units})
except AttributeError:
print(f"Dicom is missing Unit(s) field, are you sure this is a PET dicom?")
# pair up dicom fields with bids sidecar json field, we do this in a separate json file
# it's loaded when this script is run and stored in metadata dictionaries
dcmfields = dicom2bids_json['dcmfields']
jsonfields = dicom2bids_json['jsonfields']
regex_cases = ["ReconstructionMethod", "ConvolutionKernel"]
# strip excess characters from dcmfields
dcmfields = [re.sub('[^0-9a-zA-Z]+', '', field) for field in dcmfields]
paired_fields = {}
for index, field in enumerate(jsonfields):
paired_fields[field] = dcmfields[index]
print("Attempting to locate missing BIDS fields in dicom header")
# go through missing fields and reach into dicom to pull out values
json_updater = JsonMAJ(json_path=path_to_json)
for key, value in paired_fields.items():
missing_bids_field = missing_values.get(key, None)
# if field is missing look into dicom
if missing_bids_field:
# there are a few special cases that require regex splitting of the dicom entries
# into several bids sidecar entities
try:
dicom_field = getattr(dicom_header, value)
print(f"FOUND {value} corresponding to BIDS {key}: {dicom_field}")
except AttributeError:
dicom_field = None
print(f"NOT FOUND {value} corresponding to BIDS {key} in dicom header.")
if dicom_field and value in regex_cases:
# if it exists get rid of it, we don't want no part of it.
if sidecar_json.get('ReconMethodName', None):
json_updater.remove('ReconstructionMethod')
if dicom_header.get('ReconstructionMethod', None):
reconstruction_method = dicom_header.ReconstructionMethod
json_updater.remove('ReconstructionMethod')
reconstruction_method = get_recon_method(reconstruction_method)
json_updater.update(reconstruction_method)
# TODO Convolution Kernel
elif dicom_field:
# update json
json_updater.update({key: dicom_field})
# Additional Heuristics are included below
# See if time zero is missing in json
if missing_values.get('TimeZero')['key'] is False or missing_values.get('TimeZero')['value'] is False:
time_parser = parser
acquisition_time = time_parser.parse(dicom_header['AcquisitionTime'].value).time().isoformat()
json_updater.update({'TimeZero': acquisition_time})
json_updater.remove('AcquisitionTime')
json_updater.update({'ScanStart': 0})
else:
pass
if missing_values.get('ScanStart')['key'] is False or missing_values.get('ScanStart')['value'] is False:
json_updater.update({'ScanStart': 0})
if missing_values.get('InjectionStart')['key'] is False or missing_values.get('InjectionStart')['value'] is False:
json_updater.update({'InjectionTime': 0})
# check to see if units are BQML
json_updater = JsonMAJ(str(path_to_json))
if json_updater.get('Units') == 'BQML':
json_updater.update({'Units': 'Bq/mL'})
# Add radionuclide to json
Radionuclide = get_radionuclide(dicom_header)
if Radionuclide:
json_updater.update({'TracerRadionuclide': Radionuclide})
# after updating raise warnings to user if values in json don't match values in dicom headers, only warn!
updated_values = json.load(open(path_to_json, 'r'))
for key, value in paired_fields.items():
try:
json_field = updated_values.get(key)
dicom_field = dicom_header.__getattr__(key)
if json_field != dicom_field:
print(colored(f"WARNING!!!! JSON Field {key} with value {json_field} does not match dicom value of {dicom_field}",
"yellow"))
except AttributeError:
pass
def dicom_datetime_to_dcm2niix_time(dicom=None, date='', time=''):
"""
Dcm2niix provides the option of outputting the scan date and time into the .nii and .json filenames at the time of
conversion if '%t' is provided following the '-f' flag. The result is the addition of a date-time string to the
output filenames. This function generates the same datetime string from a dicom header.
:param dicom: pydicom.dataset.FileDataset object or a path to a dicom
:param date: a given date, used in conjunction with time to supply a date time
:param time: a given time, used in conjunction with date
:return: a datetime string that corresponds to the converted filenames from dcm2niix when used with the `-f %t` flag
"""
if dicom:
if type(dicom) is pydicom.dataset.FileDataset:
# do nothing
pass
elif type(dicom) is str:
try:
dicom_path = Path(dicom)
dicom = pydicom.dcmread(dicom_path)
except TypeError:
raise TypeError(f"dicom {dicom} must be either a pydicom.dataset.FileDataset object or a "
f"valid path to a dicom file")
parsed_date = dicom.StudyDate
parsed_time = str(round(float(dicom.StudyTime)))
if len(parsed_time) < 6:
zeros_to_pad = 6 - len(parsed_time)
parsed_time = zeros_to_pad * '0' + parsed_time
elif date and time:
parsed_date = date
parsed_time = str(round(float(time)))
return parsed_date + parsed_time
def collect_date_time_from_file_name(file_name):
"""
Collects the date and time from a nifti or a json produced by dcm2niix when dcm2niix is run with the options
%p_%i_%t_%s. This datetime is used to match a dicom header object to the resultant file. E.g. if there are missing
BIDS fields in the json produced by dcm2niix, it is hoped that the dicom header may contain the missing info.
:param file_name: name of the file to extract the date time info from, this should be a json output file from
dcm2niix
:return: a date and time object
"""
date_time_string = re.search(r'(?!\_)[0-9]{14}(?=\_)', file_name)
if date_time_string:
date = date_time_string[0][0:8]
time = date_time_string[0][8:]
else:
raise Exception(f"Unable to parse date_time string from filename: {file_name}")
return date, time
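# Hedged example (comments only): a dcm2niix output named
# "Brain_PET_sub01_20211210131408_4.json" (illustrative) would yield date '20211210' and
# time '131408', matching the string produced by dicom_datetime_to_dcm2niix_time().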
class Dcm2niix4PET:
def __init__(self, image_folder, destination_path=None, metadata_path=None,
metadata_translation_script=None, additional_arguments=None, file_format='%p_%i_%t_%s',
silent=False):
"""
This class is a simple wrapper for dcm2niix and contains methods to do the following in order:
- Convert a set of dicoms into .nii and .json sidecar files
- Inspect the .json sidecar files for any missing BIDS PET fields or values
- If there are missing BIDS PET fields or values this class will attempt to extract them from the dicom
header, a metadata file using a metadata translation script, and lastly from user supplied key pair
arguments.
# class is instantiated:
converter = Dcm2niix4PET(...)
# then conversion is run by calling run_dcm2niix
converter.run_dcm2niix()
Conversion is performed in a temporary directory to make matching dicom headers to dcm2niix output files easier
(and to avoid leaving intermediary files persisting on disk). Afterwards, these files are moved to the
destination directory.
:param image_folder: folder containing a single series/session of dicoms
:param destination_path: destination path for dcm2niix output nii and json files
:param metadata_path: path to excel, csv, or text file with PET metadata (radioligand, blood, etc etc)
:param metadata_translation_script: python file to extract and transform data contained in the metadata_path
:param file_format: the file format that we want dcm2niix to use, by default %p_%i_%t_%s
%p -> protocol
%i -> ID of patient
%t -> time
%s -> series number
:param additional_arguments: user supplied key value pairs, E.g. TimeZero=12:12:12, InjectedRadioactivity=1
this key value pair will overwrite any fields in the dcm2niix produced nifti sidecar.json as it is assumed that
the user knows more about converting their data than the heuristics within dcm2niix, this library, or even the
dicom header
:param silent: silence missing sidecar metadata messages, default is False and very verbose
"""
# check to see if dcm2niix is installed
self.blood_json = None
self.blood_tsv = None
self.check_for_dcm2niix()
self.image_folder = Path(image_folder)
if destination_path:
self.destination_path = Path(destination_path)
else:
self.destination_path = self.image_folder
self.subject_id = None
self.dicom_headers = self.extract_dicom_headers()
self.spreadsheet_metadata = {}
# if there's a spreadsheet and if there's a provided python script use it to manipulate the data in the
# spreadsheet
if metadata_path and metadata_translation_script:
self.metadata_path = Path(metadata_path)
self.metadata_translation_script = Path(metadata_translation_script)
if self.metadata_path.exists() and self.metadata_translation_script.exists():
# load the spreadsheet into a dataframe
self.extract_metadata()
# next we use the loaded python script to extract the information we need
self.load_spread_sheet_data()
self.additional_arguments = additional_arguments
self.file_format = file_format
# we may want to include additional information to the sidecar, tsv, or json files generated after conversion
# this variable stores the mapping between output files and a single dicom header used to generate those files
# to access the dicom header information use the key in self.headers_to_files to access that specific header
# in self.dicom_headers
self.headers_to_files = {}
# if silent is set to True output warnings aren't displayed to stdout/stderr
self.silent = silent
@staticmethod
def check_for_dcm2niix():
"""
Just checks for dcm2niix using the system shell, returns 0 if dcm2niix is present.
:return: status code of the command dcm2niix -h
"""
check = subprocess.run("dcm2niix -h", shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
if check.returncode != 0:
pkged = "https://github.com/rordenlab/dcm2niix/releases"
instructions = "https://github.com/rordenlab/dcm2niix#install"
raise Exception(f"""Dcm2niix does not appear to be installed. Installation instructions can be found here
{instructions} and packaged versions can be found at {pkged}""")
return check.returncode
def extract_dicom_headers(self, depth=1):
"""
Opening up files till a dicom is located, then extracting any header information
to be used during and after the conversion process. This includes patient/subject id,
as well any additional frame or metadata that's required for conversion.
:param depth: the number of dicoms to collect per folder, defaults to 1 as it assumes a single session's worth of
dicoms is included per folder.
:return: dicom header information to self.subject_id and/or self.dicom_header_data
"""
n = 0
dicom_headers = {}
for root, dirs, files in walk(self.image_folder):
for f in files:
if n >= depth:
break
try:
dicom_path = Path(join(root, f))
dicom_header = pydicom.dcmread(dicom_path, stop_before_pixels=True)
# collect subject/patient id if none is supplied
if self.subject_id is None:
self.subject_id = dicom_header.PatientID
dicom_headers[dicom_path.name] = dicom_header
except pydicom.errors.InvalidDicomError:
pass
n += 1
return dicom_headers
def run_dcm2niix(self):
"""
This runs dcm2niix and uses the other methods within this class to supplement the sidecar json's produced as
dcm2niix output.
:return: the path to the output of dcm2niix and the modified sidecar jsons
"""
if self.file_format:
file_format_args = f"-f {self.file_format}"
else:
file_format_args = ""
with TemporaryDirectory() as tempdir:
tempdir_pathlike = Path(tempdir)
convert = subprocess.run(f"dcm2niix -w 1 -z y {file_format_args} -o {tempdir_pathlike} {self.image_folder}",
shell=True,
capture_output=True)
if convert.returncode != 0:
print("Check output .nii files, dcm2iix returned these errors during conversion:")
if bytes("Skipping existing file name", "utf-8") not in convert.stdout or convert.stderr:
print(convert.stderr)
elif convert.returncode != 0 and bytes("Error: Check sorted order",
"utf-8") in convert.stdout or convert.stderr:
print("Possible error with frame order, is this a phillips dicom set?")
print(convert.stdout)
print(convert.stderr)
# collect contents of the tempdir
files_created_by_dcm2niix = [join(tempdir_pathlike, file) for file in listdir(tempdir_pathlike)]
# make sure destination path exists if not try creating it.
if self.destination_path.exists():
pass
else:
makedirs(self.destination_path)
# iterate through created files to supplement sidecar jsons
for created in files_created_by_dcm2niix:
created_path = Path(created)
if created_path.suffix == '.json':
# we want to pair up the headers to the files created in the output directory in case
# dcm2niix has created files from multiple sessions
matched_dicoms_and_headers = self.match_dicom_header_to_file(destination_path=tempdir_pathlike)
# we check to see what's missing from our recommended and required jsons by gathering the
# output of check_json silently
check_for_missing = check_json(created_path, silent=True)
# we do our best to extract information from the dicom header and insert these values
# into the sidecar json
# first do a reverse lookup of the key the json corresponds to
lookup = [key for key, value in matched_dicoms_and_headers.items() if str(created_path) in value]
if lookup:
dicom_header = self.dicom_headers[lookup[0]]
update_json_with_dicom_value(
created_path,
check_for_missing,
dicom_header,
dicom2bids_json=metadata_dictionaries['dicom2bids.json'])
# if we have entities in our metadata spreadsheet that we've used we update
if self.spreadsheet_metadata.get('nifti_json', None):
update_json = JsonMAJ(json_path=str(created),
update_values=self.spreadsheet_metadata['nifti_json'])
update_json.update()
# next we check to see if any of the additional user supplied arguments (kwargs) correspond to
# any of the missing tags in our sidecars
if self.additional_arguments:
update_json = JsonMAJ(json_path=str(created),
update_values=self.additional_arguments)
update_json.update()
# there are some additional updates that depend on some PET BIDS logic that we do next, since these
# updates depend on both information provided via the sidecar json and/or information provided via
# additional arguments we run this step after updating the sidecar with those additional user
# arguments
sidecar_json = JsonMAJ(json_path=str(created)) # load all supplied and now written sidecar data in
check_metadata_radio_inputs = check_meta_radio_inputs(sidecar_json.json_data) # run logic
sidecar_json.update(check_metadata_radio_inputs) # update sidecar json with results of logic
# check to see if convolution kernel is present
sidecar_json = JsonMAJ(json_path=str(created))
if sidecar_json.get('ConvolutionKernel'):
if sidecar_json.get('ReconFilterType') and sidecar_json.get('ReconFilterSize'):
sidecar_json.remove('ConvolutionKernel')
else:
# collect filter size
recon_filter_size = ''
if re.search('\d+.\d+', sidecar_json.get('ConvolutionKernel')):
recon_filter_size = re.search('\d+.\d', sidecar_json.get('ConvolutionKernel'))[0]
# collect just the filter type by popping out the filter size if it exists
recon_filter_type = re.sub(recon_filter_size, '', sidecar_json.get('ConvolutionKernel'))
# update the json
sidecar_json.update({
'ReconFilterSize': recon_filter_size,
'ReconFilterType': recon_filter_type})
# remove non bids field
sidecar_json.remove('ConvolutionKernel')
# tag json with additional conversion software
conversion_software = sidecar_json.get('ConversionSoftware')
conversion_software_version = sidecar_json.get('ConversionSoftwareVersion')
sidecar_json.update({'ConversionSoftware': [conversion_software, 'pypet2bids']})
sidecar_json.update({'ConversionSoftwareVersion': [conversion_software_version, get_version()]})
new_path = Path(join(self.destination_path, created_path.name))
shutil.move(src=created, dst=new_path)
return self.destination_path
def post_dcm2niix(self):
with TemporaryDirectory() as temp_dir:
tempdir_path = Path(temp_dir)
if self.subject_id != list(self.dicom_headers.values())[0].PatientID:
blood_file_name_w_out_extension = "sub-" + self.subject_id + "_blood"
elif self.dicom_headers:
# collect date and time and series number from dicom
first_dicom = list(self.dicom_headers.values())[0]
date_time = dicom_datetime_to_dcm2niix_time(first_dicom)
series_number = str(first_dicom.SeriesNumber)
protocol_name = str(first_dicom.SeriesDescription).replace(" ", "_")
blood_file_name_w_out_extension = protocol_name + '_' + self.subject_id + '_' + date_time + '_' + \
series_number + "_blood"
if self.spreadsheet_metadata.get('blood_tsv', None) is not None:
blood_tsv_data = self.spreadsheet_metadata.get('blood_tsv')
if type(blood_tsv_data) is pd.DataFrame:
# write out blood_tsv using pandas csv write
blood_tsv_data.to_csv(join(tempdir_path, blood_file_name_w_out_extension + ".tsv")
, sep='\t',
index=False)
elif type(blood_tsv_data) is str:
# write out with write
with open(join(tempdir_path, blood_file_name_w_out_extension + ".tsv"), 'w') as outfile:
outfile.writelines(blood_tsv_data)
else:
raise (f"blood_tsv dictionary is incorrect type {type(blood_tsv_data)}, must be type: "
f"pandas.DataFrame or str\nCheck return type of {translate_metadata} in "
f"{self.metadata_translation_script}")
if self.spreadsheet_metadata.get('blood_json', {}) != {}:
blood_json_data = self.spreadsheet_metadata.get('blood_json')
if type(blood_json_data) is dict:
# write out to file with json dump
pass
elif type(blood_json_data) is str:
# write out to file with json dumps
blood_json_data = json.loads(blood_json_data)
else:
raise (f"blood_json dictionary is incorrect type {type(blood_json_data)}, must be type: dict or str"
f"pandas.DataFrame or str\nCheck return type of {translate_metadata} in "
f"{self.metadata_translation_script}")
with open(join(tempdir_path, blood_file_name_w_out_extension + '.json'), 'w') as outfile:
json.dump(blood_json_data, outfile, indent=4)
blood_files = [join(str(tempdir_path), blood_file) for blood_file in os.listdir(str(tempdir_path))]
for blood_file in blood_files:
shutil.move(blood_file, join(self.destination_path, os.path.basename(blood_file)))
def convert(self):
self.run_dcm2niix()
self.post_dcm2niix()
def match_dicom_header_to_file(self, destination_path=None):
"""
Matches a dicom header to a nifti or json file produced by dcm2niix, this is run after dcm2niix converts the
input dicoms into nifti's and json's.
:param destination_path: the path dcm2niix generated files are placed at, collected during class instantiation
:return: a dictionary of headers matched to nifti and json file names
"""
if not destination_path:
destination_path = self.destination_path
# first collect all of the files in the output directory
output_files = [join(destination_path, output_file) for output_file in listdir(destination_path)]
# create empty dictionary to store pairings
headers_to_files = {}
# collect study date and time from header
for each in self.dicom_headers:
header_study_date = self.dicom_headers[each].StudyDate
header_acquisition_time = str(round(float(self.dicom_headers[each].StudyTime)))
if len(header_acquisition_time) < 6:
header_acquisition_time = (6 - len(header_acquisition_time)) * "0" + header_acquisition_time
header_date_time = dicom_datetime_to_dcm2niix_time(date=header_study_date, time=header_acquisition_time)
for output_file in output_files:
if header_date_time in output_file:
try:
headers_to_files[each].append(output_file)
except KeyError:
headers_to_files[each] = [output_file]
return headers_to_files
def extract_metadata(self):
"""
Opens up a metadata file and reads it into a pandas dataframe
:return: a pd dataframe object
"""
# collect metadata from spreadsheet
metadata_extension = Path(self.metadata_path).suffix
self.open_meta_data(metadata_extension)
def open_meta_data(self, extension):
"""
Opens a text metadata file with the pandas method most appropriate for doing so based on the metadata
file's extension.
:param extension: The extension of the file
:return: a pandas dataframe representation of the spreadsheet/metadatafile
"""
methods = {
'excel': pd.read_excel,
'csv': pd.read_csv
}
if 'xls' in extension:
proper_method = 'excel'
else:
proper_method = extension
try:
use_me_to_read = methods.get(proper_method, None)
self.metadata_dataframe = use_me_to_read(self.metadata_path)
except IOError as err:
raise err(f"Problem opening {self.metadata_path}")
def load_spread_sheet_data(self):
text_file_data = {}
if self.metadata_translation_script:
try:
# this is where the goofiness happens, we allow the user to create their own custom script to manipulate
# data from their particular spreadsheet wherever that file is located.
spec = importlib.util.spec_from_file_location("metadata_translation_script",
self.metadata_translation_script)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
text_file_data = module.translate_metadata(self.metadata_dataframe)
except AttributeError as err:
print(f"Unable to locate metadata_translation_script")
self.spreadsheet_metadata['blood_tsv'] = text_file_data.get('blood_tsv', {})
self.spreadsheet_metadata['blood_json'] = text_file_data.get('blood_json', {})
self.spreadsheet_metadata['nifti_json'] = text_file_data.get('nifti_json', {})
# noinspection PyPep8Naming
def get_recon_method(ReconstructionMethodString: str) -> dict:
"""
Given the reconstruction method from a dicom header this function does its best to determine the name of the
reconstruction, the number of iterations used in the reconstruction, and the number of subsets in the
reconstruction.
:param ReconstructionMethodString:
:return: dictionary containing PET BIDS fields ReconMethodName, ReconMethodParameterUnits,
ReconMethodParameterLabels, and ReconMethodParameterValues
"""
contents = ReconstructionMethodString
subsets = None
iterations = None
ReconMethodParameterUnits = ["none", "none"]
ReconMethodParameterLabels = ["subsets", "iterations"]
# determine order of recon iterations and subsets, this is not a surefire way to determine this...
iter_sub_combos = {
'iter_first': [r'\d\di\ds', r'\d\di\d\ds', r'\di\ds', r'\di\d\ds',
r'i\d\ds\d', r'i\d\ds\d\d', r'i\ds\d', r'i\ds\d\d'],
'sub_first': [r'\d\ds\di', r'\d\ds\d\di', r'\ds\di', r'\ds\d\di',
r's\d\di\d', r's\d\di\d\d', r's\di\d', r's\di\d\d'],
}
iter_sub_combos['iter_first'] = [re.compile(regex) for regex in iter_sub_combos['iter_first']]
iter_sub_combos['sub_first'] = [re.compile(regex) for regex in iter_sub_combos['sub_first']]
order = None
possible_iter_sub_strings = []
iteration_subset_string = None
# run through possible combinations of iteration substitution strings in iter_sub_combos
for key, value in iter_sub_combos.items():
for expression in value:
iteration_subset_string = expression.search(contents)
if iteration_subset_string:
order = key
iteration_subset_string = iteration_subset_string[0]
possible_iter_sub_strings.append(iteration_subset_string)
# if matches get ready to pick one
if possible_iter_sub_strings:
# sorting matches by string length as our method can return more than one match e.g. 3i21s will return
# 3i21s and 3i1s or something similar
possible_iter_sub_strings.sort(key=len)
# picking the longest match as it's most likely the correct one
iteration_subset_string = possible_iter_sub_strings[-1]
# after we've captured the subsets and iterations we next need to separate them out from each other
if iteration_subset_string and order:
# remove all chars replace with spaces
just_digits = re.sub(r'[a-zA-Z]', " ", iteration_subset_string)
just_digits = just_digits.strip()
# split up subsets and iterations
just_digits = just_digits.split(" ")
# assign digits to either subsets or iterations based on order information obtained earlier
if order == 'iter_first' and len(just_digits) == 2:
iterations = int(just_digits[0])
subsets = int(just_digits[1])
elif len(just_digits) == 2:
iterations = int(just_digits[1])
subsets = int(just_digits[0])
else:
# if we don't have both we decide that we don't have either; flawed, but works for the samples in
# test_dcm2niix4pet.py. Will be updated when non-conforming data is obtained
pass # do nothing, this case shouldn't fire.....
if iteration_subset_string:
ReconMethodName = re.sub(iteration_subset_string, "", contents)
else:
ReconMethodName = contents
# cleaning up weird chars at end or start of name
ReconMethodName = re.sub(r'[^a-zA-Z0-9]$', "", ReconMethodName)
ReconMethodName = re.sub(r'^[^a-zA-Z0-9]', "", ReconMethodName)
# get everything in front of \d\di or \di or \d\ds or \ds
return {
"ReconMethodName": ReconMethodName,
"ReconMethodParameterUnits": ReconMethodParameterUnits,
"ReconMethodParameterLabels": ReconMethodParameterLabels,
"ReconMethodParameterValues": [subsets, iterations]
}
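# Hedged example (comments only): for a dicom ReconstructionMethod string such as
# "OSEM3D 4i21s" (illustrative), this function would return ReconMethodName "OSEM3D" and
# ReconMethodParameterValues [21, 4] (subsets first, then iterations, matching the labels).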
def check_meta_radio_inputs(kwargs: dict) -> dict:
InjectedRadioactivity = kwargs.get('InjectedRadioactivity', None)
InjectedMass = kwargs.get("InjectedMass", None)
SpecificRadioactivity = kwargs.get("SpecificRadioactivity", None)
MolarActivity = kwargs.get("MolarActivity", None)
MolecularWeight = kwargs.get("MolecularWeight", None)
data_out = {}
if InjectedRadioactivity and InjectedMass:
data_out['InjectedRadioactivity'] = InjectedRadioactivity
data_out['InjectedRadioactivityUnits'] = 'MBq'
data_out['InjectedMass'] = InjectedMass
data_out['InjectedMassUnits'] = 'ug'
# check for strings where there shouldn't be strings
numeric_check = [str(InjectedRadioactivity).isnumeric(), str(InjectedMass).isnumeric()]
if False in numeric_check:
data_out['InjectedMass'] = 'n/a'
data_out['InjectedMassUnits'] = 'n/a'
else:
tmp = (InjectedRadioactivity*10**6)/(InjectedMass*10**6)
if SpecificRadioactivity:
if SpecificRadioactivity != tmp:
print(colored("WARNING inferred SpecificRadioactivity in Bq/g doesn't match InjectedRadioactivity "
"and InjectedMass, could be a unit issue", "yellow"))
data_out['SpecificRadioactivity'] = SpecificRadioactivity
data_out['SpecificRadioactivityUnits'] = kwargs.get('SpecificRadioactivityUnityUnits', 'n/a')
else:
data_out['SpecificRadioactivity'] = tmp
data_out['SpecificRadioactivityUnits'] = 'Bq/g'
if InjectedRadioactivity and SpecificRadioactivity:
data_out['InjectedRadioactivity'] = InjectedRadioactivity
data_out['InjectedRadioactivityUnits'] = 'MBq'
data_out['SpecificRadioactivity'] = SpecificRadioactivity
data_out['SpecificRadioactivityUnits'] = 'Bq/g'
numeric_check = [str(InjectedRadioactivity).isnumeric(), str(SpecificRadioactivity).isnumeric()]
if False in numeric_check:
data_out['InjectedMass'] = 'n/a'
data_out['InjectedMassUnits'] = 'n/a'
else:
tmp = ((InjectedRadioactivity*10**6)/SpecificRadioactivity)*10**6
if InjectedMass:
if InjectedMass != tmp:
print(colored("WARNING Infered InjectedMass in ug doesn''t match InjectedRadioactivity and "
"InjectedMass, could be a unit issue", "yellow"))
data_out['InjectedMass'] = InjectedMass
data_out['InjectedMassUnits'] = kwargs.get('InjectedMassUnits', 'n/a')
else:
data_out['InjectedMass'] = tmp
data_out['InjectedMassUnits'] = 'ug'
if InjectedMass and SpecificRadioactivity:
data_out['InjectedMass'] = InjectedMass
data_out['InjectedMassUnits'] = 'ug'
data_out['SpecificRadioactivity'] = SpecificRadioactivity
data_out['SpecificRadioactivityUnits'] = 'Bq/g'
numeric_check = [str(SpecificRadioactivity).isnumeric(), str(InjectedMass).isnumeric()]
if False in numeric_check:
data_out['InjectedRadioactivity'] = 'n/a'
data_out['InjectedRadioactivityUnits'] = 'n/a'
else:
tmp = ((InjectedMass / 10 ** 6) / SpecificRadioactivity) / 10 ** 6 # ((ug / 10 ^ 6) / Bq / g)/10 ^ 6 = MBq
if InjectedRadioactivity:
if InjectedRadioactivity != tmp:
print(colored("WARNING infered InjectedRadioactivity in MBq doesn't match SpecificRadioactivity "
"and InjectedMass, could be a unit issue", "yellow"))
data_out['InjectedRadioactivity'] = InjectedRadioactivity
data_out['InjectedRadioactivityUnits'] = kwargs.get('InjectedRadioactivityUnits', 'n/a')
else:
data_out['InjectedRadioactivity'] = tmp
data_out['InjectedRadioactivityUnits'] = 'MBq'
if MolarActivity and MolecularWeight:
data_out['MolarActivity'] = MolarActivity
data_out['MolarActivityUnits'] = 'GBq/umol'
data_out['MolecularWeight'] = MolecularWeight
data_out['MolecularWeightUnits'] = 'g/mol'
numeric_check = [str(MolarActivity).isnumeric(), str(MolecularWeight).isnumeric()]
if False in numeric_check:
data_out['SpecificRadioactivity'] = 'n/a'
data_out['SpecificRadioactivityUnits'] = 'n/a'
else:
tmp = (MolarActivity * 10 ** 6) / (
MolecularWeight / 10 ** 6) # (GBq / umol * 10 ^ 6) / (g / mol / * 10 ^ 6) = Bq / g
if SpecificRadioactivity:
if SpecificRadioactivity != tmp:
print(colored("WARNING inferred SpecificRadioactivity in Bq/g doesn't match MolarActivity and "
"MolecularWeight, could be a unit issue", 'yellow'))
data_out['SpecificRadioactivity'] = SpecificRadioactivity
data_out['SpecificRadioactivityUnits'] = kwargs.get('SpecificRadioactivityUnityUnits', 'n/a')
else:
data_out['SpecificRadioactivity'] = tmp
data_out['SpecificRadioactivityUnits'] = 'Bq/g'
if MolarActivity and SpecificRadioactivity:
data_out['SpecificRadioactivity'] = SpecificRadioactivity
data_out['SpecificRadioactivityUnits'] = 'MBq/ug'
data_out['MolarActivity'] = MolarActivity
data_out['MolarActivityUnits'] = 'GBq/umol'
numeric_check = [str(SpecificRadioactivity).isnumeric(), str(MolarActivity).isnumeric()]
if False in numeric_check:
data_out['MolecularWeight'] = 'n/a'
data_out['MolecularWeightUnits'] = 'n/a'
else:
tmp = (SpecificRadioactivity / 1000) / MolarActivity # (MBq / ug / 1000) / (GBq / umol) = g / mol
if MolecularWeight:
if MolecularWeight != tmp:
print(colored("WARNING Infered MolecularWeight in MBq/ug doesn't match Molar Activity and "
"Molecular Weight, could be a unit issue", 'yellow'))
data_out['MolecularWeight'] = MolecularWeight
data_out['MolecularWeightUnits'] = kwargs.get('MolecularWeightUnits', 'n/a')
else:
data_out['MolecularWeight'] = tmp
data_out['MolecularWeightUnits'] = 'g/mol'
if MolecularWeight and SpecificRadioactivity:
data_out['SpecificRadioactivity'] = SpecificRadioactivity
data_out['SpecificRadioactivityUnits'] = 'MBq/ug'
data_out['MolecularWeight'] = MolecularWeight
data_out['MolecularWeightUnits'] = 'g/mol'
numeric_check = [str(SpecificRadioactivity).isnumeric(), str(MolecularWeight).isnumeric()]
if False in numeric_check:
data_out['MolarActivity'] = 'n/a'
data_out['MolarActivityUnits'] = 'n/a'
else:
tmp = MolecularWeight * (SpecificRadioactivity / 1000) # g / mol * (MBq / ug / 1000) = GBq / umol
if MolarActivity:
if MolarActivity != tmp:
print(colored("WARNING infered MolarActivity in GBq/umol doesn''t match Specific Radioactivity and "
"Molecular Weight, could be a unit issue", "yellow"))
data_out['MolarActivity'] = MolarActivity
data_out['MolarActivityUnits'] = kwargs.get('MolarActivityUnits', 'n/a')
else:
data_out['MolarActivity'] = tmp
data_out['MolarActivityUnits'] = 'GBq/umol'
return data_out
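# Hedged example (comments only): check_meta_radio_inputs({'MolarActivity': 2, 'MolecularWeight': 347})
# fills in SpecificRadioactivity as (2 * 10**6) / (347 / 10**6) Bq/g plus the matching *Units entries;
# note the str(...).isnumeric() guards only pass for integer-like inputs.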
def get_radionuclide(pydicom_dicom):
"""
Gets the radionuclide if given a pydicom_object if
pydicom_object.RadiopharmaceuticalInformationSequence[0].RadionuclideCodeSequence exists
:param pydicom_dicom: dicom object collected by pydicom.dcmread("dicom_file.img")
:return: Labeled Radionuclide e.g. 11Carbon, 18Fluorine
"""
try:
radiopharmaceutical_information_sequence = pydicom_dicom.RadiopharmaceuticalInformationSequence
radionuclide_code_sequence = radiopharmaceutical_information_sequence[0].RadionuclideCodeSequence
code_value = radionuclide_code_sequence[0].CodeValue
code_meaning = radionuclide_code_sequence[0].CodeMeaning
extraction_good = True
except AttributeError:
warnings.warn("Unable to extract RadioNuclideCodeSequence from RadiopharmaceuticalInformationSequence")
radionuclide = ""
extraction_good = False
if extraction_good:
# check to see if these nucleotides appear in our verified values
verified_nucleotides = metadata_dictionaries['dicom2bids.json']['RadionuclideCodes']
check_code_value = ""
check_code_meaning = ""
if code_value in verified_nucleotides.keys():
check_code_value = code_value
else:
warnings.warn(f"Radionuclide Code {code_value} does not match any known codes in dcm2bids.json\n"
f"will attempt to infer from code meaning {code_meaning}")
if code_meaning in verified_nucleotides.values():
radionuclide = re.sub(r'\^', "", code_meaning)
check_code_meaning = code_meaning
else:
warnings.warn(f"Radionuclide Meaning {code_meaning} not in known values in dcm2bids json")
if code_value in verified_nucleotides.keys():
radionuclide = re.sub(r'\^', "", verified_nucleotides[code_value])
# final check
if check_code_meaning and check_code_value:
pass
else:
warnings.warn(f"WARNING!!!! Possible mismatch between nuclide code meaning {code_meaning} and {code_value} in dicom "
f"header")
return radionuclide
def cli():
"""
Collects arguments used to initiate a Dcm2niix4PET class, collects the following arguments from the user.
:param folder: folder containing imaging data, no flag required
:param -m, --metadata-path: path to PET metadata spreadsheet
:param -t, --translation-script-path: path to script used to extract information from metadata spreadsheet
:param -d, --destination-path: path to place outputfiles post conversion from dicom to nifti + json
:return: arguments collected from argument parser
"""
parser = argparse.ArgumentParser()
parser.add_argument('folder', type=str, default=os.getcwd(),
help="Folder path containing imaging data")
parser.add_argument('--metadata-path', '-m', type=str, default=None,
help="Path to metadata file for scan")
parser.add_argument('--translation-script-path', '-t', default=None,
help="Path to a script written to extract and transform metadata from a spreadsheet to BIDS" +
" compliant text files (tsv and json)")
parser.add_argument('--destination-path', '-d', type=str, default=None,
help="Destination path to send converted imaging and metadata files to. If " +
"omitted defaults to using the path supplied to folder path. If destination path " +
"doesn't exist an attempt to create it will be made.", required=False)
parser.add_argument('--kwargs', '-k', nargs='*', action=ParseKwargs, default={},
help="Include additional values int the nifti sidecar json or override values extracted from "
"the supplied nifti. e.g. including `--kwargs TimeZero='12:12:12'` would override the "
"calculated TimeZero. Any number of additional arguments can be supplied after --kwargs "
"e.g. `--kwargs BidsVariable1=1 BidsVariable2=2` etc etc.")
parser.add_argument('--silent', '-s', type=bool, default=False, help="Suppress missing metadata warnings and errors "
"to stdout/stderr")
args = parser.parse_args()
return args
def main():
"""
Executes cli() and uses Dcm2niix4PET class to convert a folder containing dicoms into nifti + json.
:return: None
"""
# collect args
cli_args = cli()
# instantiate class
converter = Dcm2niix4PET(
image_folder=expand_path(cli_args.folder),
destination_path=expand_path(cli_args.destination_path),
metadata_path=expand_path(cli_args.metadata_path),
metadata_translation_script=expand_path(cli_args.translation_script_path),
additional_arguments=cli_args.kwargs,
silent=cli_args.silent)
converter.convert()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3218155
|
<reponame>the-aerospace-corporation/ITU-Rpy<gh_stars>0
__all__ = ['itu453', 'itu530', 'itu618', 'itu676', 'itu835', 'itu836',
'itu837', 'itu838', 'itu839', 'itu840', 'itu1144', 'itu1510',
'itu1511', 'itu1853']
import itur.models.itu453
import itur.models.itu530
import itur.models.itu618
import itur.models.itu835
import itur.models.itu836
import itur.models.itu837
import itur.models.itu838
import itur.models.itu839
import itur.models.itu840
import itur.models.itu1144
import itur.models.itu1510
import itur.models.itu1511
import itur.models.itu1853
|
StarcoderdataPython
|
1697440
|
<gh_stars>0
#!/usr/bin/env python2
#
# sumo-launchd.py -- SUMO launcher daemon for use with TraCI clients
# Copyright (C) 2006-2012 <NAME> <<EMAIL>>
#
# Documentation for these modules is at http://veins.car2x.org/
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
For each incoming TCP connection the daemon receives a launch configuration.
It starts SUMO accordingly, then proxies all TraCI Messages.
The launch configuration must be sent in the very first TraCI message.
This message must contain a single command, CMD_FILE_SEND and be used to
send a file named "sumo-launchd.launch.xml", which has the following
structure:
<?xml version="1.0"?>
<launch>
<basedir path="/home/sommer/src/inet/examples/erlangen6" />
<seed value="1234" />
<copy file="net.net.xml" />
<copy file="routes.rou.xml" />
<copy file="sumo.sumo.cfg" type="config" />
</launch>
"""
import os
import sys
import tempfile
import shutil
import socket
import struct
import subprocess
import time
import signal
import exceptions
import thread
import xml.dom.minidom
import select
import logging
import atexit
from optparse import OptionParser
_API_VERSION = 1
_LAUNCHD_VERSION = 'sumo-launchd.py 1.00'
_CMD_GET_VERSION = 0x00
_CMD_FILE_SEND = 0x75
_SUMO_HOST = '127.0.0.1'
_SUMO_PORT = 10002
class UnusedPortLock:
lock = thread.allocate_lock()
def __init__(self):
self.acquired = False
def __enter__(self):
self.acquire()
def __exit__(self, exc_type, exc_value, traceback):
self.release()
def acquire(self):
if not self.acquired:
logging.debug("Claiming lock on port")
UnusedPortLock.lock.acquire()
self.acquired = True
def release(self):
if self.acquired:
logging.debug("Releasing lock on port")
UnusedPortLock.lock.release()
self.acquired = False
def find_unused_port():
"""
Return an unused port number.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.bind(('127.0.0.1', 0))
sock.listen(socket.SOMAXCONN)
ipaddr, port = sock.getsockname()
sock.close()
return port
def forward_connection(client_socket, server_socket, process):
"""
Proxy connections until either socket runs out of data or process terminates.
"""
logging.debug("Starting proxy mode")
client_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
server_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
do_exit = False
while not do_exit:
(r, w, e) = select.select([client_socket, server_socket], [], [client_socket, server_socket], 1)
if client_socket in e:
do_exit = True
if server_socket in e:
do_exit = True
if client_socket in r:
try:
data = client_socket.recv(65535)
if data == "":
do_exit = True
else:
server_socket.send(data)
except:
do_exit = True
if server_socket in r:
try:
data = server_socket.recv(65535)
if data == "":
do_exit = True
else:
client_socket.send(data)
except:
do_exit = True
logging.debug("Done with proxy mode")
def parse_launch_configuration(launch_xml_string):
"""
Returns tuple of options set in launch configuration
"""
p = xml.dom.minidom.parseString(launch_xml_string)
# get root node "launch"
launch_node = p.documentElement
if (launch_node.tagName != "launch"):
raise RuntimeError("launch config root element not <launch>, but <%s>" % launch_node.tagName)
# get "launch.basedir"
basedir = ""
basedir_nodes = [x for x in launch_node.getElementsByTagName("basedir") if x.parentNode==launch_node]
if len(basedir_nodes) > 1:
raise RuntimeError('launch config contains %d <basedir> nodes, expected at most 1' % (len(basedir_nodes)))
elif len(basedir_nodes) == 1:
basedir = basedir_nodes[0].getAttribute("path")
logging.debug("Base dir is %s" % basedir)
# get "launch.seed"
seed = 23423
seed_nodes = [x for x in launch_node.getElementsByTagName("seed") if x.parentNode==launch_node]
if len(seed_nodes) > 1:
raise RuntimeError('launch config contains %d <seed> nodes, expected at most 1' % (len(seed_nodes)))
elif len(seed_nodes) == 1:
seed = int(seed_nodes[0].getAttribute("value"))
logging.debug("Seed is %d" % seed)
# get list of "launch.copy" entries
copy_nodes = [x for x in launch_node.getElementsByTagName("copy") if x.parentNode==launch_node]
return (basedir, copy_nodes, seed)
def run_sumo(runpath, sumo_command, shlex, config_file_name, remote_port, seed, client_socket, unused_port_lock, keep_temp):
"""
Actually run SUMO.
"""
# create log files
sumoLogOut = open(os.path.join(runpath, 'sumo-launchd.out.log'), 'w')
sumoLogErr = open(os.path.join(runpath, 'sumo-launchd.err.log'), 'w')
# start SUMO
sumo_start = int(time.time())
sumo_end = None
sumo_returncode = -1
sumo_status = None
try:
cmd = []
if shlex:
import shlex
cmd = shlex.split(sumo_command.replace('{}', '-c ' + unicode(config_file_name).encode()))
else:
cmd = [sumo_command, "-c", config_file_name]
logging.info("Starting SUMO (%s) on port %d, seed %d" % (" ".join(cmd), remote_port, seed))
sumo = subprocess.Popen(cmd, cwd=runpath, stdin=None, stdout=sumoLogOut, stderr=sumoLogErr)
sumo_socket = None
connected = False
tries = 1
while not connected:
try:
logging.debug("Connecting to SUMO (%s) on port %d (try %d)" % (" ".join(cmd), remote_port, tries))
sumo_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# ----- DEFAULT: default code. -----
# sumo_socket.connect(('127.0.0.1', remote_port))
# ----- START: my code. -----
global _SUMO_PORT
global _SUMO_HOST
sumo_socket.connect((_SUMO_HOST, _SUMO_PORT))
# ----- END: my code. -----
handle_set_order(sumo_socket, 1)
break
except socket.error, e:
logging.debug("Error (%s)" % e)
if tries >= 10:
raise
time.sleep(tries * 0.25)
tries += 1
unused_port_lock.release()
forward_connection(client_socket, sumo_socket, sumo)
client_socket.close()
sumo_socket.close()
logging.debug("Done with proxy mode, killing SUMO")
thread.start_new_thread(subprocess.Popen.wait, (sumo, ))
time.sleep(0.5)
if sumo.returncode == None:
logging.debug("SIGTERM")
os.kill(sumo.pid, signal.SIGTERM)
time.sleep(0.5)
if sumo.returncode == None:
logging.debug("SIGKILL")
os.kill(sumo.pid, signal.SIGKILL)
time.sleep(1)
if sumo.returncode == None:
logging.debug("Warning: SUMO still not dead. Waiting 10 more seconds...")
time.sleep(10)
logging.info("Done running SUMO")
sumo_returncode = sumo.returncode
if sumo_returncode == 0:
sumo_status = "Done."
elif sumo_returncode != None:
sumo_status = "Exited with error code %d" % sumo_returncode
else:
sumo_returncode = -1
sumo_status = "Undef"
except OSError, e:
sumo_status = "Could not start SUMO (%s): %s" % (" ".join(cmd), e)
except exceptions.SystemExit:
sumo_status = "Premature launch script exit"
except exceptions.KeyboardInterrupt:
sumo_status = "Keyboard interrupt."
except socket.error, e:
sumo_status = "Could not connect to SUMO (%s). Might be protected by a personal firewall or crashed before a connection could be established." % e
except:
raise
# statistics
sumo_end = int(time.time())
# close log files
sumoLogOut.close()
sumoLogErr.close()
# read log files
sumoLogOut = open(os.path.join(runpath, 'sumo-launchd.out.log'), 'r')
sumoLogErr = open(os.path.join(runpath, 'sumo-launchd.err.log'), 'r')
sumo_stdout = sumoLogOut.read()
sumo_stderr = sumoLogErr.read()
sumoLogOut.close()
sumoLogErr.close()
# prepare result XML
CDATA_START = '<![CDATA['
CDATA_END = ']]>'
result_xml = '<?xml version="1.0"?>\n'
result_xml += '<status>\n'
result_xml += '\t<%s>%s</%s>\n' % ("exit-code", sumo_returncode, "exit-code")
if sumo_start:
result_xml += '\t<%s>%s</%s>\n' % ("start", sumo_start, "start")
if sumo_end:
result_xml += '\t<%s>%s</%s>\n' % ("end", sumo_end, "end")
if sumo_status:
result_xml += '\t<%s>%s</%s>\n' % ("status", sumo_status, "status")
result_xml += '\t<%s>%s</%s>\n' % ("stdout", CDATA_START + sumo_stdout.replace(CDATA_END, CDATA_END + CDATA_END + CDATA_START) + CDATA_END, "stdout")
result_xml += '\t<%s>%s</%s>\n' % ("stderr", CDATA_START + sumo_stderr.replace(CDATA_END, CDATA_END + CDATA_END + CDATA_START) + CDATA_END, "stderr")
result_xml += '</status>\n'
return result_xml
def set_sumoconfig_option(config_parser, config_xml, section, key, value):
"""
Add or replace named config option (currently ignores given section)
"""
key_nodes = config_xml.getElementsByTagName(key)
if len(key_nodes) > 1:
raise RuntimeError('config file contains %d <%s> nodes, expected at most 1' % (len(key_nodes), key))
elif len(key_nodes) < 1:
key_node = config_parser.createElement(key)
key_node.setAttribute("value", str(value))
config_xml.appendChild(key_node)
else:
key_node = key_nodes[0]
for n in key_node.childNodes:
key_node.removeChild(n)
key_node.setAttribute("value", str(value))
def copy_and_modify_files(basedir, copy_nodes, runpath, remote_port, seed):
"""
Copy (and modify) files, return config file name
"""
config_file_name = None
for copy_node in copy_nodes:
file_src_name = None
file_dst_name = None
file_contents = None
# Read from disk?
if copy_node.hasAttribute("file"):
file_src_name = copy_node.getAttribute("file")
file_src_path = os.path.join(basedir, file_src_name)
# Sanity check
if file_src_name.find("/") != -1:
raise RuntimeError('name of file to copy "%s" contains a "/"' % file_src_name)
if not os.path.exists(file_src_path):
raise RuntimeError('file "%s" does not exist' % file_src_path)
# Read contents
file_handle = open(file_src_path, 'rb')
file_contents = file_handle.read()
file_handle.close()
# By now we need a destination name and contents
if copy_node.hasAttribute("name"):
file_dst_name = copy_node.getAttribute("name")
elif file_src_name:
file_dst_name = file_src_name
else:
raise RuntimeError('<copy> node with no destination name: %s' % copy_node.toxml())
if file_contents == None:
raise RuntimeError('<copy> node with no contents: %s' % copy_node.toxml())
# Is this our config file?
if copy_node.getAttribute("type") == "config":
config_file_name = file_dst_name
config_parser = xml.dom.minidom.parseString(file_contents)
config_xml = config_parser.documentElement
set_sumoconfig_option(config_parser, config_xml, "traci_server", "remote-port", remote_port)
set_sumoconfig_option(config_parser, config_xml, "random_number", "seed", seed)
set_sumoconfig_option(config_parser, config_xml, "random_number", "random", "false")
file_contents = config_xml.toxml()
# Write file into rundir
file_dst_path = os.path.join(runpath, file_dst_name)
file_handle = open(file_dst_path, "wb")
file_handle.write(file_contents)
file_handle.close()
# make sure that we copied a config file
if not config_file_name:
raise RuntimeError('launch config contained no <copy> node with type="config"')
return config_file_name
def handle_launch_configuration(sumo_command, shlex, launch_xml_string, client_socket, keep_temp):
"""
Process launch configuration in launch_xml_string.
"""
# create temporary directory
logging.debug("Creating temporary directory...")
runpath = tempfile.mkdtemp(prefix="sumo-launchd-tmp-")
if not runpath:
raise RuntimeError("Could not create temporary directory")
if not os.path.exists(runpath):
raise RuntimeError('Temporary directory "%s" does not exist, even though it should have been created' % runpath)
logging.debug("Temporary dir is %s" % runpath)
result_xml = None
unused_port_lock = UnusedPortLock()
try:
# parse launch configuration
(basedir, copy_nodes, seed) = parse_launch_configuration(launch_xml_string)
# find remote_port
logging.debug("Finding free port number...")
unused_port_lock.__enter__()
remote_port = find_unused_port()
# remote_port = 10000
logging.debug("...found port %d" % remote_port)
# copy (and modify) files
config_file_name = copy_and_modify_files(basedir, copy_nodes, runpath, remote_port, seed)
# run SUMO
result_xml = run_sumo(runpath, sumo_command, shlex, config_file_name, remote_port, seed, client_socket, unused_port_lock, keep_temp)
finally:
unused_port_lock.__exit__()
# clean up
if not keep_temp:
logging.debug("Cleaning up")
shutil.rmtree(runpath)
else:
logging.debug("Not cleaning up %s" % runpath)
logging.debug('Result: "%s"' % result_xml)
return result_xml
def handle_get_version(conn):
"""
process a "get version" command received on the connection
"""
logging.debug('Got CMD_GETVERSION')
# Send OK response and version info
response = struct.pack("!iBBBiBBii", 4+1+1+1+4 + 1+1+4+4+len(_LAUNCHD_VERSION), 1+1+1+4, _CMD_GET_VERSION, 0x00, 0x00, 1+1+4+4+len(_LAUNCHD_VERSION), _CMD_GET_VERSION, _API_VERSION, len(_LAUNCHD_VERSION)) + _LAUNCHD_VERSION
conn.send(response)
def handle_set_order(conn, order=2):
"""
process a "set order" command received on the connection
"""
# _sendCmd method in connection.py
cmdID = 0x03
varID = None
objID = None
format = "I"
values = order
### _pack method in connection.py
def tmp_pack(format, *values):
packed = bytes()
for f, v in zip(format, values):
packed += struct.pack("!i", int(v))
return packed
packed = tmp_pack(format, values)
length = len(packed) + 1 + 1 # length and command
if varID is not None:
if isinstance(varID, tuple): # begin and end of a subscription
length += 8 + 8 + 4 + len(objID)
else:
length += 1 + 4 + len(objID)
body_string = bytes()
if length <= 255:
body_string += struct.pack("!BB", length, cmdID)
else:
body_string += struct.pack("!BiB", 0, length + 4, cmdID)
if varID is not None:
if isinstance(varID, tuple):
body_string += struct.pack("!dd", *varID)
else:
body_string += struct.pack("!B", varID)
body_string += struct.pack("!i", len(objID)) + objID.encode("latin1")
body_string += packed
# _sendExact method in connection.py
def tmp_sendExact(conn, body_string):
length = struct.pack("!i", len(body_string) + 4)
conn.send(length + body_string)
tmp_sendExact(conn, body_string)
data = conn.recv(65535)
def read_launch_config(conn):
"""
Read (and return) launch configuration from socket
"""
# Get TraCI message length
msg_len_buf = ""
while len(msg_len_buf) < 4:
msg_len_buf += conn.recv(4 - len(msg_len_buf))
msg_len = struct.unpack("!i", msg_len_buf)[0] - 4
logging.debug("Got TraCI message of length %d" % msg_len)
# Get TraCI command length
cmd_len_buf = ""
cmd_len_buf += conn.recv(1)
cmd_len = struct.unpack("!B", cmd_len_buf)[0] - 1
if cmd_len == -1:
cmd_len_buf = ""
while len(cmd_len_buf) < 4:
cmd_len_buf += conn.recv(4 - len(cmd_len_buf))
cmd_len = struct.unpack("!i", cmd_len_buf)[0] - 5
logging.debug("Got TraCI command of length %d" % cmd_len)
# Get TraCI command ID
cmd_id_buf = ""
cmd_id_buf += conn.recv(1)
cmd_id = struct.unpack("!B", cmd_id_buf)[0]
logging.debug("Got TraCI command 0x%x" % cmd_id)
if cmd_id == _CMD_GET_VERSION:
# handle get version command
handle_get_version(conn)
# ...and try reading the launch config again
return read_launch_config(conn)
elif cmd_id != _CMD_FILE_SEND:
raise RuntimeError("Expected CMD_FILE_SEND (0x%x), but got 0x%x" % (_CMD_FILE_SEND, cmd_id))
# Get File name
fname_len_buf = ""
while len(fname_len_buf) < 4:
fname_len_buf += conn.recv(4 - len(fname_len_buf))
fname_len = struct.unpack("!i", fname_len_buf)[0]
fname = conn.recv(fname_len)
if fname != "sumo-launchd.launch.xml":
raise RuntimeError('Launch configuration must be named "sumo-launchd.launch.xml", got "%s" instead.' % fname)
logging.debug('Got CMD_FILE_SEND for "%s"' % fname)
# Get File contents
data_len_buf = ""
while len(data_len_buf) < 4:
data_len_buf += conn.recv(4 - len(data_len_buf))
data_len = struct.unpack("!i", data_len_buf)[0]
data = conn.recv(data_len)
logging.debug('Got CMD_FILE_SEND with data "%s"' % data)
# Send OK response
response = struct.pack("!iBBBi", 4+1+1+1+4, 1+1+1+4, _CMD_FILE_SEND, 0x00, 0x00)
conn.send(response)
return data
def handle_connection(sumo_command, shlex, conn, addr, keep_temp):
"""
Handle incoming connection.
"""
logging.debug("Handling connection from %s on port %d" % addr)
try:
data = read_launch_config(conn)
handle_launch_configuration(sumo_command, shlex, data, conn, keep_temp)
except Exception, e:
logging.error("Aborting on error: %s" % e)
finally:
logging.debug("Closing connection from %s on port %d" % addr)
conn.close()
def wait_for_connections(sumo_command, shlex, sumo_port, bind_address, do_daemonize, do_kill, pidfile, keep_temp):
"""
Open TCP socket, wait for connections, call handle_connection for each
"""
if do_kill:
check_kill_daemon(pidfile)
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listener.bind((bind_address, sumo_port))
listener.listen(5)
logging.info("Listening on port %d" % sumo_port)
if do_daemonize:
logging.info("Detaching to run as daemon")
daemonize(pidfile)
try:
while True:
conn, addr = listener.accept()
logging.debug("Connection from %s on port %d" % addr)
thread.start_new_thread(handle_connection, (sumo_command, shlex, conn, addr, keep_temp))
except exceptions.SystemExit:
logging.warning("Killed.")
except exceptions.KeyboardInterrupt:
logging.warning("Keyboard interrupt.")
except:
raise
finally:
# clean up
logging.info("Shutting down.")
listener.close()
def check_kill_daemon(pidfile):
# check pidfile, see if the daemon is still running
try:
pidfileh = open(pidfile, 'r')
old_pid = int(pidfileh.readline())
if old_pid:
logging.info("There might already be a daemon running with PID %d. Sending SIGTERM." % old_pid)
try:
os.kill(old_pid, signal.SIGTERM)
time.sleep(1)
except OSError, e:
pass
pidfileh.close()
except IOError, e:
pass
def daemonize(pidfile):
"""
detach process, keep it running in the background
"""
# fork and exit parent process
try:
child_pid = os.fork()
if child_pid > 0:
# parent can exit
sys.exit(0)
elif child_pid == 0:
# child does nothing
pass
else:
logging.error("Aborting. Failed to fork: unexpected child PID %d" % child_pid)
sys.exit(1)
except OSError, e:
logging.error("Aborting. Failed to fork: %s" % e.strerror)
sys.exit(1)
# get rid of any outside influence
os.setsid()
# fork again to prevent zombies
try:
child_pid = os.fork()
if child_pid > 0:
# parent can exit
sys.exit(0)
elif child_pid == 0:
# child creates PIDFILE
logging.info("Fork successful. PID is %d" % os.getpid())
if pidfile:
pidfileh = open(pidfile, 'w')
pidfileh.write('%d\n' % os.getpid())
pidfileh.close()
atexit.register(os.remove, pidfile)
else:
logging.error("Aborting. Failed to fork: unexpected child PID %d" % child_pid)
sys.exit(1)
except OSError, e:
logging.error("Aborting. Failed to fork: %s" % e.strerror)
sys.exit(1)
def main():
"""
Program entry point when run interactively.
"""
# Option handling
parser = OptionParser()
# parser.add_option("-c", "--command", dest="command", default="sumo", help="run SUMO as COMMAND [default: %default]", metavar="COMMAND")
parser.add_option("-c", "--command", dest="command", default="sumo", help="run SUMO as COMMAND [default: %default]", metavar="COMMAND")
parser.add_option("-s", "--shlex", dest="shlex", default=False, action="store_true", help="treat command as shell string to execute, replace {} with command line parameters [default: no]")
parser.add_option("-p", "--port", dest="port", type="int", default=9999, action="store", help="listen for connections on PORT [default: %default]", metavar="PORT")
parser.add_option("-b", "--bind", dest="bind", default="127.0.0.1", help="bind to ADDRESS [default: %default]", metavar="ADDRESS")
parser.add_option("--sp", "--sumo-port", dest="sumo_port", type="int", default=10002, action="store", help="listen for connections on SUMO PORT [default: %default]", metavar="PORT")
parser.add_option("--sh", "--sumo-host", dest="sumo_host", default="127.0.0.1", help="bind to SUMO HOST [default: %default]", metavar="ADDRESS")
parser.add_option("-L", "--logfile", dest="logfile", default=os.path.join(tempfile.gettempdir(), "sumo-launchd.log"), help="log messages to LOGFILE [default: %default]", metavar="LOGFILE")
parser.add_option("-v", "--verbose", dest="count_verbose", default=0, action="count", help="increase verbosity [default: don't log infos, debug]")
parser.add_option("-q", "--quiet", dest="count_quiet", default=0, action="count", help="decrease verbosity [default: log warnings, errors]")
parser.add_option("-d", "--daemon", dest="daemonize", default=False, action="store_true", help="detach and run as daemon [default: no]")
parser.add_option("-k", "--kill", dest="kill", default=False, action="store_true", help="send SIGTERM to running daemon first [default: no]")
parser.add_option("-P", "--pidfile", dest="pidfile", default=os.path.join(tempfile.gettempdir(), "sumo-launchd.pid"), help="if running as a daemon, write pid to PIDFILE [default: %default]", metavar="PIDFILE")
parser.add_option("-t", "--keep-temp", dest="keep_temp", default=False, action="store_true", help="keep all temporary files [default: no]")
(options, args) = parser.parse_args()
_LOGLEVELS = (logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG)
loglevel = _LOGLEVELS[max(0, min(1 + options.count_verbose - options.count_quiet, len(_LOGLEVELS)-1))]
# catch SIGTERM to exit cleanly when we're kill-ed
signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1))
# configure logging
logging.basicConfig(filename=options.logfile, level=loglevel)
if not options.daemonize:
logging.getLogger().addHandler(logging.StreamHandler())
logging.debug("Logging to %s" % options.logfile)
if args:
logging.warning("Superfluous command line arguments: \"%s\"" % " ".join(args))
# ----- START: my code: read sumo host -----
global _SUMO_PORT
global _SUMO_HOST
_SUMO_PORT = options.sumo_port
_SUMO_HOST = options.sumo_host
# ----- END: my code: read sumo host -----
# this is where we'll spend our time
wait_for_connections(options.command, options.shlex, options.port, options.bind, options.daemonize, options.kill, options.pidfile, options.keep_temp)
# Start main() when run interactively
if __name__ == '__main__':
main()
#!/usr/bin/env python2
|
StarcoderdataPython
|
8153
|
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import mica.archive.asp_l1
mica.archive.asp_l1.main()
|
StarcoderdataPython
|
3387721
|
<filename>tests/test_gizmo_query.py
import unittest
from pyley import GraphObject
class GizmoQueryTests(unittest.TestCase):
def setUp(self):
self.opts = dict(url='http://localhost:64210/api/v1/query/gizmo')
def test_vertex_query(self):
g = GraphObject()
query = g.Vertex()
self.assertEqual(query.build(), "g.V()")
def test_vertex_query_with_parameters(self):
g = GraphObject()
query = g.V("<NAME>")
actual = query.build()
self.assertEqual(actual, "g.V('<NAME>')")
def test_morphism_query(self):
g = GraphObject()
query = g.Morphism()
self.assertEqual(query.build(), "g.Morphism()")
def test_out_query(self):
g = GraphObject()
query = g.V().Out('name')
actual = query.build()
self.assertEqual(actual, "g.V().Out('name')")
def test_out_query_with_predicate(self):
g = GraphObject()
query = g.V().Out(g.Vertex())
actual = query.build()
self.assertEqual(actual, "g.V().Out(g.V())")
def test_out_query_with_predicate_as_dict_and_label(self):
g = GraphObject()
query = g.V().Out(['foo', 'bar'], 'qux')
actual = query.build()
self.assertEqual(actual, "g.V().Out(['foo', 'bar'], 'qux')")
def test_out_query_with_predicate_as_none_and_label_as_dict(self):
g = GraphObject()
query = g.V().Out(None, ['foo', 'bar'])
actual = query.build()
self.assertEqual(actual, "g.V().Out(null, ['foo', 'bar'])")
def test_in_query(self):
g = GraphObject()
query = g.V().In("name").All()
actual = query.build()
self.assertEqual(actual, "g.V().In('name').All()")
def test_both(self):
g = GraphObject()
query = g.V("F").Both("follows")
actual = query.build()
print(actual)
self.assertEqual(actual, "g.V('F').Both('follows')")
def test_is(self):
g = GraphObject()
query = g.V().Is('B', 'C')
actual = query.build()
self.assertEqual(actual, "g.V().Is('B', 'C')")
def test_tag(self):
g = GraphObject()
query = g.V().Tag('B', 'C')
actual = query.build()
self.assertEqual(actual, 'g.V().Tag(["B", "C"])')
def test_save(self):
g = GraphObject()
query = g.V().Save('B', 'C')
actual = query.build()
self.assertEqual(actual, "g.V().Save('B', 'C')")
def test_back(self):
g = GraphObject()
query = g.V().Back('B')
actual = query.build()
self.assertEqual(actual, "g.V().Back('B')")
def test_all_query(self):
g = GraphObject()
query = g.V("<NAME>").All()
actual = query.build()
self.assertEqual(actual, "g.V('<NAME>').All()")
def test_has_query(self):
g = GraphObject()
query = g.V().Has("name", "Casablanca").All()
actual = query.build()
self.assertEqual(actual, "g.V().Has('name', 'Casablanca').All()")
def test_complex_query1(self):
g = GraphObject()
query = g.V().Has("name", "Casablanca") \
.Out("/film/film/starring") \
.Out("/film/performance/actor") \
.Out("name") \
.All()
actual = query.build()
self.assertEqual(actual, "g.V().Has('name', 'Casablanca')"
".Out('/film/film/starring')"
".Out('/film/performance/actor')"
".Out('name')"
".All()")
def test_follow_with_morphism_path_and_typed_query(self):
g = GraphObject()
film_to_actor = g.Morphism().Out("/film/film/starring").Out("/film/performance/actor")
query = g.V().Has("name", "Casablanca").Follow(film_to_actor).Out("name").All()
actual = query.build()
self.assertEqual(actual, "g.V().Has('name', 'Casablanca')"
".Follow("
"g.Morphism().Out('/film/film/starring').Out('/film/performance/actor')"
").Out('name')"
".All()")
def test_follow_with_morphism_path_and_str_query(self):
g = GraphObject()
film_to_actor = g.Morphism().Out("/film/film/starring").Out("/film/performance/actor")
query = g.V().Has("name", "Casablanca").Follow(film_to_actor.build()).Out("name").All()
actual = query.build()
self.assertEqual(actual, "g.V().Has('name', 'Casablanca')"
".Follow("
"g.Morphism().Out('/film/film/starring').Out('/film/performance/actor')"
").Out('name')"
".All()")
def test_follow_with_vertex(self):
g = GraphObject()
with self.assertRaises(Exception):
g.V().Follow(g.V()).build()
def test_union(self):
g = GraphObject()
query = g.Vertex().Union(g.Vertex())
actual = query.build()
self.assertEqual(actual, "g.V().Union(g.V())")
def test_intersect(self):
g = GraphObject()
query = g.Vertex().Intersect(g.Vertex())
actual = query.build()
self.assertEqual(actual, "g.V().Intersect(g.V())")
def test_get_limit(self):
g = GraphObject()
query = g.Vertex().GetLimit(5)
actual = query.build()
self.assertEqual(actual, "g.V().GetLimit(5)")
def test_emit(self):
g = GraphObject()
query = g.Emit({'name': 'John', 'lastName': 'DOE', 'age': 25})
self.assertEqual(query, 'g.Emit({"age": 25, "lastName": "DOE", "name": "John"})')
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
2978
|
<filename>src/printReport.py
from __future__ import print_function
from connection import *
from jinja2 import Environment, FileSystemLoader
import webbrowser
def print_report(id):
env = Environment(loader=FileSystemLoader('.'))
template = env.get_template("src/template.html")
cursor = db.cursor(MySQLdb.cursors.DictCursor)
sql = "SELECT e.*, b.*, d.`depName` "
sql += "FROM `employees` e, `baccounts` b, `departments` d "
sql +="WHERE e.`empID` = b.`empdb_empID` "
sql +="AND e.`depDB_depID` = d.`depID` "
sql +="AND e.`empID` = '"+ id +"'"
# print(sql)
cursor.execute(sql)
result = cursor.fetchall()
# print(result[0])
result = result[0]
print(result)
template_vars = {"empID" : result['empID'],
"firstName" : result['firstName'],
"lastName" : result['lastName'],
"address" : result['address'],
"pin" : result['pin'],
"state" : result['state'],
"adharID" : result['adharID'],
"panID" : result['panID'],
"designation" : result['designation'],
"unit" : result['unit'],
"email" : result['email'],
"mobile" : result['mobile'],
"depName" : result['depName'],
"IFSC" : result['IFSC'],
"ACNo" : result['ACNo'],
"BranchAdd" : result['BranchAdd']
}
content = template.render(template_vars)
with open('print.html', 'w') as static_file:
static_file.write(content)
webbrowser.open_new_tab('print.html')
# self.entry_text(self.entry_name, result['firstName']+" "+result['lastName'] )
# self.entry_text(self.entry_EmpID, result['empID'])
# self.entry_text(self.entry_EmpName, result['firstName']+" "+result['lastName'])
# self.entry_text(self.entry_personalno, result['empID'])
# self.entry_text(self.entry_address,result['address'] )
# self.entry_text(self.entry_pin, result['pin'])
# self.entry_text(self.entry_state, result['state'])
# self.entry_text(self.entry_adhar, result['adharID'])
# self.entry_text(self.entry_pan, result['panID'])
# self.entry_text(self.entry_designation, result['designation'])
# self.entry_text(self.entry_unit, result['unit'])
# self.entry_text(self.entry_emailid, result['email'])
# self.entry_text(self.entry_mobile, result['mobile'])
# self.entry_text(self.entry_department, result['depName'])
# self.entry_text(self.entry_ifsc, result['IFSC'])
# self.entry_text(self.enrtry_acno, result['ACNo'])
# self.entry_text(self.entry_branch, result['BranchAdd'])
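# Example usage (sketch; the employee ID value is illustrative):
#
#   print_report('1001')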
|
StarcoderdataPython
|
1762189
|
<filename>benchmark/operations/test/test_wait.py
# Copyright 2015 ClusterHQ Inc. See LICENSE file for details.
from twisted.internet.task import Clock
from flocker.testtools import TestCase
from benchmark.operations import Wait
class WaitOperationTests(TestCase):
"""
Test Wait operation
"""
def test_wait(self):
"""
Wait operation fires after specified time.
"""
seconds = 10
clock = Clock()
op = Wait(clock, None, wait_seconds=seconds)
probe = op.get_probe()
d = probe.run()
d.addCallback(lambda ignored: probe.cleanup)
self.assertNoResult(d)
# Time passes
clock.advance(seconds)
self.successResultOf(d)
|
StarcoderdataPython
|
3220460
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from .base_model_ import Model
from .. import util
class DocumentInfoPageEntry(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, height: float=None, width: float=None): # noqa: E501
"""DocumentInfoPageEntry - a model defined in Swagger
:param height: The height of this DocumentInfoPageEntry. # noqa: E501
:type height: float
:param width: The width of this DocumentInfoPageEntry. # noqa: E501
:type width: float
"""
self.swagger_types = {
'height': float,
'width': float
}
self.attribute_map = {
'height': 'height',
'width': 'width'
}
self._height = height
self._width = width
@classmethod
def from_dict(cls, dikt) -> 'DocumentInfoPageEntry':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The DocumentInfoPageEntry of this DocumentInfoPageEntry. # noqa: E501
:rtype: DocumentInfoPageEntry
"""
return util.deserialize_model(dikt, cls)
@property
def height(self) -> float:
"""Gets the height of this DocumentInfoPageEntry.
The height of the page, in PDF units. # noqa: E501
:return: The height of this DocumentInfoPageEntry.
:rtype: float
"""
return self._height
@height.setter
def height(self, height: float):
"""Sets the height of this DocumentInfoPageEntry.
The height of the page, in PDF units. # noqa: E501
:param height: The height of this DocumentInfoPageEntry.
:type height: float
"""
if height is None:
raise ValueError("Invalid value for `height`, must not be `None`") # noqa: E501
self._height = height
@property
def width(self) -> float:
"""Gets the width of this DocumentInfoPageEntry.
The width of the page, in PDF units. # noqa: E501
:return: The width of this DocumentInfoPageEntry.
:rtype: float
"""
return self._width
@width.setter
def width(self, width: float):
"""Sets the width of this DocumentInfoPageEntry.
The width of the page, in PDF units. # noqa: E501
:param width: The width of this DocumentInfoPageEntry.
:type width: float
"""
if width is None:
raise ValueError("Invalid value for `width`, must not be `None`") # noqa: E501
self._width = width
|
StarcoderdataPython
|
1655829
|
#!/usr/bin/env python
import sh
import sys
import flask.ext.script
import server.app as server
import wsgi
instance = server.flask_instance
manager = flask.ext.script.Manager(instance)
@manager.command
def run():
wsgi.run()
@manager.command
def docker_build():
sh.docker.build('-t', 'webapp', '.', _out=sys.stdout)
@manager.command
def docker_run(environment):
sh.docker.run('-d',
'-e', 'ENVIRONMENT=%s' % environment,
'-p', '127.0.0.1:80:80',
'webapp')
if __name__ == '__main__':
manager.run()
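# Example invocations (sketch; command names follow the decorated functions above):
#
#   python manage.py run
#   python manage.py docker_build
#   python manage.py docker_run production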
|
StarcoderdataPython
|
42321
|
# proxy module
from __future__ import absolute_import
from mayavi.filters.cell_derivatives import *
|
StarcoderdataPython
|
3242771
|
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from . import views
urlpatterns=patterns('graficas.views',
url(r'^$', 'graficas', name='graficas'),
)
|
StarcoderdataPython
|
1630062
|
# Copyright (c) 2020 NVIDIA Corporation
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""Test if config files are imported correctly."""
import hydra
from omegaconf import DictConfig
from context import graspsampling
from graspsampling import utilities, sampling
@hydra.main(config_path='../conf', config_name="config")
def my_app(cfg: DictConfig) -> None:
"""Test if config files are imported correctly."""
print(cfg.pretty())
object_mesh = utilities.instantiate_mesh(**cfg.object)
gripper = hydra.utils.instantiate(cfg.gripper)
sampler = hydra.utils.instantiate(cfg.sampler, gripper=gripper, object_mesh=object_mesh)
results = sampling.collision_free_grasps(gripper, object_mesh, sampler, cfg.number_of_grasps)
output = hydra.utils.instantiate(cfg.output)
output.write(results)
if __name__ == "__main__":
# load configuration via hydra
my_app()
|
StarcoderdataPython
|
1668680
|
from collections import deque
import math
import sys
import time
class Timer:
def __init__(self, f=sys.stdout):
self._start_time = time.monotonic()
self._time_history = deque([])
self._file = f
def _get_current_time(self):
return time.monotonic() - self._start_time
def add_received_data(self, data):
current_time = self._get_current_time()
current_second = math.floor(current_time)
while (current_second >= len(self._time_history)):
if (len(self._time_history) > 0):
print(f'{len(self._time_history)}, {self._time_history[-1]}', file=self._file)
else:
print('Second, Received bits', file=self._file)
self._time_history.append(0)
self._time_history.append(self._time_history.pop() + len(data) * 8)
|
StarcoderdataPython
|
104319
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import pprint
if len(sys.argv) < 2:
print '''Please provide a top-level path. Usage:
% find-directories-without-sfv.py .
OR
% find-directories-without-sfv.py /somewhere'''
sys.exit(1)
rootPath = sys.argv[1]
for path in os.walk(rootPath):
dirName = path[0]
dirContents = path[2]
hasSFV = False
for item in dirContents:
if 'sfv' in item:
hasSFV = True
if not hasSFV:
print dirName
|
StarcoderdataPython
|
183263
|
<gh_stars>0
nu = int(input('Enter a number: '))
an = nu - 1
su = nu + 1
print('The predecessor of {} is {} and the\nsuccessor is {}'.format(nu, an, su))
|
StarcoderdataPython
|
27551
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01a_datasets_download.ipynb (unless otherwise specified).
__all__ = ['get_cifar10', 'get_oxford_102_flowers', 'get_cub_200_2011']
# Internal Cell
import glob
import json
from pathlib import Path
import os
import subprocess
import tarfile
import urllib
import zlib
# Internal Cell
def _download_url(url, root, filename=None):
"""Download a file from a url and place it in root.
Args:
url (str): URL to download file from
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the basename of the URL
"""
root = os.path.expanduser(root)
if not filename:
filename = os.path.basename(url)
fpath = os.path.join(root, filename)
os.makedirs(root, exist_ok=True)
if not os.path.isfile(fpath):
try:
print('Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(url, fpath)
except (urllib.error.URLError, IOError) as e:
if url[:5] == 'https':
url = url.replace('https:', 'http:')
print('Failed download. Trying https -> http instead.'
' Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(url, fpath)
else:
print(f'File {filename} already exists, skip download.')
# Internal Cell
def _extract_tar(tar_path, output_dir):
try:
print('Extracting...')
with tarfile.open(tar_path) as tar:
tar.extractall(output_dir)
except (tarfile.TarError, IOError, zlib.error) as e:
print('Failed to extract!', e)
# Cell
def get_cifar10(output_dir):
output_dir = Path(output_dir)
dataset_dir = output_dir / 'cifar10'
_download_url(url='https://s3.amazonaws.com/fast-ai-imageclas/cifar10.tgz', root=output_dir)
if not dataset_dir.is_dir():
_extract_tar(output_dir / 'cifar10.tgz', output_dir)
else:
print(f'Directory {dataset_dir} already exists, skip extraction.')
print('Generating train/test data..')
imdir_train = dataset_dir / 'train'
imdir_test = dataset_dir / 'test'
# split train/test
train = [Path(p) for p in glob.glob(f'{imdir_train}/*/*')]
test = [Path(p) for p in glob.glob(f'{imdir_test}/*/*')]
# generate data for annotations.json
# {'image-file.jpg': ['label1.jpg']}
annotations_train = dict((str(p), [f'{p.parts[-2]}.jpg']) for p in train)
annotations_test = dict((str(p), [f'{p.parts[-2]}.jpg']) for p in test)
train_path = dataset_dir / 'annotations_train.json'
test_path = dataset_dir / 'annotations_test.json'
with open(train_path, 'w') as f:
json.dump(annotations_train, f)
with open(test_path, 'w') as f:
json.dump(annotations_test, f)
print("Done")
return train_path, test_path
# Cell
def get_oxford_102_flowers(output_dir):
output_dir = Path(output_dir)
dataset_dir = output_dir / 'oxford-102-flowers'
_download_url(url='https://s3.amazonaws.com/fast-ai-imageclas/oxford-102-flowers.tgz', root=output_dir)
if not dataset_dir.is_dir():
_extract_tar(output_dir / 'oxford-102-flowers.tgz', output_dir)
else:
print(f'Directory {dataset_dir} already exists, skip extraction.')
print('Generating train/test data..')
with open(dataset_dir / 'train.txt', 'r') as f:
annotations_train = dict(tuple(line.split()) for line in f)
annotations_train = {str(dataset_dir / k): [v+'.jpg'] for k, v in annotations_train.items()}
with open(dataset_dir / 'test.txt', 'r') as f:
annotations_test = dict(tuple(line.split()) for line in f)
annotations_test = {str(dataset_dir / k): [v+'.jpg'] for k, v in annotations_test.items()}
train_path = dataset_dir / 'annotations_train.json'
test_path = dataset_dir / 'annotations_test.json'
with open(train_path, 'w') as f:
json.dump(annotations_train, f)
with open(test_path, 'w') as f:
json.dump(annotations_test, f)
print("Done")
return train_path, test_path
# Cell
def get_cub_200_2011(output_dir):
output_dir = Path(output_dir)
dataset_dir = output_dir / 'CUB_200_2011'
_download_url(url='https://s3.amazonaws.com/fast-ai-imageclas/CUB_200_2011.tgz', root=output_dir)
if not dataset_dir.is_dir():
_extract_tar(output_dir / 'CUB_200_2011.tgz', output_dir)
else:
print(f'Directory {dataset_dir} already exists, skip extraction.')
print('Generating train/test data..')
with open(dataset_dir / 'images.txt','r') as f:
image_id_map = dict(tuple(line.split()) for line in f)
with open(dataset_dir / 'classes.txt','r') as f:
class_id_map = dict(tuple(line.split()) for line in f)
with open(dataset_dir / 'train_test_split.txt','r') as f:
splitter = dict(tuple(line.split()) for line in f)
# image ids for test/train
train_k = [k for k, v in splitter.items() if v == '0']
test_k = [k for k, v in splitter.items() if v == '1']
with open(dataset_dir / 'image_class_labels.txt','r') as f:
anno_ = dict(tuple(line.split()) for line in f)
annotations_train = {str(dataset_dir / 'images' / image_id_map[k]): [class_id_map[v]+'.jpg'] for k, v in anno_.items() if k in train_k}
annotations_test = {str(dataset_dir / 'images' / image_id_map[k]): [class_id_map[v]+'.jpg'] for k, v in anno_.items() if k in test_k}
train_path = dataset_dir / 'annotations_train.json'
test_path = dataset_dir / 'annotations_test.json'
with open(train_path, 'w') as f:
json.dump(annotations_train, f)
with open(test_path, 'w') as f:
json.dump(annotations_test, f)
print("Done")
return train_path, test_path
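# Example usage (sketch): each helper downloads/extracts a dataset under the
# given output directory and returns paths to the generated annotation JSON
# files, which map image paths to single-element label lists.
#
#   train_json, test_json = get_cifar10('./data')
#   train_json, test_json = get_oxford_102_flowers('./data')
#   train_json, test_json = get_cub_200_2011('./data')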
|
StarcoderdataPython
|
3296877
|
<filename>bnpy/ioutil/CountReader.py
from builtins import *
import sys
import os
import scipy.sparse
import numpy as np
from bnpy.util import argsort_bigtosmall_stable
def loadKeffForTask(
taskpath,
effCountThr=0.01,
MIN_PRESENT_COUNT=1e-10,
**kwargs):
''' Load effective number of clusters used at each checkpoint.
Returns
-------
Keff : 1D array, size nCheckpoint
'''
effCountThr = np.maximum(effCountThr, MIN_PRESENT_COUNT)
CountMat, Info = loadCountHistoriesForTask(taskpath,
MIN_PRESENT_COUNT=MIN_PRESENT_COUNT)
return np.sum(CountMat >= effCountThr, axis=1)
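# Example usage (sketch; the task path is illustrative):
#
#   Keff = loadKeffForTask('/path/to/taskoutput/1', effCountThr=0.01)
#   # Keff[i] = number of clusters with effective count >= effCountThr at checkpoint i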
def loadCountHistoriesForTask(
taskpath,
sortBy=None,
MIN_PRESENT_COUNT=1e-10):
''' Load sparse matrix of counts for all clusters used throughout task.
Returns
-------
AllCountMat : 2D array, nCheckpoint x nTotal
Info : dict
'''
idpath = os.path.join(taskpath, 'ActiveIDs.txt')
ctpath = os.path.join(taskpath, 'ActiveCounts.txt')
fid = open(idpath, 'r')
fct = open(ctpath, 'r')
data = list()
colids = list()
rowids = list()
for ii, idline in enumerate(fid.readlines()):
idstr = str(idline.strip())
ctstr = str(fct.readline().strip())
idvec = np.asarray(idstr.split(' '), dtype=np.int32)
ctvec = np.asarray(ctstr.split(' '), dtype=np.float)
data.extend(ctvec)
colids.extend(idvec)
rowids.extend( ii * np.ones(idvec.size))
# Identify columns by unique ids
allUIDs = np.unique(colids)
# convert to an array so the == comparison below is elementwise, not a scalar
colids = np.asarray(colids)
compactColIDs = -1 * np.ones_like(colids)
for pos, u in enumerate(allUIDs):
mask = colids == u
compactColIDs[mask] = pos
assert compactColIDs.min() >= 0
# CountMat : sparse matrix of active counts at each checkpoint
# Each row gives count (or zero if eliminated) at single lap
data = np.asarray(data)
np.maximum(data, MIN_PRESENT_COUNT, out=data)
ij = np.vstack([rowids, compactColIDs])
CountMat = scipy.sparse.csr_matrix((data, ij))
CountMat = CountMat.toarray()
assert allUIDs.size == CountMat.shape[1]
# Split all columns into two sets: active and eliminated
nCol = CountMat.shape[1]
elimCols = np.flatnonzero(CountMat[-1, :] < MIN_PRESENT_COUNT)
activeCols = np.setdiff1d(np.arange(nCol), elimCols)
nElimCol = len(elimCols)
nActiveCol = len(activeCols)
ElimCountMat = CountMat[:, elimCols]
ActiveCountMat = CountMat[:, activeCols]
elimUIDs = allUIDs[elimCols]
activeUIDs = allUIDs[activeCols]
# Fill out info dict
Info = dict(
CountMat=CountMat,
allUIDs=allUIDs,
ActiveCountMat=ActiveCountMat,
ElimCountMat=ElimCountMat,
activeCols=activeCols,
elimCols=elimCols,
activeUIDs=activeUIDs,
elimUIDs=elimUIDs)
if not isinstance(sortBy, str) or sortBy.lower().count('none'):
return CountMat, Info
if sortBy.lower().count('finalorder'):
rankedActiveUIDs = idvec
raise ValueError("TODO")
elif sortBy.lower().count('countvalues'):
## Sort columns from biggest to smallest (at last chkpt)
rankedActiveIDs = argsort_bigtosmall_stable(ActiveCountMat[-1,:])
else:
raise ValueError("TODO")
# Sort active set by size at last snapshot
ActiveCountMat = ActiveCountMat[:, rankedActiveIDs]
activeUIDs = activeUIDs[rankedActiveIDs]
activeCols = activeCols[rankedActiveIDs]
# Sort eliminated set by historical size
rankedElimIDs = argsort_bigtosmall_stable(ElimCountMat.sum(axis=0))
ElimCountMat = ElimCountMat[:, rankedElimIDs]
elimUIDs = elimUIDs[rankedElimIDs]
elimCols = elimCols[rankedElimIDs]
Info['activeUIDs'] = activeUIDs
Info['activeCols'] = activeCols
Info['elimUIDs'] = elimUIDs
Info['elimCols'] = elimCols
return ActiveCountMat, ElimCountMat, Info
def LoadActiveIDsForTaskFromLap(taskpath, queryLap='final'):
''' Load vector of active cluster UIDs for specific lap
Essentially reads a single line of the ActiveIDs.txt file from taskpath
Returns
-------
idvec : 1D array, size K
where K is number of clusters active at chosen lap
'''
lappath = os.path.join(taskpath, 'laps.txt')
laps = np.loadtxt(lappath)
if queryLap is not None and queryLap != 'final':
if queryLap not in laps:
raise ValueError('Target lap not found.')
idpath = os.path.join(taskpath, 'ActiveIDs.txt')
with open(idpath, 'r') as f:
for ii, curLap in enumerate(laps):
idstr = f.readline().strip()
if curLap == queryLap or (curLap == laps[-1] and queryLap == 'final'):
idvec = np.asarray(idstr.split(' '), dtype=np.int32)
return idvec
if __name__ == '__main__':
tpath = "/data/liv/xdump/BerkPatchB1/billings-alg=bnpyHDPbirthmerge-lik=ZeroMeanGauss-ECovMat=diagcovdata-sF=0.1-K=1-initname=bregmankmeans-nBatch=1/1/"
loadCountHistoriesForTask(tpath)
|
StarcoderdataPython
|
3364099
|
<gh_stars>100-1000
from securify.solidity import compile_attributed_ast_from_string
from securify.staticanalysis.factencoder import encode
from securify.staticanalysis.souffle.souffle import is_souffle_available, generate_fact_files, run_souffle
if __name__ == '__main__':
print(is_souffle_available())
# language=Solidity
test_program = """
pragma solidity ^0.5.0;
contract A {
uint state = 0;
function test(uint i) public returns (uint) {
uint a = 4;
if (a==4) {
a+=i++;
} else {
//state += i;
//return i;
}
return test(a);
}
}
"""
cfg = compile_attributed_ast_from_string(test_program).cfg
fact_dir = "facts"
facts = encode(cfg.contracts[0])
generate_fact_files(facts, fact_dir)
print(run_souffle("test.dl", fact_dir=fact_dir)[0])
|
StarcoderdataPython
|
4842032
|
<gh_stars>1-10
from fractions import Fraction
from math import ceil, log
def eval_kraft_mcmillan(radix, *args, **kwargs):
return sum(map(lambda l: Fraction(1, radix ** l), args))
def eval_kraft_mcmillan_length(k, radix, *args, **kwargs):
curr_k = eval_kraft_mcmillan(radix, *args)
length = int(log(k - curr_k) / log(1 / radix))
return length
def eval_kraft_mcmillan_min_length(radix, *args, **kwargs):
curr_k = eval_kraft_mcmillan(radix, *args)
length = ceil(log(1 - curr_k) / log(1 / radix))
return length
def eval_kraft_mcmillan_radix(*args, **kwargs):
for i in range(2, 100):
k = eval_kraft_mcmillan(i, *args)
if k <= 1:
return i
return -1
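# Example (sketch): for codeword lengths (1, 2, 3, 3) over a binary alphabet the
# Kraft sum is 1/2 + 1/4 + 1/8 + 1/8 = 1, so the lengths are admissible and the
# smallest radix satisfying the inequality is 2:
#
#   eval_kraft_mcmillan(2, 1, 2, 3, 3)     # -> Fraction(1, 1)
#   eval_kraft_mcmillan_radix(1, 2, 3, 3)  # -> 2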
|
StarcoderdataPython
|
3332067
|
<gh_stars>0
#way to upload image: endpoint
#way to save the image
#function to make prediction on the image
#show the results
import torch
import torchvision
from torchvision import transforms
from PIL import Image
import io
import os
from flask import Flask, request, render_template
app= Flask(__name__)
UPLOAD_FOLDER="/home/isack/Desktop/isack/Diabetic Retnopathy/flask_API/static"
#our class map
class_map={
0:"Grade 0",
1:"Grade 1",
2:"Grade 2",
3:"Grade 3",
4:"Grade 4",
}
MODEL_PATH="/home/isack/Desktop/isack/Diabetic Retnopathy/saved_models/model_resnet101.pth"
def transform_image(image_byte):
my_transform=transforms.Compose([
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
image=Image.open(io.BytesIO(image_byte))
return my_transform(image).unsqueeze(0)
def get_predict(model_path, image_byte,class_map):
tensor=transform_image(image_byte=image_byte)
my_model=torch.load(model_path)
my_model.eval()
outputs=my_model(tensor)
_,pred=torch.max(outputs, 1)
pred_idx=pred.item()
class_name=class_map[pred_idx]
return (pred_idx, class_name)
@app.route("/", methods=['GET', 'POST'])
def upload_predict():
if request.method =="POST":
image_file=request.files["image"]
if image_file:
image_location = os.path.join(
UPLOAD_FOLDER,
image_file.filename
)
image_file.save(image_location)
with open(image_location, 'rb') as f:
img_byte=f.read()
pred_idx, class_name=get_predict(MODEL_PATH, img_byte, class_map)
return render_template('result.html', prediction=pred_idx, image=class_name, image_loc=image_file.filename)
return render_template('index.html', prediction= "No Prediction", image=None, image_loc=None)
if __name__=='__main__':
app.run(port= 13000, debug=True)
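# Example request (sketch; host/port follow app.run above, the file name is
# illustrative): POST an image as the "image" form field and the rendered
# result page is returned.
#
#   curl -F "image=@retina_scan.jpg" http://localhost:13000/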
|
StarcoderdataPython
|
9457
|
<reponame>emissible/emissilbe
#from . import context
#from . import test_NNModels
#from . import test_data_extract
#from . import test_speedcom
#from . import test_utilities
|
StarcoderdataPython
|
3382621
|
from zenoss.protocols.protobufs.zep_pb2 import SEVERITY_INFO
# Get attributes from the event object
ap_name = str(getattr(evt, 'wlsxTrapAPLocation.0', ''))
radio = str(getattr(evt, 'wlsxTrapAPRadioNumber.0', ''))
prev_chan = str(getattr(evt, 'wlsxTrapAPPrevChannel.0', ''))
curr_chan = str(getattr(evt, 'wlsxTrapAPChannel.0', ''))
reason = str(getattr(evt, 'wlsxTrapAPARMChangeReason.0', ''))
# Re-model Virtual Controller to get new channel assignment
device.collectDevice(background=True)
if ap_name:
# AI-AP-MIB::ArubaARMChangeReason
change_reasons = {
# radarDetected
'1': 'radar detected',
# radarCleared
'2': 'radar cleared',
# txHang
'3': 'transmit hang',
# txHangCleared
'4': 'transmit hang cleared',
# fortyMhzIntol
'5': 'wide-channel intolerance',
# cancel40mhzIntol
'6': 'wide-channel interference cleared',
# fortMhzAlign
'7': 'wide-channel alignment',
# armInterference
'8': 'interference',
# armInvalidCh
'9': 'invalid channel',
# armErrorThresh
'10': 'error threshold',
# armNoiseThreh
'11': 'noise threshold',
# armEmptyCh
'12': 'empty channel',
# armRogueCont
'13': 'rogue containment',
# armDecreasePower
'14': 'power decrease',
# armIncreasePower
'15': 'power increase',
# armTurnOffRadio
'16': 'radio turn-off',
# armTurnOnRadio
'17': 'radio turn-on',
}
# Component
evt.component = ('{0} Radio {1}'.format(ap_name, int(radio) - 1)
if radio and radio.isdigit() else ap_name)
# Severity
evt.severity = SEVERITY_INFO
# Summary
evt.summary = 'Channel changed'
if curr_chan:
evt.summary += ' to {0}'.format(curr_chan)
if prev_chan:
evt.summary += ' from {0}'.format(prev_chan)
if reason:
evt.summary += ' due to {0}'.format(change_reasons.get(
reason,
'unknown cause'
))
else:
evt._action = 'drop'
|
StarcoderdataPython
|
4815025
|
<reponame>ARte-team/ARte
#!/usr/bin/env python3
import os
import sys
import argparse
import pathlib
import posixpath
import io
import itertools
import mmap
import shutil
from binascii import hexlify
C_HEADER = """/* This file was automatically generated by mkconstfs2.
* !!!! DO NOT EDIT !!!!!
*/
#include <stdint.h>
#include "fs/constfs.h"
"""
FILE_TEMPLATE = """ {{
.path = "{target_name}",
.data = {buff_name},
.size = sizeof({buff_name})
}},
"""
C_FOOTER = """
static const constfs_t _fs_data = {{
.files = _files,
.nfiles = sizeof(_files) / sizeof(_files[0]),
}};
vfs_mount_t {constfs_name} = {{
.fs = &constfs_file_system,
.mount_point = "{mount_pount}",
.private_data = (void *)&_fs_data,
}};
"""
FILES_DECL = """
static const constfs_file_t _files[] = {
"""
BLOB_DECL = """
/** {fname} **/
static const uint8_t {varname}[] = {{
"""
def _relpath_p(path, start):
return posixpath.relpath(pathlib.Path(os.path.abspath(path)).as_posix(),
pathlib.Path(os.path.abspath(start)).as_posix())
def mkconstfs(files, root_path, mount_point, constfs_name):
"""Generate a C file containing a constant file system
Return
------
chunks: Iterator yielding fragments of the of the output file.
"""
filemap = {f: (_mkident(i), _relpath_p(f, root_path))
for i, f in enumerate(files)}
yield C_HEADER
yield from itertools.chain.from_iterable(
print_file_data(local_f, *f_data) for local_f, f_data in filemap.items())
yield FILES_DECL
yield from (FILE_TEMPLATE.format(target_name=_addroot(relp),
buff_name=ident)
for ident, relp in sorted(filemap.values()))
yield "};\n"
yield C_FOOTER.format(constfs_name=constfs_name, mount_pount=mount_point)
def _addroot(fname):
return "/" + fname if not fname.startswith("/") else fname
def _mkident(k):
return "_file{:02X}".format(k)
def print_file_data(local_fname, varname, target_fname=""):
"""Convert a file into a static C array:
Parameters
----------
local_fname: real Path (where the file is on this machine's fs)
target_fname: name that the file will have in the constfs.
output_file: File-like object where the array will be written.
Return
------
chunks: Iterator yielding fragments of the of the output text.
"""
yield BLOB_DECL.format(fname=target_fname, varname=varname)
def byte2s(b):
return "0x{},".format(hexlify(b).decode('utf-8'))
def chunk(iterable, blocksize):
"""Break a single iterable into chunks of maximum size 'blocksize'"""
return (x for _, x in itertools.groupby(enumerate(iterable),
lambda x: x[0]//blocksize))
with open(local_fname, 'rb') as f, mmap.mmap(f.fileno(), 0,
access=mmap.ACCESS_READ
) as bfile:
yield from map(lambda x: x[1],
itertools.chain.from_iterable(
map(lambda l: itertools.chain(l, [(0, "\n")]),
chunk(map(byte2s, bfile), 16)
)
)
)
yield "};\n"
def main():
parser = argparse.ArgumentParser(
description="Embed files into a constant file system")
parser.add_argument("-m", '--mount', metavar="mountpoint",
help="Where to mount the resulting fs", default="/")
parser.add_argument("-o", '--output', metavar="output_file",
help="Write the output to a file instead of stdout. "
"The file is only written if the command is successful "
"(i.e. there is no partial output")
parser.add_argument("-r", '--root', metavar="root_base_path",
type=pathlib.Path,
help="Paths on the constf will be generated for the real "
"path of the files by considering this path to be the root "
"By default the current directory (.) is used",
default=pathlib.Path())
parser.add_argument("name", help="Name for the vfs_mount_t structure")
parser.add_argument("files", nargs="+", type=pathlib.Path,
help="Files to be included.")
ns = parser.parse_args()
f_chunks = mkconstfs(ns.files, ns.root, ns.mount, ns.name)
if ns.output:
tmp_out = io.StringIO()
else:
tmp_out = sys.stdout
tmp_out.writelines(f_chunks)
if ns.output:
with open(ns.output, "w+") as f:
tmp_out.seek(0)
shutil.copyfileobj(tmp_out, f)
return 0
if __name__ == "__main__":
exit(main())
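# Example invocation (sketch; the script and file names are illustrative):
# embed two files under the /const mount point and write the generated C file,
# exposing a vfs_mount_t structure named const_fs:
#
#   ./mkconstfs2.py -m /const -o constfs_files.c const_fs index.html data.bin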
|
StarcoderdataPython
|
3228693
|
<filename>bitcoinpy/cache.py
# Cache.py
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
class Cache(object):
def __init__(self, max=1000):
self.d = {}
self.l = []
self.max = max
def put(self, k, v):
self.d[k] = v
self.l.append(k)
while (len(self.l) > self.max):
kdel = self.l[0]
del self.l[0]
del self.d[kdel]
def get(self, k):
try:
return self.d[k]
except:
return None
def exists(self, k):
return k in self.d
|
StarcoderdataPython
|
132680
|
import socket
import sys
import threading
import time
import uuid
import unittest
from mock import patch
from nose import SkipTest
from nose.tools import eq_
from nose.tools import raises
from kazoo.testing import KazooTestCase
from kazoo.exceptions import (
AuthFailedError,
BadArgumentsError,
ConfigurationError,
ConnectionClosedError,
ConnectionLoss,
InvalidACLError,
NoAuthError,
NoNodeError,
NodeExistsError,
SessionExpiredError,
)
from kazoo.protocol.connection import _CONNECTION_DROP
from kazoo.protocol.states import KeeperState, KazooState
from kazoo.tests.util import TRAVIS_ZK_VERSION
if sys.version_info > (3, ): # pragma: nocover
def u(s):
return s
else: # pragma: nocover
def u(s):
return unicode(s, "unicode_escape")
class TestClientTransitions(KazooTestCase):
def test_connection_and_disconnection(self):
states = []
rc = threading.Event()
@self.client.add_listener
def listener(state):
states.append(state)
if state == KazooState.CONNECTED:
rc.set()
self.client.stop()
eq_(states, [KazooState.LOST])
states.pop()
self.client.start()
rc.wait(2)
eq_(states, [KazooState.CONNECTED])
rc.clear()
states.pop()
self.expire_session()
rc.wait(2)
req_states = [KazooState.LOST, KazooState.CONNECTED]
eq_(states, req_states)
class TestClientConstructor(unittest.TestCase):
def _makeOne(self, *args, **kw):
from kazoo.client import KazooClient
return KazooClient(*args, **kw)
def test_invalid_handler(self):
from kazoo.handlers.threading import SequentialThreadingHandler
self.assertRaises(ConfigurationError,
self._makeOne, handler=SequentialThreadingHandler)
def test_chroot(self):
self.assertEqual(self._makeOne(hosts='127.0.0.1:2181/').chroot, '')
self.assertEqual(self._makeOne(hosts='127.0.0.1:2181/a').chroot, '/a')
self.assertEqual(self._makeOne(hosts='127.0.0.1/a').chroot, '/a')
self.assertEqual(self._makeOne(hosts='127.0.0.1/a/b').chroot, '/a/b')
self.assertEqual(self._makeOne(
hosts='127.0.0.1:2181,127.0.0.1:2182/a/b').chroot, '/a/b')
def test_connection_timeout(self):
from kazoo.handlers.threading import TimeoutError
client = self._makeOne(hosts='127.0.0.1:9')
self.assertTrue(client.handler.timeout_exception is TimeoutError)
self.assertRaises(TimeoutError, client.start, 0.1)
def test_ordered_host_selection(self):
client = self._makeOne(hosts='127.0.0.1:9,127.0.0.2:9/a',
randomize_hosts=False)
hosts = [h for h in client.hosts]
eq_(hosts, [('127.0.0.1', 9), ('127.0.0.2', 9)])
def test_invalid_hostname(self):
client = self._makeOne(hosts='nosuchhost/a')
timeout = client.handler.timeout_exception
self.assertRaises(timeout, client.start, 0.1)
def test_retry_options_dict(self):
from kazoo.retry import KazooRetry
client = self._makeOne(command_retry=dict(max_tries=99),
connection_retry=dict(delay=99))
self.assertTrue(type(client._conn_retry) is KazooRetry)
self.assertTrue(type(client._retry) is KazooRetry)
eq_(client._retry.max_tries, 99)
eq_(client._conn_retry.delay, 99)
class TestAuthentication(KazooTestCase):
def _makeAuth(self, *args, **kwargs):
from kazoo.security import make_digest_acl
return make_digest_acl(*args, **kwargs)
def test_auth(self):
username = uuid.uuid4().hex
password = uuid.uuid4().hex
digest_auth = "%s:%s" % (username, password)
acl = self._makeAuth(username, password, all=True)
client = self._get_client()
client.start()
client.add_auth("digest", digest_auth)
client.default_acl = (acl,)
try:
client.create("/1")
client.create("/1/2")
client.ensure_path("/1/2/3")
eve = self._get_client()
eve.start()
self.assertRaises(NoAuthError, eve.get, "/1/2")
# try again with the wrong auth token
eve.add_auth("digest", "badbad:bad")
self.assertRaises(NoAuthError, eve.get, "/1/2")
finally:
# Ensure we remove the ACL protected nodes
client.delete("/1", recursive=True)
eve.stop()
eve.close()
def test_connect_auth(self):
username = uuid.uuid4().hex
password = uuid.uuid4().hex
digest_auth = "%s:%s" % (username, password)
acl = self._makeAuth(username, password, all=True)
client = self._get_client(auth_data=[('digest', digest_auth)])
client.start()
try:
client.create('/1', acl=(acl,))
# give ZK a chance to copy data to other node
time.sleep(0.1)
self.assertRaises(NoAuthError, self.client.get, "/1")
finally:
client.delete('/1')
client.stop()
client.close()
def test_unicode_auth(self):
username = u("xe4/\hm")
password = u("/\<PASSWORD>")
digest_auth = "%s:%s" % (username, password)
acl = self._makeAuth(username, password, all=True)
client = self._get_client()
client.start()
client.add_auth("digest", digest_auth)
client.default_acl = (acl,)
try:
client.create("/1")
client.ensure_path("/1/2/3")
eve = self._get_client()
eve.start()
self.assertRaises(NoAuthError, eve.get, "/1/2")
# try again with the wrong auth token
eve.add_auth("digest", "badbad:bad")
self.assertRaises(NoAuthError, eve.get, "/1/2")
finally:
# Ensure we remove the ACL protected nodes
client.delete("/1", recursive=True)
eve.stop()
eve.close()
def test_invalid_auth(self):
client = self._get_client()
client.start()
self.assertRaises(TypeError, client.add_auth,
'digest', ('user', 'pass'))
self.assertRaises(TypeError, client.add_auth,
None, ('user', 'pass'))
def test_async_auth(self):
client = self._get_client()
client.start()
username = uuid.uuid4().hex
        password = uuid.uuid4().hex
digest_auth = "%s:%s" % (username, password)
result = client.add_auth_async("digest", digest_auth)
self.assertTrue(result.get())
def test_async_auth_failure(self):
client = self._get_client()
client.start()
username = uuid.uuid4().hex
        password = uuid.uuid4().hex
digest_auth = "%s:%s" % (username, password)
self.assertRaises(AuthFailedError, client.add_auth,
"unknown-scheme", digest_auth)
def test_add_auth_on_reconnect(self):
client = self._get_client()
client.start()
client.add_auth("digest", "jsmith:jsmith")
client._connection._socket.shutdown(socket.SHUT_RDWR)
while not client.connected:
time.sleep(0.1)
self.assertTrue(("digest", "jsmith:jsmith") in client.auth_data)
class TestConnection(KazooTestCase):
def test_chroot_warning(self):
k = self._get_nonchroot_client()
k.chroot = 'abba'
try:
with patch('warnings.warn') as mock_func:
k.start()
assert mock_func.called
finally:
k.stop()
def test_session_expire(self):
from kazoo.protocol.states import KazooState
cv = threading.Event()
def watch_events(event):
if event == KazooState.LOST:
cv.set()
self.client.add_listener(watch_events)
self.expire_session()
cv.wait(3)
assert cv.is_set()
def test_bad_session_expire(self):
from kazoo.protocol.states import KazooState
cv = threading.Event()
ab = threading.Event()
def watch_events(event):
if event == KazooState.LOST:
ab.set()
raise Exception("oops")
cv.set()
self.client.add_listener(watch_events)
self.expire_session()
ab.wait(0.5)
assert ab.is_set()
cv.wait(0.5)
assert not cv.is_set()
def test_state_listener(self):
from kazoo.protocol.states import KazooState
states = []
condition = threading.Condition()
def listener(state):
with condition:
states.append(state)
condition.notify_all()
self.client.stop()
eq_(self.client.state, KazooState.LOST)
self.client.add_listener(listener)
self.client.start(5)
with condition:
if not states:
condition.wait(5)
eq_(len(states), 1)
eq_(states[0], KazooState.CONNECTED)
def test_invalid_listener(self):
self.assertRaises(ConfigurationError, self.client.add_listener, 15)
def test_listener_only_called_on_real_state_change(self):
from kazoo.protocol.states import KazooState
self.assertTrue(self.client.state, KazooState.CONNECTED)
called = [False]
condition = threading.Event()
def listener(state):
called[0] = True
condition.set()
self.client.add_listener(listener)
self.client._make_state_change(KazooState.CONNECTED)
condition.wait(3)
self.assertFalse(called[0])
def test_no_connection(self):
client = self.client
client.stop()
self.assertFalse(client.connected)
self.assertTrue(client.client_id is None)
self.assertRaises(ConnectionClosedError, client.exists, '/')
def test_close_connecting_connection(self):
client = self.client
client.stop()
ev = threading.Event()
def close_on_connecting(state):
if state in (KazooState.CONNECTED, KazooState.LOST):
ev.set()
client.add_listener(close_on_connecting)
client.start()
# Wait until we connect
ev.wait(5)
ev.clear()
self.client._call(_CONNECTION_DROP, client.handler.async_result())
client.stop()
# ...and then wait until the connection is lost
ev.wait(5)
self.assertRaises(ConnectionClosedError,
self.client.create, '/foobar')
def test_double_start(self):
self.assertTrue(self.client.connected)
self.client.start()
self.assertTrue(self.client.connected)
def test_double_stop(self):
self.client.stop()
self.assertFalse(self.client.connected)
self.client.stop()
self.assertFalse(self.client.connected)
def test_restart(self):
self.assertTrue(self.client.connected)
self.client.restart()
self.assertTrue(self.client.connected)
def test_closed(self):
client = self.client
client.stop()
write_pipe = client._connection._write_pipe
# close the connection to free the pipe
client.close()
eq_(client._connection._write_pipe, None)
# sneak in and patch client to simulate race between a thread
# calling stop(); close() and one running a command
oldstate = client._state
client._state = KeeperState.CONNECTED
client._connection._write_pipe = write_pipe
try:
# simulate call made after write pipe is closed
self.assertRaises(ConnectionClosedError, client.exists, '/')
            # simulate call made after write pipe is set to None
client._connection._write_pipe = None
self.assertRaises(ConnectionClosedError, client.exists, '/')
finally:
# reset for teardown
client._state = oldstate
client._connection._write_pipe = None
class TestClient(KazooTestCase):
def _getKazooState(self):
from kazoo.protocol.states import KazooState
return KazooState
def test_client_id(self):
client_id = self.client.client_id
self.assertEqual(type(client_id), tuple)
# make sure password is of correct length
self.assertEqual(len(client_id[1]), 16)
def test_connected(self):
client = self.client
self.assertTrue(client.connected)
def test_create(self):
client = self.client
path = client.create("/1")
eq_(path, "/1")
self.assertTrue(client.exists("/1"))
def test_create_on_broken_connection(self):
client = self.client
client.start()
client._state = KeeperState.EXPIRED_SESSION
self.assertRaises(SessionExpiredError, client.create,
'/closedpath', b'bar')
client._state = KeeperState.AUTH_FAILED
self.assertRaises(AuthFailedError, client.create,
'/closedpath', b'bar')
client._state = KeeperState.CONNECTING
self.assertRaises(SessionExpiredError, client.create,
'/closedpath', b'bar')
client.stop()
client.close()
self.assertRaises(ConnectionClosedError, client.create,
'/closedpath', b'bar')
def test_create_null_data(self):
client = self.client
client.create("/nulldata", None)
value, _ = client.get("/nulldata")
self.assertEqual(value, None)
def test_create_empty_string(self):
client = self.client
client.create("/empty", b"")
value, _ = client.get("/empty")
eq_(value, b"")
def test_create_unicode_path(self):
client = self.client
path = client.create(u("/ascii"))
eq_(path, u("/ascii"))
path = client.create(u("/\xe4hm"))
eq_(path, u("/\xe4hm"))
def test_create_async_returns_unchrooted_path(self):
client = self.client
path = client.create_async('/1').get()
eq_(path, "/1")
def test_create_invalid_path(self):
client = self.client
self.assertRaises(TypeError, client.create, ('a', ))
self.assertRaises(ValueError, client.create, ".")
self.assertRaises(ValueError, client.create, "/a/../b")
self.assertRaises(BadArgumentsError, client.create, "/b\x00")
self.assertRaises(BadArgumentsError, client.create, "/b\x1e")
def test_create_invalid_arguments(self):
from kazoo.security import OPEN_ACL_UNSAFE
single_acl = OPEN_ACL_UNSAFE[0]
client = self.client
self.assertRaises(TypeError, client.create, 'a', acl='all')
self.assertRaises(TypeError, client.create, 'a', acl=single_acl)
self.assertRaises(TypeError, client.create, 'a', value=['a'])
self.assertRaises(TypeError, client.create, 'a', ephemeral='yes')
self.assertRaises(TypeError, client.create, 'a', sequence='yes')
self.assertRaises(TypeError, client.create, 'a', makepath='yes')
def test_create_value(self):
client = self.client
client.create("/1", b"bytes")
data, stat = client.get("/1")
eq_(data, b"bytes")
def test_create_unicode_value(self):
client = self.client
self.assertRaises(TypeError, client.create, "/1", u("\xe4hm"))
def test_create_large_value(self):
client = self.client
kb_512 = b"a" * (512 * 1024)
client.create("/1", kb_512)
self.assertTrue(client.exists("/1"))
mb_2 = b"a" * (2 * 1024 * 1024)
self.assertRaises(ConnectionLoss, client.create, "/2", mb_2)
def test_create_acl_duplicate(self):
from kazoo.security import OPEN_ACL_UNSAFE
single_acl = OPEN_ACL_UNSAFE[0]
client = self.client
client.create("/1", acl=[single_acl, single_acl])
acls, stat = client.get_acls("/1")
# ZK >3.4 removes duplicate ACL entries
if TRAVIS_ZK_VERSION:
version = TRAVIS_ZK_VERSION
else:
version = client.server_version()
self.assertEqual(len(acls), 1 if version > (3, 4) else 2)
def test_create_acl_empty_list(self):
from kazoo.security import OPEN_ACL_UNSAFE
client = self.client
client.create("/1", acl=[])
acls, stat = client.get_acls("/1")
self.assertEqual(acls, OPEN_ACL_UNSAFE)
def test_version_no_connection(self):
@raises(ConnectionLoss)
def testit():
self.client.server_version()
self.client.stop()
testit()
def test_create_ephemeral(self):
client = self.client
client.create("/1", b"ephemeral", ephemeral=True)
data, stat = client.get("/1")
eq_(data, b"ephemeral")
eq_(stat.ephemeralOwner, client.client_id[0])
def test_create_no_ephemeral(self):
client = self.client
client.create("/1", b"val1")
data, stat = client.get("/1")
self.assertFalse(stat.ephemeralOwner)
def test_create_ephemeral_no_children(self):
from kazoo.exceptions import NoChildrenForEphemeralsError
client = self.client
client.create("/1", b"ephemeral", ephemeral=True)
self.assertRaises(NoChildrenForEphemeralsError,
client.create, "/1/2", b"val1")
self.assertRaises(NoChildrenForEphemeralsError,
client.create, "/1/2", b"val1", ephemeral=True)
def test_create_sequence(self):
client = self.client
client.create("/folder")
path = client.create("/folder/a", b"sequence", sequence=True)
eq_(path, "/folder/a0000000000")
path2 = client.create("/folder/a", b"sequence", sequence=True)
eq_(path2, "/folder/a0000000001")
path3 = client.create("/folder/", b"sequence", sequence=True)
eq_(path3, "/folder/0000000002")
def test_create_ephemeral_sequence(self):
basepath = "/" + uuid.uuid4().hex
realpath = self.client.create(basepath, b"sandwich", sequence=True,
ephemeral=True)
self.assertTrue(basepath != realpath and realpath.startswith(basepath))
data, stat = self.client.get(realpath)
eq_(data, b"sandwich")
def test_create_makepath(self):
self.client.create("/1/2", b"val1", makepath=True)
data, stat = self.client.get("/1/2")
eq_(data, b"val1")
self.client.create("/1/2/3/4/5", b"val2", makepath=True)
data, stat = self.client.get("/1/2/3/4/5")
eq_(data, b"val2")
self.assertRaises(NodeExistsError, self.client.create, "/1/2/3/4/5",
b"val2", makepath=True)
def test_create_makepath_incompatible_acls(self):
from kazoo.client import KazooClient
from kazoo.security import make_digest_acl_credential, CREATOR_ALL_ACL
credential = make_digest_acl_credential("username", "password")
alt_client = KazooClient(self.cluster[0].address + self.client.chroot,
max_retries=5, auth_data=[("digest", credential)])
alt_client.start()
alt_client.create("/1/2", b"val2", makepath=True, acl=CREATOR_ALL_ACL)
try:
self.assertRaises(NoAuthError, self.client.create, "/1/2/3/4/5",
b"val2", makepath=True)
finally:
alt_client.delete('/', recursive=True)
alt_client.stop()
def test_create_no_makepath(self):
self.assertRaises(NoNodeError, self.client.create, "/1/2", b"val1")
self.assertRaises(NoNodeError, self.client.create, "/1/2", b"val1",
makepath=False)
self.client.create("/1/2", b"val1", makepath=True)
self.assertRaises(NoNodeError, self.client.create, "/1/2/3/4", b"val1",
makepath=False)
def test_create_exists(self):
from kazoo.exceptions import NodeExistsError
client = self.client
path = client.create("/1")
self.assertRaises(NodeExistsError, client.create, path)
def test_create_get_set(self):
nodepath = "/" + uuid.uuid4().hex
self.client.create(nodepath, b"sandwich", ephemeral=True)
data, stat = self.client.get(nodepath)
eq_(data, b"sandwich")
newstat = self.client.set(nodepath, b"hats", stat.version)
self.assertTrue(newstat)
assert newstat.version > stat.version
# Some other checks of the ZnodeStat object we got
eq_(newstat.acl_version, stat.acl_version)
eq_(newstat.created, stat.ctime / 1000.0)
eq_(newstat.last_modified, newstat.mtime / 1000.0)
eq_(newstat.owner_session_id, stat.ephemeralOwner)
eq_(newstat.creation_transaction_id, stat.czxid)
eq_(newstat.last_modified_transaction_id, newstat.mzxid)
eq_(newstat.data_length, newstat.dataLength)
eq_(newstat.children_count, stat.numChildren)
eq_(newstat.children_version, stat.cversion)
def test_get_invalid_arguments(self):
client = self.client
self.assertRaises(TypeError, client.get, ('a', 'b'))
self.assertRaises(TypeError, client.get, 'a', watch=True)
def test_bad_argument(self):
client = self.client
client.ensure_path("/1")
self.assertRaises(TypeError, self.client.set, "/1", 1)
def test_ensure_path(self):
client = self.client
client.ensure_path("/1/2")
self.assertTrue(client.exists("/1/2"))
client.ensure_path("/1/2/3/4")
self.assertTrue(client.exists("/1/2/3/4"))
def test_sync(self):
client = self.client
self.assertTrue(client.sync('/'), '/')
def test_exists(self):
nodepath = "/" + uuid.uuid4().hex
exists = self.client.exists(nodepath)
eq_(exists, None)
self.client.create(nodepath, b"sandwich", ephemeral=True)
exists = self.client.exists(nodepath)
self.assertTrue(exists)
assert isinstance(exists.version, int)
multi_node_nonexistent = "/" + uuid.uuid4().hex + "/hats"
exists = self.client.exists(multi_node_nonexistent)
eq_(exists, None)
def test_exists_invalid_arguments(self):
client = self.client
self.assertRaises(TypeError, client.exists, ('a', 'b'))
self.assertRaises(TypeError, client.exists, 'a', watch=True)
def test_exists_watch(self):
nodepath = "/" + uuid.uuid4().hex
event = self.client.handler.event_object()
def w(watch_event):
eq_(watch_event.path, nodepath)
event.set()
exists = self.client.exists(nodepath, watch=w)
eq_(exists, None)
self.client.create(nodepath, ephemeral=True)
event.wait(1)
self.assertTrue(event.is_set())
def test_exists_watcher_exception(self):
nodepath = "/" + uuid.uuid4().hex
event = self.client.handler.event_object()
# if the watcher throws an exception, all we can really do is log it
def w(watch_event):
eq_(watch_event.path, nodepath)
event.set()
raise Exception("test exception in callback")
exists = self.client.exists(nodepath, watch=w)
eq_(exists, None)
self.client.create(nodepath, ephemeral=True)
event.wait(1)
self.assertTrue(event.is_set())
def test_create_delete(self):
nodepath = "/" + uuid.uuid4().hex
self.client.create(nodepath, b"zzz")
self.client.delete(nodepath)
exists = self.client.exists(nodepath)
eq_(exists, None)
def test_get_acls(self):
from kazoo.security import make_digest_acl
acl = make_digest_acl('user', 'pass', all=True)
client = self.client
try:
client.create('/a', acl=[acl])
self.assertTrue(acl in client.get_acls('/a')[0])
finally:
client.delete('/a')
def test_get_acls_invalid_arguments(self):
client = self.client
self.assertRaises(TypeError, client.get_acls, ('a', 'b'))
def test_set_acls(self):
from kazoo.security import make_digest_acl
acl = make_digest_acl('user', 'pass', all=True)
client = self.client
client.create('/a')
try:
client.set_acls('/a', [acl])
self.assertTrue(acl in client.get_acls('/a')[0])
finally:
client.delete('/a')
def test_set_acls_empty(self):
client = self.client
client.create('/a')
self.assertRaises(InvalidACLError, client.set_acls, '/a', [])
def test_set_acls_no_node(self):
from kazoo.security import OPEN_ACL_UNSAFE
client = self.client
self.assertRaises(NoNodeError, client.set_acls, '/a', OPEN_ACL_UNSAFE)
def test_set_acls_invalid_arguments(self):
from kazoo.security import OPEN_ACL_UNSAFE
single_acl = OPEN_ACL_UNSAFE[0]
client = self.client
self.assertRaises(TypeError, client.set_acls, ('a', 'b'), ())
self.assertRaises(TypeError, client.set_acls, 'a', single_acl)
self.assertRaises(TypeError, client.set_acls, 'a', 'all')
self.assertRaises(TypeError, client.set_acls, 'a', [single_acl], 'V1')
def test_set(self):
client = self.client
client.create('a', b'first')
stat = client.set('a', b'second')
data, stat2 = client.get('a')
self.assertEqual(data, b'second')
self.assertEqual(stat, stat2)
def test_set_null_data(self):
client = self.client
client.create("/nulldata", b"not none")
client.set("/nulldata", None)
value, _ = client.get("/nulldata")
self.assertEqual(value, None)
def test_set_empty_string(self):
client = self.client
client.create("/empty", b"not empty")
client.set("/empty", b"")
value, _ = client.get("/empty")
eq_(value, b"")
def test_set_invalid_arguments(self):
client = self.client
client.create('a', b'first')
self.assertRaises(TypeError, client.set, ('a', 'b'), b'value')
self.assertRaises(TypeError, client.set, 'a', ['v', 'w'])
self.assertRaises(TypeError, client.set, 'a', b'value', 'V1')
def test_delete(self):
client = self.client
client.ensure_path('/a/b')
self.assertTrue('b' in client.get_children('a'))
client.delete('/a/b')
self.assertFalse('b' in client.get_children('a'))
def test_delete_recursive(self):
client = self.client
client.ensure_path('/a/b/c')
client.ensure_path('/a/b/d')
client.delete('/a/b', recursive=True)
client.delete('/a/b/c', recursive=True)
self.assertFalse('b' in client.get_children('a'))
def test_delete_invalid_arguments(self):
client = self.client
client.ensure_path('/a/b')
self.assertRaises(TypeError, client.delete, '/a/b', recursive='all')
self.assertRaises(TypeError, client.delete, ('a', 'b'))
self.assertRaises(TypeError, client.delete, '/a/b', version='V1')
def test_get_children(self):
client = self.client
client.ensure_path('/a/b/c')
client.ensure_path('/a/b/d')
self.assertEqual(client.get_children('/a'), ['b'])
self.assertEqual(set(client.get_children('/a/b')), set(['c', 'd']))
self.assertEqual(client.get_children('/a/b/c'), [])
def test_get_children2(self):
client = self.client
client.ensure_path('/a/b')
children, stat = client.get_children('/a', include_data=True)
value, stat2 = client.get('/a')
self.assertEqual(children, ['b'])
self.assertEqual(stat2.version, stat.version)
def test_get_children2_many_nodes(self):
client = self.client
client.ensure_path('/a/b')
client.ensure_path('/a/c')
client.ensure_path('/a/d')
children, stat = client.get_children('/a', include_data=True)
value, stat2 = client.get('/a')
self.assertEqual(set(children), set(['b', 'c', 'd']))
self.assertEqual(stat2.version, stat.version)
def test_get_children_no_node(self):
client = self.client
self.assertRaises(NoNodeError, client.get_children, '/none')
self.assertRaises(NoNodeError, client.get_children,
'/none', include_data=True)
def test_get_children_invalid_path(self):
client = self.client
self.assertRaises(ValueError, client.get_children, '../a')
def test_get_children_invalid_arguments(self):
client = self.client
self.assertRaises(TypeError, client.get_children, ('a', 'b'))
self.assertRaises(TypeError, client.get_children, 'a', watch=True)
self.assertRaises(TypeError, client.get_children,
'a', include_data='yes')
def test_invalid_auth(self):
from kazoo.exceptions import AuthFailedError
from kazoo.protocol.states import KeeperState
client = self.client
client.stop()
client._state = KeeperState.AUTH_FAILED
@raises(AuthFailedError)
def testit():
client.get('/')
testit()
def test_client_state(self):
from kazoo.protocol.states import KeeperState
eq_(self.client.client_state, KeeperState.CONNECTED)
def test_update_host_list(self):
from kazoo.client import KazooClient
from kazoo.protocol.states import KeeperState
hosts = self.cluster[0].address
# create a client with only one server in its list
client = KazooClient(hosts=hosts)
client.start()
# try to change the chroot, not currently allowed
self.assertRaises(ConfigurationError,
client.set_hosts, hosts + '/new_chroot')
# grow the cluster to 3
client.set_hosts(self.servers)
# shut down the first host
try:
self.cluster[0].stop()
time.sleep(5)
eq_(client.client_state, KeeperState.CONNECTED)
finally:
self.cluster[0].run()
dummy_dict = {
'aversion': 1, 'ctime': 0, 'cversion': 1,
'czxid': 110, 'dataLength': 1, 'ephemeralOwner': 'ben',
'mtime': 1, 'mzxid': 1, 'numChildren': 0, 'pzxid': 1, 'version': 1
}
class TestClientTransactions(KazooTestCase):
def setUp(self):
KazooTestCase.setUp(self)
skip = False
if TRAVIS_ZK_VERSION and TRAVIS_ZK_VERSION < (3, 4):
skip = True
elif TRAVIS_ZK_VERSION and TRAVIS_ZK_VERSION >= (3, 4):
skip = False
else:
ver = self.client.server_version()
if ver[1] < 4:
skip = True
if skip:
raise SkipTest("Must use Zookeeper 3.4 or above")
def test_basic_create(self):
t = self.client.transaction()
t.create('/freddy')
t.create('/fred', ephemeral=True)
t.create('/smith', sequence=True)
results = t.commit()
eq_(results[0], '/freddy')
eq_(len(results), 3)
self.assertTrue(results[2].startswith('/smith0'))
def test_bad_creates(self):
args_list = [(True,), ('/smith', 0), ('/smith', b'', 'bleh'),
('/smith', b'', None, 'fred'),
('/smith', b'', None, True, 'fred')]
@raises(TypeError)
def testit(args):
t = self.client.transaction()
t.create(*args)
for args in args_list:
testit(args)
def test_default_acl(self):
from kazoo.security import make_digest_acl
username = uuid.uuid4().hex
password = uuid.uuid4().hex
digest_auth = "%s:%s" % (username, password)
acl = make_digest_acl(username, password, all=True)
self.client.add_auth("digest", digest_auth)
self.client.default_acl = (acl,)
t = self.client.transaction()
t.create('/freddy')
results = t.commit()
eq_(results[0], '/freddy')
def test_basic_delete(self):
self.client.create('/fred')
t = self.client.transaction()
t.delete('/fred')
results = t.commit()
eq_(results[0], True)
def test_bad_deletes(self):
args_list = [(True,), ('/smith', 'woops'), ]
@raises(TypeError)
def testit(args):
t = self.client.transaction()
t.delete(*args)
for args in args_list:
testit(args)
def test_set(self):
self.client.create('/fred', b'01')
t = self.client.transaction()
t.set_data('/fred', b'oops')
t.commit()
res = self.client.get('/fred')
eq_(res[0], b'oops')
def test_bad_sets(self):
args_list = [(42, 52), ('/smith', False), ('/smith', b'', 'oops')]
@raises(TypeError)
def testit(args):
t = self.client.transaction()
t.set_data(*args)
for args in args_list:
testit(args)
def test_check(self):
self.client.create('/fred')
version = self.client.get('/fred')[1].version
t = self.client.transaction()
t.check('/fred', version)
t.create('/blah')
results = t.commit()
eq_(results[0], True)
eq_(results[1], '/blah')
def test_bad_checks(self):
args_list = [(42, 52), ('/smith', 'oops')]
@raises(TypeError)
def testit(args):
t = self.client.transaction()
t.check(*args)
for args in args_list:
testit(args)
def test_bad_transaction(self):
from kazoo.exceptions import RolledBackError, NoNodeError
t = self.client.transaction()
t.create('/fred')
t.delete('/smith')
results = t.commit()
eq_(results[0].__class__, RolledBackError)
eq_(results[1].__class__, NoNodeError)
def test_bad_commit(self):
t = self.client.transaction()
@raises(ValueError)
def testit():
t.commit()
t.committed = True
testit()
def test_bad_context(self):
@raises(TypeError)
def testit():
with self.client.transaction() as t:
t.check(4232)
testit()
def test_context(self):
with self.client.transaction() as t:
t.create('/smith', b'32')
eq_(self.client.get('/smith')[0], b'32')
class TestCallbacks(unittest.TestCase):
def test_session_callback_states(self):
from kazoo.protocol.states import KazooState, KeeperState
from kazoo.client import KazooClient
client = KazooClient()
client._handle = 1
client._live.set()
result = client._session_callback(KeeperState.CONNECTED)
eq_(result, None)
# Now with stopped
client._stopped.set()
result = client._session_callback(KeeperState.CONNECTED)
eq_(result, None)
# Test several state transitions
client._stopped.clear()
client.start_async = lambda: True
client._session_callback(KeeperState.CONNECTED)
eq_(client.state, KazooState.CONNECTED)
client._session_callback(KeeperState.AUTH_FAILED)
eq_(client.state, KazooState.LOST)
client._handle = 1
client._session_callback(-250)
eq_(client.state, KazooState.SUSPENDED)
class TestNonChrootClient(KazooTestCase):
def test_create(self):
client = self._get_nonchroot_client()
self.assertEqual(client.chroot, '')
client.start()
node = uuid.uuid4().hex
path = client.create(node, ephemeral=True)
client.delete(path)
client.stop()
def test_unchroot(self):
client = self._get_nonchroot_client()
client.chroot = '/a'
self.assertEquals(client.unchroot('/a/b'), '/b')
self.assertEquals(client.unchroot('/b/c'), '/b/c')
|
StarcoderdataPython
|
1772671
|
import re
from espider.spider import Spider
class TSpider(Spider):
__custom_setting__ = {
'max_retry': 0,
'max_thread': 10
}
index = 1
def start_requests(self):
self.url = 'http://www.pythonchallenge.com/pc/def/linkedlist.php?nothing={}'
yield self.request(self.url.format(1))
def parse(self, response, *args, **kwargs):
next_ = response.text.split()[-1]
print('{}:{}'.format(self.index, response.text))
yield self.request(self.url.format(next_), callback=self.parse)
        self.index += 1
        # stop crawling once the page no longer points to a next "nothing" value
        if re.search('next', response.text) is None:
            self.close()
if __name__ == '__main__':
sp = TSpider()
sp.run()
|
StarcoderdataPython
|
108027
|
<reponame>Ziki2001/new-school-sdk
# -*- coding: utf-8 -*-
'''
:file: utils.py
:author: -Farmer
:url: https://blog.farmer233.top
:date: 2021/09/04 23:45:40
'''
class ObjectDict(dict):
""":copyright: (c) 2014 by messense.
Makes a dictionary behave like an object, with attribute-style access.
"""
def __getattr__(self, key):
if key in self:
return self[key]
return None
def __setattr__(self, key, value):
self[key] = value
def __getstate__(self):
return None
def is_endpoint(url_or_endpoint: str) -> bool:
    """Check whether the given string is an endpoint rather than a full URL.
    Args:
        url_or_endpoint (str): a URL or an endpoint string
    Returns:
        bool: False if it starts with http(s), True otherwise
    """
if url_or_endpoint.startswith(('http://', 'https://')):
return False
return True
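# Usage sketch (not part of the original module): ObjectDict gives
# attribute-style access over plain dict data, and is_endpoint tells endpoint
# strings apart from full URLs. The sample values below are illustrative only.
if __name__ == '__main__':
    d = ObjectDict({'name': 'farmer', 'score': 3})
    assert d.name == 'farmer'      # attribute-style read
    d.level = 'senior'             # attribute-style write
    assert d['level'] == 'senior'
    assert d.missing is None       # unknown keys yield None instead of raising
    assert is_endpoint('user/info') is True
    assert is_endpoint('https://example.com/user/info') is False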
|
StarcoderdataPython
|
22247
|
<reponame>mtymchenko/npaths<gh_stars>0
import unittest
import numpy as np
import matplotlib.pyplot as plt
from npaths import NPathNode, Filter, Circulator
__all__ = [
'TestNPathNode',
'TestFilter',
'TestCirculator'
]
GHz = 1e9
ohm = 1
pF = 1e-12
freqs = np.linspace(0.001, 6, 500)*GHz
class TestNPathNode(unittest.TestCase):
def test_sparam(self):
node = NPathNode(
freqs=freqs,
freq_mod=1*GHz,
C=3*pF)
S11 = node.sparam(1, 1)
S21 = node.sparam(2, 1)
plt.figure()
plt.plot(freqs/GHz, 10*np.log10(np.abs(S11)))
plt.plot(freqs/GHz, 10*np.log10(np.abs(S21)))
plt.grid()
plt.show()
class TestFilter(unittest.TestCase):
def test_sparam(self):
node = Filter(
freqs=freqs,
freq_mod=1*GHz,
C=15*pF)
S11 = node.sparam(1, 1)
S21 = node.sparam(2, 1)
S21_8 = node.sparam(2, 1, 8)
S21_16 = node.sparam(2, 1, 16)
plt.figure()
plt.plot(freqs/GHz, 20*np.log10(np.abs(S11)))
plt.plot(freqs/GHz, 20*np.log10(np.abs(S21)))
plt.plot(freqs/GHz, 20*np.log10(np.abs(S21_8)))
plt.plot(freqs/GHz, 20*np.log10(np.abs(S21_16)))
plt.grid()
plt.show()
class TestCirculator(unittest.TestCase):
def test_sparam(self):
node = Circulator(
freqs=freqs,
freq_mod=1*GHz,
n_harmonics=60,
n_harmonics_subset=15,
C=1*pF)
S11 = node.sparam(1, 1)
S21 = node.sparam(2, 1)
S12 = node.sparam(1, 2)
        S31 = node.sparam(3, 1)
plt.figure()
plt.plot(freqs/GHz, 10*np.log10(np.abs(S11)))
plt.plot(freqs/GHz, 10*np.log10(np.abs(S21)))
plt.plot(freqs/GHz, 10*np.log10(np.abs(S12)))
plt.plot(freqs/GHz, 10*np.log10(np.abs(S31)))
plt.grid()
plt.show()
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
3304752
|
<filename>docs/examples/e06_simple_bot_structure/modules/meta.py<gh_stars>100-1000
from hata import Client
from hata.ext.commands_v2 import checks
Sakuya : Client
@Sakuya.commands
@checks.owner_only()
async def ping():
"""Pongs."""
return 'pong'
|
StarcoderdataPython
|
181027
|
<reponame>froukees/querybook<filename>querybook/server/lib/table_upload/importer/base_importer.py
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Tuple
from pandas import DataFrame
from lib.table_upload.common import ImporterResourceType
class BaseTableUploadImporter(ABC):
def __init__(self, data: Any, import_config: Optional[Dict] = None):
"""
Args:
data (Any): This represents the resource itself. For example, it could be the File object,
The query execution id, the Google sheets URL, etc
import_config (Optional[Dict]): The optional config is to tell the importer how to read the data
It could be the CSV format, etc.
"""
self.data = data
self.import_config = import_config
def get_resource_path(self) -> Tuple[ImporterResourceType, Any]:
"""Return a remote location where the data can be read.
For example, if importing data from a query execution that is stored on S3,
this should return something along the lines of
[ImporterResourceType.S3, 'key/file.csv']
Returns:
            Tuple[ImporterResourceType, Any]: resource type and the resource location (e.g. an S3 key)
        """
        return (None, None)
@abstractmethod
def get_pandas_df(self) -> DataFrame:
"""
Override this method to return a data frame that contains the data
Returns:
DataFrame: data represented data frame
"""
raise NotImplementedError()
@abstractmethod
def get_columns(self) -> List[Tuple[str, str]]:
"""
Override this method to get column names and types for the data
Returns:
List[Tuple[str, str]]: List of [col name, col type pair]
col types
"""
raise NotImplementedError()
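# Hedged sketch (not from the original codebase) of a concrete importer: it
# assumes the `data` passed to the constructor is already a pandas DataFrame
# and derives column names/types from it. The class name is illustrative only.
class InMemoryDataFrameImporter(BaseTableUploadImporter):
    def get_pandas_df(self) -> DataFrame:
        # for this importer the resource itself is the DataFrame
        return self.data
    def get_columns(self) -> List[Tuple[str, str]]:
        df = self.get_pandas_df()
        return [(name, str(dtype)) for name, dtype in df.dtypes.items()]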
|
StarcoderdataPython
|
15867
|
<reponame>gcunhase/tensorflow-onnx
# SPDX-License-Identifier: Apache-2.0
"""Graph Optimizer Base"""
import copy
from .. import logging, utils
class GraphOptimizerBase(object):
"""optimizer graph to improve performance
"""
def __init__(self):
self._logger = logging.getLogger('.'.join(__name__.split('.')[:-1] + [self.__class__.__name__]))
self._graph_been_opt = False
self.opt_iteration = 0
@property
def logger(self):
return self._logger
@property
def is_debug_mode(self):
return utils.is_debug_mode()
@property
def graph_been_opt(self):
return self._graph_been_opt
@graph_been_opt.setter
def graph_been_opt(self, value):
self._graph_been_opt = value
def optimize(self, graph, iteration):
""" Optimize graph, return optimized graph. """
before = graph.dump_node_statistics()
self.opt_iteration = iteration
graph = self._optimize(graph)
graph.update_proto()
graph.delete_unused_nodes(graph.outputs)
after = graph.dump_node_statistics()
self._print_stat_diff(before, after)
return graph
def _optimize(self, graph):
""" Derived class should override this function. """
raise NotImplementedError
@staticmethod
def _apply_optimization(graph, optimize_func):
"""
optimize graph
        will also optimize the body graphs of nodes
Args:
graph: the top level graph to be optimized
optimize_func: function to optimize graph
"""
graph = optimize_func(graph)
for node in graph.get_nodes():
body_graphs = node.get_body_graphs()
if body_graphs:
for attr, b_g in body_graphs.items():
b_g = GraphOptimizerBase._apply_optimization(b_g, optimize_func)
node.set_body_graph_as_attr(attr, b_g)
return graph
def _print_stat_diff(self, before, after):
diff = copy.deepcopy(after)
diff.subtract(before)
diff = ["{} {} ({}->{})".format(k, str(v) if v < 0 else '+' + str(v), before.get(k, 0), after.get(k, 0))
for k, v in sorted(diff.items()) if v != 0]
self.logger.verbose(', '.join(diff) if diff else "no change")
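# Hedged sketch (not part of tensorflow-onnx) of the override contract the
# docstrings above describe: a derived optimizer only implements _optimize()
# and can reuse _apply_optimization() to walk body graphs. It relies solely on
# graph.get_nodes(), which the base class itself already uses.
class NoOpOptimizer(GraphOptimizerBase):
    """Example optimizer that visits every (sub)graph but changes nothing."""
    def _optimize(self, graph):
        return self._apply_optimization(graph, self._visit)
    def _visit(self, graph):
        self.logger.verbose("visited {} nodes".format(len(graph.get_nodes())))
        return graph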
|
StarcoderdataPython
|
1654436
|
<gh_stars>0
from .client import SkyRouter
|
StarcoderdataPython
|
3366545
|
"""
mcpython - a minecraft clone written in python licenced under the MIT-licence
(https://github.com/mcpython4-coding/core)
Contributors: uuk, xkcdjerry (inactive)
Based on the game of fogleman (https://github.com/fogleman/Minecraft), licenced under the MIT-licence
Original game "minecraft" by Mojang Studios (www.minecraft.net), licenced under the EULA
(https://account.mojang.com/documents/minecraft_eula)
Mod loader inspired by "Minecraft Forge" (https://github.com/MinecraftForge/MinecraftForge) and similar
This project is not official by mojang and does not relate to it.
"""
import asyncio
import random
import typing
import deprecation
import mcpython.common.config
import mcpython.common.data.DataPacks
import mcpython.common.entity.PlayerEntity
import mcpython.common.state.GameViewStatePart
import mcpython.common.world.Chunk
import mcpython.common.world.Dimension
import mcpython.common.world.GameRule
import mcpython.common.world.OffProcessWorldAccess
import mcpython.common.world.SaveFile
import mcpython.engine.world.AbstractInterface
import mcpython.server.worldgen.WorldGenerationHandler
import mcpython.util.math
import pyglet
from mcpython import shared
from mcpython.engine import logger
from mcpython.engine.Lifecycle import schedule_task
from mcpython.util.annotation import onlyInClient
from mcpython.util.callbacks import wrap_method
class World(mcpython.engine.world.AbstractInterface.IWorld):
"""
Class holding all data of the world
"""
def __init__(self, filename: str = None):
shared.world = self
# todo: add some more variation
self.spawn_point: typing.Tuple[int, int] = (
random.randint(0, 15),
random.randint(0, 15),
)
# todo: change for str-based
self.dimensions: typing.Dict[
int, mcpython.engine.world.AbstractInterface.IDimension
] = {}
self.dim_to_id: typing.Dict[str, int] = {}
shared.dimension_handler.init_dims()
# todo: change to str; todo: move to player; todo: make property
self.active_dimension: int = 0
        # container for world-related config; contains: seed [built in]; todo: move to config class
self.config: typing.Dict[str, typing.Any] = {}
        # the gamerule handler for this world
self.gamerule_handler: typing.Union[
mcpython.common.world.GameRule.GameRuleHandler, None
] = None
asyncio.get_event_loop().run_until_complete(
self.reset_config()
) # will reset the config
# todo: move to configs / game rules
self.hide_faces_to_not_generated_chunks: bool = True
# the file-name to use, todo: make None if not needed
self.filename: str = "tmp" if filename is None else filename
# the save file instance
self.save_file: mcpython.common.world.SaveFile.SaveFile = (
mcpython.common.world.SaveFile.SaveFile(self.filename)
)
        # when in a network, stores a reference to all other players
self.players: typing.Dict[
str, mcpython.common.entity.PlayerEntity.PlayerEntity
] = {}
# The name of the local player; None on dedicated servers
self.local_player: str = "unknown" if shared.IS_CLIENT else None
self.world_loaded = False # describes if the world is loaded or not
self.world_generation_process = mcpython.common.world.OffProcessWorldAccess.OffProcessWorldHelper.spawn_process(
self
)
def tick(self):
for dimension in self.dimensions.values():
if dimension.loaded:
dimension.tick()
self.world_generation_process.run_tasks()
async def add_player(
self,
name: str,
add_inventories: bool = True,
override: bool = True,
dimension=0,
):
"""
Will add a new player into the world
:param name: the name of the player to create
:param add_inventories: if the inventories should be created
:param override: if the player should be re-created if it exists in memory
:return: the player instance
"""
if name is None:
raise ValueError("name cannot be None")
if not override and name in self.players:
return self.players[name]
self.players[name] = shared.entity_manager.spawn_entity(
"minecraft:player",
(0, 0, 0),
name,
dimension=dimension,
)
if add_inventories:
await self.players[name].create_inventories()
return self.players[name]
@onlyInClient()
def get_active_player(
self, create: bool = True
) -> typing.Union[mcpython.common.entity.PlayerEntity.PlayerEntity, None]:
"""
Returns the player instance for this client
:param create: if the player should be created or not (by calling add_player())
        :return: the player instance, or None if no player with that name is available
"""
if not create and (
self.local_player is None or self.local_player not in self.players
):
return
return (
self.players[self.local_player]
if self.local_player in self.players
else asyncio.get_event_loop().run_until_complete(
self.add_player(self.local_player)
)
)
@onlyInClient()
async def get_active_player_async(
self, create: bool = True
) -> typing.Union[mcpython.common.entity.PlayerEntity.PlayerEntity, None]:
"""
Returns the player instance for this client
:param create: if the player should be created or not (by calling add_player())
        :return: the player instance, or None if no player with that name is available
"""
if not create and (
self.local_player is None or self.local_player not in self.players
):
return
return (
self.players[self.local_player]
if self.local_player in self.players
else await self.add_player(self.local_player)
)
def get_player_by_name(self, name: str):
if name not in self.players:
asyncio.get_event_loop().run_until_complete(self.add_player(name))
return self.players[name]
async def get_player_by_name_async(self, name: str):
if name not in self.players:
await self.add_player(name)
return self.players[name]
def player_iterator(self) -> typing.Iterable:
return list(self.players.values())
def entity_iterator(self) -> typing.Iterable:
for dimension in self.dimensions.values():
yield from dimension.entity_iterator()
async def reset_config(self):
"""
Will reset the internal config of the system.
        todo: change game rule handler reset to a non-new-instance
calls event world:reset_config in the process
"""
self.config = {"enable_auto_gen": False, "enable_world_barrier": False}
await shared.event_handler.call_async("world:reset_config")
self.gamerule_handler = mcpython.common.world.GameRule.GameRuleHandler(self)
@onlyInClient()
def get_active_dimension(
self,
) -> typing.Union[mcpython.engine.world.AbstractInterface.IDimension, None]:
"""
Will return the dimension the current player is in
:return: the dimension or None if no dimension is set
"""
return self.get_dimension(self.active_dimension)
def get_dimension_names(self) -> typing.Iterable[str]:
return self.dim_to_id.keys()
def get_dimension_by_name(
self, name: str
) -> mcpython.engine.world.AbstractInterface.IDimension:
if isinstance(name, mcpython.engine.world.AbstractInterface.IDimension):
logger.print_stack(
"invoked get_dimension_by_name() with dimension instance as name; this seems not right!"
)
return name
return self.dimensions[self.dim_to_id[name]]
def add_dimension(
self, dim_id: int, name: str, dim_config=None
) -> mcpython.engine.world.AbstractInterface.IDimension:
"""
        Will add a new dimension into the system
:param dim_id: the id to create under
:param name: the name of the dimension
:param dim_config: the dim_config to use as gen config
:return: the dimension instance
"""
if dim_config is None:
dim_config = {}
dim = self.dimensions[dim_id] = mcpython.common.world.Dimension.Dimension(
self, dim_id, name, gen_config=dim_config
)
self.dim_to_id[dim.name] = dim_id
shared.world_generation_handler.setup_dimension(dim, dim_config)
return dim
@deprecation.deprecated()
def join_dimension(self, dim_id: int):
return asyncio.get_event_loop().run_until_complete(
self.join_dimension_async(dim_id)
)
async def join_dimension_async(self, dim_id: int):
"""
Will change the dimension of the active player
:param dim_id: the dimension to change to todo: make str
todo: move to player
todo: event calls must be async-ed
"""
logger.println("changing dimension to '{}'...".format(dim_id))
await shared.event_handler.call_async(
"dimension:change:pre", self.active_dimension, dim_id
)
sector = mcpython.util.math.position_to_chunk(
(await shared.world.get_active_player_async()).position
)
logger.println("unloading chunks...")
await self.change_chunks_async(sector, None)
old = self.active_dimension
self.active_dimension = dim_id
logger.println("loading new chunks...")
await self.change_chunks_async(None, sector)
await shared.event_handler.call_async("dimension:change:post", old, dim_id)
logger.println("finished!")
def get_dimension(
self, dim_id: typing.Union[int, str]
) -> mcpython.engine.world.AbstractInterface.IDimension:
"""
        Will get a dimension with a special id
:param dim_id: the id to use
:return: the dimension instance or None if it does not exist
"""
if dim_id in self.dimensions:
return self.dimensions[dim_id]
if dim_id in self.dim_to_id:
return self.dimensions[self.dim_to_id[dim_id]]
# logger.print_stack("[ERROR] failed to access dim '{}', below call stack".format(dim_id))
def hit_test(
self,
position: typing.Tuple[float, float, float],
vector: typing.Tuple[float, float, float],
max_distance: int = 8,
) -> typing.Union[
typing.Tuple[
typing.Tuple[int, int, int],
typing.Tuple[int, int, int],
typing.Tuple[float, float, float],
],
typing.Tuple[None, None, None],
]:
"""
Line of sight search from current position.
If a block is intersected it is returned, along with the block previously in the line of sight.
If no block is found, return None, None, None
Will check for bounding boxes of blocks (get_view_bbox())
:param position: The (x, y, z) position to check visibility from
:param vector: The line of sight vector, as (dx, dy, dz)
        :param max_distance: how many blocks away, at most, to search for a hit; the ray stops after this many blocks
todo: cache the bbox of the block
todo: move to dimension
todo: add variant only taking the player
todo: cache when possible
todo: add variant for entities
"""
# get m from the gamerule
m = shared.world.gamerule_handler.table["hitTestSteps"].status.status
x, y, z = position
dx, dy, dz = vector
dx /= m
dy /= m
dz /= m
previous = None
for _ in range(max_distance * m):
key = mcpython.util.math.normalize((x, y, z))
block = self.get_active_dimension().get_block(key)
if (
block
and type(block) != str
and block.get_view_bbox().test_point_hit((x, y, z), block.position)
):
return key, previous, (x, y, z)
if key != previous:
previous = key
x += dx
y += dy
z += dz
return None, None, None
def show_chunk(
self,
chunk: typing.Union[
typing.Tuple[int, int], mcpython.engine.world.AbstractInterface.IChunk
],
):
"""
Ensure all blocks in the given chunk that should be shown are
drawn to the canvas.
:param chunk: the chunk to show
"""
if not issubclass(type(chunk), mcpython.engine.world.AbstractInterface.IChunk):
chunk = self.get_active_dimension().get_chunk(*chunk, generate=False)
if chunk is None:
return
chunk.show()
def hide_chunk(
self,
chunk: typing.Union[
typing.Tuple[int, int], mcpython.engine.world.AbstractInterface.IChunk
],
):
"""
Ensure all blocks in the given chunk that should be hidden are
removed from the canvas.
:param chunk: the chunk to hide
"""
if not issubclass(type(chunk), mcpython.engine.world.AbstractInterface.IChunk):
chunk = self.get_active_dimension().get_chunk(*chunk, generate=False)
if chunk is None:
return
chunk.hide()
@deprecation.deprecated()
def change_chunks(
self,
before: typing.Union[typing.Tuple[int, int], None],
after: typing.Union[typing.Tuple[int, int], None],
generate_chunks=True,
load_immediate=True,
dimension=None,
):
"""
Move from chunk `before` to chunk `after`
:param before: the chunk before
:param after: the chunk after
:param generate_chunks: if chunks should be generated
:param load_immediate: if chunks should be loaded immediate if needed
todo: move to dimension
"""
if shared.IS_CLIENT and self.get_active_dimension() is None:
return
if dimension is None:
dimension = self.get_active_dimension()
before_set = set()
after_set = set()
pad = 4
for dx in range(-pad, pad + 1):
for dz in range(-pad, pad + 1):
if before is not None:
x, z = before
if (dx + x) ** 2 + (dz + z) ** 2 <= (pad + 1) ** 2:
before_set.add((x + dx, z + dz))
if after is not None:
x, z = after
if (dx + x) ** 2 + (dz + z) ** 2 <= (pad + 1) ** 2:
after_set.add((x + dx, z + dz))
# show = after_set - before_set
hide = before_set - after_set
for chunk in hide:
# todo: fix this, this was previously hiding chunks randomly....
pyglet.clock.schedule_once(wrap_method(dimension.hide_chunk, chunk), 0.1)
c = dimension.get_chunk(*chunk, generate=False, create=False)
if c and c.is_loaded() and not shared.IS_NETWORKING:
shared.tick_handler.schedule_once(
shared.world.save_file.dump,
None,
"minecraft:chunk",
dimension=self.active_dimension,
chunk=chunk,
)
for chunk in after_set:
c = dimension.get_chunk(*chunk, generate=False, create=False)
if c and c.is_visible():
continue
c = dimension.get_chunk(*chunk, generate=False)
pyglet.clock.schedule_once(wrap_method(dimension.show_chunk, c), 0.1)
if not shared.IS_NETWORKING:
if not load_immediate:
pyglet.clock.schedule_once(
lambda _: shared.world.save_file.read(
"minecraft:chunk",
dimension=self.active_dimension,
chunk=chunk,
),
0.1,
)
else:
shared.world.save_file.read(
"minecraft:chunk", dimension=self.active_dimension, chunk=chunk
)
else:
dimension.get_chunk(*chunk, generate=False)
if not after or shared.IS_NETWORKING:
return
for dx in range(-pad, pad + 1):
for dz in range(-pad, pad + 1):
if (
generate_chunks
and abs(dx) <= mcpython.common.config.CHUNK_GENERATION_RANGE
and abs(dz) <= mcpython.common.config.CHUNK_GENERATION_RANGE
and self.config["enable_auto_gen"]
):
chunk = dimension.get_chunk(
dx + after[0], dz + after[1], generate=False
)
if not chunk.is_generated():
shared.world_generation_handler.add_chunk_to_generation_list(
chunk
)
async def change_chunks_async(
self,
before: typing.Union[typing.Tuple[int, int], None],
after: typing.Union[typing.Tuple[int, int], None],
generate_chunks=True,
load_immediate=True,
dimension=None,
):
"""
Move from chunk `before` to chunk `after`
:param before: the chunk before
:param after: the chunk after
:param generate_chunks: if chunks should be generated
:param load_immediate: if chunks should be loaded immediate if needed
todo: move to dimension
"""
if shared.IS_CLIENT and self.get_active_dimension() is None:
return
if dimension is None:
dimension = self.get_active_dimension()
before_set = set()
after_set = set()
pad = 4
for dx in range(-pad, pad + 1):
for dz in range(-pad, pad + 1):
if before is not None:
x, z = before
if (dx + x) ** 2 + (dz + z) ** 2 <= (pad + 1) ** 2:
before_set.add((x + dx, z + dz))
if after is not None:
x, z = after
if (dx + x) ** 2 + (dz + z) ** 2 <= (pad + 1) ** 2:
after_set.add((x + dx, z + dz))
# show = after_set - before_set
hide = before_set - after_set
for chunk in hide:
# todo: fix this, this was previously hiding chunks randomly....
pyglet.clock.schedule_once(wrap_method(dimension.hide_chunk, chunk), 0.1)
c = dimension.get_chunk(*chunk, generate=False, create=False)
if c and c.is_loaded() and not shared.IS_NETWORKING:
schedule_task(
shared.world.save_file.dump_async(
None,
"minecraft:chunk",
dimension=self.active_dimension,
chunk=chunk,
)
)
for chunk in after_set:
c = dimension.get_chunk(*chunk, generate=False, create=False)
if c and c.is_visible():
continue
c = dimension.get_chunk(*chunk, generate=False)
pyglet.clock.schedule_once(wrap_method(dimension.show_chunk, c), 0.1)
if not shared.IS_NETWORKING:
if not load_immediate:
schedule_task(
shared.world.save_file.read_async(
"minecraft:chunk",
dimension=self.active_dimension,
chunk=chunk,
)
)
else:
await shared.world.save_file.read_async(
"minecraft:chunk", dimension=self.active_dimension, chunk=chunk
)
else:
dimension.get_chunk(*chunk, generate=False)
if not after or shared.IS_NETWORKING:
return
for dx in range(-pad, pad + 1):
for dz in range(-pad, pad + 1):
if (
generate_chunks
and abs(dx) <= mcpython.common.config.CHUNK_GENERATION_RANGE
and abs(dz) <= mcpython.common.config.CHUNK_GENERATION_RANGE
and self.config["enable_auto_gen"]
):
chunk = dimension.get_chunk(
dx + after[0], dz + after[1], generate=False
)
if not chunk.is_generated():
shared.world_generation_handler.add_chunk_to_generation_list(
chunk
)
async def cleanup(self, remove_dims=False, filename=None):
"""
Will clean up the world
:param remove_dims: if dimensions should be cleared
:param filename: the new filename if it changes
        todo: split this up into smaller functions
"""
self.active_dimension = 0
for dimension in self.dimensions.values():
dimension: mcpython.engine.world.AbstractInterface.IDimension
for chunk in dimension.chunks.values():
chunk.hide_all()
del chunk
dimension.chunks.clear()
if remove_dims:
self.dimensions.clear()
shared.dimension_handler.init_dims()
[
await inventory.on_world_cleared()
for inventory in shared.inventory_handler.containers
]
await self.reset_config()
if shared.IS_CLIENT:
player = shared.world.get_active_player(create=False)
if player is not None:
player.flying = False
for inv in shared.world.get_active_player().get_inventories():
inv.clear()
self.spawn_point = (random.randint(0, 15), random.randint(0, 15))
shared.world_generation_handler.task_handler.clear()
await shared.entity_manager.clear()
self.players.clear()
if filename is not None:
self.setup_by_filename(filename)
await mcpython.common.data.DataPacks.datapack_handler.cleanup()
await shared.event_handler.call_async("world:clean")
def setup_by_filename(self, filename: str):
"""
        Will set up the system for a new file name
:param filename: the file name to use
"""
self.filename = filename if filename is not None else "tmp"
self.save_file = mcpython.common.world.SaveFile.SaveFile(self.filename)
|
StarcoderdataPython
|
1617929
|
# Func04.py
def Sum(*args):
return sum(args)
print(Sum(20, 10)) #30
print(Sum(20, 10, 5)) #35
print(Sum(20, 10, 5, 3)) #38
b = [10, 20, 30, 40, 50]
print(Sum(*b))
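# Added note (not in the original snippet): * unpacking works with any iterable,
# not just lists.
print(Sum(*range(1, 6)))  # 15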
|
StarcoderdataPython
|
3286113
|
<reponame>PepSalehi/algorithms<gh_stars>0
#!/usr/bin/env python
"""Get all page names of a given language."""
import json
import requests
def query(lang, query):
query = "&".join(query)
q = (u"https://{lang}.wikipedia.org/w/api.php?action=query&{query}"
"&format=json"
.format(lang=lang, query=query))
r = requests.get(q)
return json.loads(r.text)
def get_all_page_titles(lang, apcontinue='', max_pages=float('inf')):
page_titles = []
    # build the first query from the caller-supplied continuation token before reusing the name as a loop flag
    q = ["list=allpages", "aplimit=2", "apcontinue={}".format(apcontinue)]
    apcontinue = True
while apcontinue:
result = query(lang, q)
print(result['query']['allpages'][0])
page_titles += [(p['title'], p['pageid'])
for p in result['query']['allpages']]
if 'continue' not in result:
print("continue not in result")
apcontinue = None
break
apcontinue = result['continue']['apcontinue']
q[2] = u"apcontinue={}".format(apcontinue)
if len(page_titles) > max_pages:
print("max_pages reached")
break
return {'page_titles': page_titles, 'continue': apcontinue}
lang = 'cy'
page_titles = get_all_page_titles(lang)
print(len(page_titles['page_titles']))
print(page_titles['continue'])
|
StarcoderdataPython
|
39829
|
import dash
import dash_bio as dashbio
import dash_html_components as html
import dash_core_components as dcc
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div([
'Select which chromosomes to display on the ideogram below:',
dcc.Dropdown(
id='displayed-chromosomes',
options=[{'label': str(i), 'value': str(i)} for i in range(1, 23)],
multi=True,
value=[str(i) for i in range(1, 23)]
),
dashbio.Ideogram(
id='my-dashbio-ideogram'
),
html.Div(id='ideogram-rotated')
])
@app.callback(
dash.dependencies.Output('my-dashbio-ideogram', 'chromosomes'),
[dash.dependencies.Input('displayed-chromosomes', 'value')]
)
def update_ideogram(value):
return value
@app.callback(
dash.dependencies.Output('ideogram-rotated', 'children'),
[dash.dependencies.Input('my-dashbio-ideogram', 'rotated')]
)
def update_ideogram_rotated(rot):
return 'You have {} selected a chromosome.'.format(
'' if rot else 'not')
if __name__ == '__main__':
app.run_server(debug=True)
|
StarcoderdataPython
|
1624653
|
<gh_stars>1-10
"""Anagrafica app."""
from fattureincloud.models.base import Resource
class Soggetto(Resource):
"""Soggetto class."""
def lista(self, _id="", filtro="", nome="", cf="", piva="", pagina=1):
"""Return list of elements filtered by given parameters if set."""
payload = {
"id": _id,
"filtro": filtro,
"nome": nome,
"cf": cf,
"piva": piva,
"pagina": pagina,
}
return super().lista(**payload)
def nuovo(self, **kwargs):
"""Create new soggetto."""
raise NotImplementedError
def importa(self, **kwargs):
"""Import a list of soggetto."""
raise NotImplementedError
def modifica(self, **kwargs):
"""Update soggetto."""
raise NotImplementedError
def elimina(self, **kwargs):
"""Delete soggetto."""
raise NotImplementedError
class Clienti(Soggetto):
"""Clienti class."""
list_key = "lista_clienti"
class Fornitori(Soggetto):
"""Fornitori class."""
list_key = "lista_fornitori"
|
StarcoderdataPython
|
1763256
|
<reponame>railnova/raven-cron<gh_stars>0
from getpass import getuser
from os import getenv, path, SEEK_END
from raven import Client
from subprocess import call
from tempfile import TemporaryFile
from argparse import ArgumentParser
import argparse
from sys import argv, stderr
from time import time
from .version import VERSION
MAX_MESSAGE_SIZE = 4096
parser = ArgumentParser(
description='Wraps commands and reports failing ones to sentry.',
epilog='SENTRY_DSN can also be passed as an environment variable.',
)
parser.add_argument(
'--dsn',
metavar='SENTRY_DSN',
default=getenv('SENTRY_DSN'),
help='Sentry server address',
)
parser.add_argument(
'--always',
action='store_true',
help='Report results to sentry even if the command exits successfully.'
)
parser.add_argument(
'--logger',
default='cron'
)
parser.add_argument(
'--description',
default=None
)
parser.add_argument(
'--version',
action='version',
version=VERSION,
)
parser.add_argument(
'cmd',
nargs=argparse.REMAINDER,
help='The command to run',
)
def update_dsn(opts):
"""Update the Sentry DSN stored in local configs
It's assumed that the file contains a DSN endpoint like this:
    https://public_key:secret_key@sentry.example.com/project_id
It could easily be extended to override all settings if there
were more use cases.
"""
homedir = path.expanduser('~%s' % getuser())
home_conf_file = path.join(homedir, '.raven-cron')
system_conf_file = '/etc/raven-cron.conf'
conf_precedence = [home_conf_file, system_conf_file]
for conf_file in conf_precedence:
if path.exists(conf_file):
with open(conf_file, "r") as conf:
opts.dsn = conf.read().rstrip()
return
def run(args=argv[1:]):
opts = parser.parse_args(args)
    # Command line takes precedence, otherwise check for local configs
if not opts.dsn:
update_dsn(opts)
runner = CommandReporter(**vars(opts))
runner.run()
class CommandReporter(object):
def __init__(self, cmd, dsn, always, logger, description):
self.dsn = dsn
self.command = " ".join(cmd)
self.always = always
self.client = None
self.logger = logger
self.description = description
def run(self):
buf = TemporaryFile()
start = time()
exit_status = call(self.command, stdout=buf, stderr=buf, shell=True)
if exit_status > 0 or self.always == True:
elapsed = int((time() - start) * 1000)
self.report_fail(exit_status, buf, elapsed)
buf.close()
def report_fail(self, exit_status, buf, elapsed):
if self.dsn is None:
print >>stderr, "No DSN for raven-cron configured, cannot report failure of script:", self.command
buf.seek(0)
print >>stderr, buf.read()
return
# Hack to get the file size since the tempfile doesn't exist anymore
buf.seek(0, SEEK_END)
file_size = buf.tell()
if file_size < MAX_MESSAGE_SIZE:
buf.seek(0)
last_lines = buf.read()
else:
buf.seek(-(MAX_MESSAGE_SIZE-3), SEEK_END)
last_lines = '...' + buf.read()
if self.description:
message=self.description
elif exit_status != 0:
message="Command \"%s\" failed" % (self.command,)
else:
message="Command \"%s\" report" % (self.command,)
if self.client is None:
self.client = Client(dsn=self.dsn)
self.client.captureMessage(
message,
data={
'logger': self.logger,
},
extra={
'command': self.command,
'exit_status': exit_status,
'last_lines': last_lines,
},
time_spent=elapsed
)
|
StarcoderdataPython
|
153036
|
# 2016. Vlachos Group Ge<NAME>. University of Delaware.
|
StarcoderdataPython
|
1660162
|
'''
May 2017
@author: <NAME>
'''
import unittest
class GuiUnitTests(unittest.TestCase):
pass
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1639475
|
<reponame>omnivector-solutions/charm-fluentbit
"""Fluentbit operations."""
import logging
import shlex
import subprocess
import shutil
from pathlib import Path
from typing import List
from jinja2 import Environment, FileSystemLoader
from utils import operating_system
logger = logging.getLogger()
class FluentbitOps:
"""Fluentbit ops."""
def __init__(self):
"""Initialize class."""
logger.debug("## Initializing FluentBitOps")
self._template_dir = Path(__file__).parent.parent / "templates"
self._systemd_service = "td-agent-bit.service"
self._config_path = Path("/etc/td-agent-bit")
def install(self) -> bool:
"""Install fluentbit on the machine.
Returns:
            bool: whether the installation was successful or not
"""
os_ = operating_system()
if "ubuntu" == os_[0]:
return self._install_on_ubuntu()
elif "centos" == os_[0]:
return self._install_on_centos()
else:
logger.error(f"## Unsupported operating system: {os_}")
return False
def _install_on_ubuntu(self) -> bool:
"""Install fluentbit on Ubuntu 20.04.
Returns:
            bool: whether the installation was successful or not
"""
logger.debug("## Configuring APT to install fluentbit on Ubuntu")
try:
key = self._template_dir / "fluentbit.key"
cmd = f"apt-key add {key.as_posix()}"
subprocess.check_output(shlex.split(cmd))
repo = "deb https://packages.fluentbit.io/ubuntu/focal focal main"
cmd = f'add-apt-repository "{repo}"'
subprocess.check_output(shlex.split(cmd))
logger.debug("## Installing fluentbit")
cmd = "apt-get install --yes td-agent-bit=1.8.15"
subprocess.check_output(shlex.split(cmd))
logger.debug("## Fluentbit installed")
return True
except subprocess.CalledProcessError as e:
logger.error(f"## Error installing fluentbit: {e}")
return False
def _install_on_centos(self) -> bool:
"""Install fluentbit on CentOS 7.
Returns:
            bool: whether the installation was successful or not
"""
logger.debug("## Configuring yum to install fluentbit on Centos")
try:
repo = self._template_dir / "td-agent-bit.yum.repo"
target = Path("/etc/yum.repos.d/td-agent-bit.repo")
shutil.copyfile(repo, target)
except OSError as e:
logger.error(f"## Error setting yum repo: {e}")
return False
try:
logger.debug("## Installing fluentbit")
key = self._template_dir / "fluentbit.key"
shutil.copy(key, "/var/tmp/")
cmd = "yum install --assumeyes td-agent-bit-1.8.15"
subprocess.check_output(shlex.split(cmd))
logger.debug("## Fluentbit installed")
return True
except subprocess.CalledProcessError as e:
logger.error(f"## Error installing fluentbit: {e}")
return False
def restart(self) -> bool:
"""Restart the fluebtbit service.
If the service is not running, start it.
Returns:
            bool: whether the process (re)started successfully.
"""
logger.debug(f"## Restarting {self._systemd_service}")
try:
cmd = f"systemctl restart {self._systemd_service}"
subprocess.check_output(shlex.split(cmd))
except subprocess.CalledProcessError as e:
logger.error(f"## Error restarting fluentbit: {e}")
return False
return self.is_active()
def is_active(self) -> bool:
"""Check wether the service is running."""
try:
cmd = f"systemctl is-active {self._systemd_service}"
r = subprocess.check_output(shlex.split(cmd))
return "active" == r.decode().strip().lower()
except subprocess.CalledProcessError as e:
logger.error(f'## Error checking fluentbit service: {e}')
return False
def stop(self):
"""Stop and disable the fluentbit service."""
logger.debug(f"## Stoping {self._systemd_service}")
try:
cmd = f"systemctl disable --now {self._systemd_service}"
subprocess.check_output(shlex.split(cmd))
except subprocess.CalledProcessError as e:
logger.error(f"## Error stoping fluentbit: {e}")
def uninstall(self):
"""Uninstall package.
Also removes custom repositories but not custom configuration files.
"""
os_ = operating_system()
if "ubuntu" == os_[0]:
self._uninstall_on_ubuntu()
elif "centos" == os_[0]:
self._uninstall_on_centos()
else:
logger.error(f"## Unsupported operating system: {os_}")
def _uninstall_on_ubuntu(self):
logger.debug("## Removing fluentbit package")
cmd = "apt-get purge --yes td-agent-bit"
subprocess.check_output(shlex.split(cmd))
logger.debug("## Removing fluentbit repository")
repo = "deb https://packages.fluentbit.io/ubuntu/focal focal main"
cmd = f'add-apt-repository --remove "{repo}"'
subprocess.check_output(shlex.split(cmd))
def _uninstall_on_centos(self):
logger.debug("## Removing fluentbit package")
cmd = "yum remove --assumeyes td-agent-bit"
subprocess.check_output(shlex.split(cmd))
logger.debug("## Removing fluentbit repository")
Path("/etc/yum.repos.d/td-agent-bit.repo").unlink()
def configure(self, cfg: List[dict]):
"""Configure Fluentbit and restart service."""
logger.debug("## Configurting fluentbit")
ctxt = {"inputs": list(),
"filters": list(),
"outputs": list(),
"parsers": list(),
"multiline_parsers": list()}
# separate input, output, and filter from parser and parser_multiline
for entry in cfg:
key = list(entry.keys())[0].lower()
if key == "input":
ctxt["inputs"].append(entry["input"])
elif key == "filter":
ctxt["filters"].append(entry["filter"])
elif key == "output":
ctxt["outputs"].append(entry["output"])
elif key == "parser":
ctxt["parsers"].append(entry["parser"])
elif key == "multiline_parser":
ctxt["multiline_parsers"].append(entry["multiline_parser"])
self._render_configs(ctxt)
self.restart()
def _render_configs(self, context):
"""Render the configuration files."""
environment = Environment(loader=FileSystemLoader(self._template_dir))
config = self._config_path / "td-agent-bit.conf"
logger.debug(f"## Redering {config}")
template = environment.get_template("td-agent-bit.conf.tmpl")
config.write_text(template.render(context))
parsers = self._config_path / "charm-parsers.conf"
logger.debug(f"## Redering {parsers}")
template = environment.get_template("parsers.conf.tmpl")
parsers.write_text(template.render(context))
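# Hedged sketch of the cfg list accepted by FluentbitOps.configure(); the section
# parameters below (name, path, tag, match, host) are illustrative assumptions, not
# values mandated by this module or its templates:
#
#   example_cfg = [
#       {"input": {"name": "tail", "path": "/var/log/syslog", "tag": "syslog"}},
#       {"filter": {"name": "grep", "match": "syslog"}},
#       {"output": {"name": "forward", "match": "*", "host": "127.0.0.1"}},
#   ]
#   FluentbitOps().configure(example_cfg)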
|
StarcoderdataPython
|
82824
|
"""Delete permission template API method."""
from ibsng.handler.handler import Handler
class deletePermTemplate(Handler):
"""Delete permission template method class."""
def control(self):
"""Validate inputs after method setup.
:return: None
:rtype: None
"""
self.is_valid(self.perm_template_name, str)
def setup(self, perm_template_name):
"""Setup required parameters.
:param str perm_template_name: permission template name
:return: None
:rtype: None
"""
self.perm_template_name = perm_template_name
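# Hedged usage sketch: the construction and dispatch pattern of the ibsng Handler base
# class is assumed here, and "temp_operators" is an invented template name:
#
#   handler = deletePermTemplate()
#   handler.setup("temp_operators")
#   handler.control()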
|
StarcoderdataPython
|
12505
|
from ex01.funcoes import *
def arqExiste(nome):
try:
a = open(nome, 'rt') #rt = read text
a.close()
except FileNotFoundError:
return False
else:
return True
def criarArq(nome):
    try:
        a = open(nome, 'wt+')  # wt = write text; + = create the file if it does not exist
        a.close()
    except:
        print('There was an error while creating the file')
    else:
        print('File created successfully')
def lerArq(nome):
    try:
        a = open(nome, 'rt')
    except:
        print('There was an error while reading the file')
    else:
        cabecalho('REGISTERED PEOPLE')
        for linha in a:
            dado = linha.split(';')
            dado[1] = dado[1].replace('\n', '')
            print(f'{dado[0]:<30}{dado[1]:>3} years')
        # close only when the file was actually opened (the original finally block
        # raised a NameError when open() failed)
        a.close()
def cadastrar(arquivo, nome='unknown', idade=0):
    try:
        a = open(arquivo, 'at')  # at = append to the text file
    except:
        print('There was an error while registering')
    else:
        try:
            a.write(f'{nome};{idade}\n')
        except:
            print('There was an error while executing a.write')
        else:
            print('New record added successfully')
        a.close()
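# Hedged usage sketch (the file name and record below are invented for illustration):
#
#   if not arqExiste('people.txt'):
#       criarArq('people.txt')
#   cadastrar('people.txt', nome='Maria', idade=30)
#   lerArq('people.txt')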
|
StarcoderdataPython
|
11236
|
"""
Totally untested file. Will be removed in subsequent commits
"""
import tensorflow as tf
import matplotlib.image as mpimg
import numpy as np
from math import ceil, floor
import os
IMAGE_SIZE = 720
def central_scale_images(X_imgs, scales):
# Various settings needed for Tensorflow operation
boxes = np.zeros((len(scales), 4), dtype = np.float32)
for index, scale in enumerate(scales):
x1 = y1 = 0.5 - 0.5 * scale # To scale centrally
x2 = y2 = 0.5 + 0.5 * scale
boxes[index] = np.array([y1, x1, y2, x2], dtype = np.float32)
box_ind = np.zeros((len(scales)), dtype = np.int32)
crop_size = np.array([IMAGE_SIZE, IMAGE_SIZE], dtype = np.int32)
X_scale_data = []
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape = (1, IMAGE_SIZE, IMAGE_SIZE, 3))
# Define Tensorflow operation for all scales but only one base image at a time
tf_img = tf.image.crop_and_resize(X, boxes, box_ind, crop_size)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for img_data in X_imgs:
batch_img = np.expand_dims(img_data, axis = 0)
scaled_imgs = sess.run(tf_img, feed_dict = {X: batch_img})
X_scale_data.extend(scaled_imgs)
X_scale_data = np.array(X_scale_data, dtype = np.float32)
return X_scale_data
def get_translate_parameters(index):
if index == 0: # Translate left 20 percent
offset = np.array([0.0, 0.2], dtype = np.float32)
size = np.array([IMAGE_SIZE, ceil(0.8 * IMAGE_SIZE)], dtype = np.int32)
w_start = 0
w_end = int(ceil(0.8 * IMAGE_SIZE))
h_start = 0
h_end = IMAGE_SIZE
elif index == 1: # Translate right 20 percent
offset = np.array([0.0, -0.2], dtype = np.float32)
size = np.array([IMAGE_SIZE, ceil(0.8 * IMAGE_SIZE)], dtype = np.int32)
w_start = int(floor((1 - 0.8) * IMAGE_SIZE))
w_end = IMAGE_SIZE
h_start = 0
h_end = IMAGE_SIZE
elif index == 2: # Translate top 20 percent
offset = np.array([0.2, 0.0], dtype = np.float32)
size = np.array([ceil(0.8 * IMAGE_SIZE), IMAGE_SIZE], dtype = np.int32)
w_start = 0
w_end = IMAGE_SIZE
h_start = 0
h_end = int(ceil(0.8 * IMAGE_SIZE))
else: # Translate bottom 20 percent
offset = np.array([-0.2, 0.0], dtype = np.float32)
size = np.array([ceil(0.8 * IMAGE_SIZE), IMAGE_SIZE], dtype = np.int32)
w_start = 0
w_end = IMAGE_SIZE
h_start = int(floor((1 - 0.8) * IMAGE_SIZE))
h_end = IMAGE_SIZE
return offset, size, w_start, w_end, h_start, h_end
def translate_images(X_imgs):
offsets = np.zeros((len(X_imgs), 2), dtype = np.float32)
n_translations = 4
X_translated_arr = []
tf.reset_default_graph()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(n_translations):
X_translated = np.zeros((len(X_imgs), IMAGE_SIZE, IMAGE_SIZE, 3),
dtype = np.float32)
X_translated.fill(0.0) # Filling background color
base_offset, size, w_start, w_end, h_start, h_end = get_translate_parameters(i)
offsets[:, :] = base_offset
glimpses = tf.image.extract_glimpse(X_imgs, size, offsets)
glimpses = sess.run(glimpses)
X_translated[:, h_start: h_start + size[0], \
w_start: w_start + size[1], :] = glimpses
X_translated_arr.extend(X_translated)
X_translated_arr = np.array(X_translated_arr, dtype = np.float32)
return X_translated_arr
def rotate_images(X_imgs):
X_rotate = []
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape = (IMAGE_SIZE, IMAGE_SIZE, 3))
k = tf.placeholder(tf.int32)
tf_img = tf.image.rot90(X, k = k)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for img in X_imgs:
for i in range(3): # Rotation at 90, 180 and 270 degrees
rotated_img = sess.run(tf_img, feed_dict = {X: img, k: i + 1})
X_rotate.append(rotated_img)
X_rotate = np.array(X_rotate, dtype = np.float32)
return X_rotate
def flip_images(X_imgs):
X_flip = []
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape = (IMAGE_SIZE, IMAGE_SIZE, 3))
tf_img1 = tf.image.flip_left_right(X)
tf_img2 = tf.image.flip_up_down(X)
tf_img3 = tf.image.transpose_image(X)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for img in X_imgs:
flipped_imgs = sess.run([tf_img1, tf_img2, tf_img3], feed_dict = {X: img})
X_flip.extend(flipped_imgs)
X_flip = np.array(X_flip, dtype = np.float32)
return X_flip
# Produce each image at scaling of 90%, 75% and 60% of original image.
# The augmentation helpers expect image arrays, so load each file with matplotlib
# instead of passing raw filenames.
image_dir = "/home/pallab/gestures-cnn/images/resized/"
X_imgs = [mpimg.imread(os.path.join(image_dir, fname)) for fname in os.listdir(image_dir)]
scaled_imgs = central_scale_images(X_imgs, [0.90, 0.75, 0.60])
translated_imgs = translate_images(X_imgs)
rotated_imgs = rotate_images(X_imgs)
flipped_images = flip_images(X_imgs)
|
StarcoderdataPython
|
3301050
|
<reponame>code-watch/meltano<gh_stars>0
import click
import json
from . import cli
from .params import project
from meltano.core.db import project_engine
from meltano.core.project import Project
from meltano.core.plugin import PluginType
from meltano.core.config_service import ConfigService
from meltano.core.plugin.settings_service import (
PluginSettingsService,
SettingValueSource,
SettingValueStore,
)
@cli.group(invoke_without_command=True)
@click.option(
"--plugin-type", type=click.Choice(PluginType.cli_arguments()), default=None
)
@click.argument("plugin_name")
@click.option("--format", type=click.Choice(["json", "env"]), default="json")
@project(migrate=True)
@click.pass_context
def config(ctx, project, plugin_type, plugin_name, format):
plugin_type = PluginType.from_cli_argument(plugin_type) if plugin_type else None
config = ConfigService(project)
plugin = config.find_plugin(plugin_name, plugin_type=plugin_type, configurable=True)
_, Session = project_engine(project)
session = Session()
try:
settings = PluginSettingsService(project).build(plugin)
ctx.obj["settings"] = settings
ctx.obj["session"] = session
if ctx.invoked_subcommand is None:
if format == "json":
config = settings.as_config(session=session)
print(json.dumps(config))
elif format == "env":
for env, value in settings.as_env(session=session).items():
print(f"{env}={value}")
finally:
session.close()
@config.command()
@click.argument("setting_name", nargs=-1, required=True)
@click.argument("value")
@click.option(
"--store",
type=click.Choice(list(SettingValueStore)),
default=SettingValueStore.MELTANO_YML,
)
@click.pass_context
def set(ctx, setting_name, value, store):
settings = ctx.obj["settings"]
session = ctx.obj["session"]
path = list(setting_name)
settings.set(path, value, store=store, session=session)
@config.command()
@click.argument("setting_name", nargs=-1, required=True)
@click.option(
"--store",
type=click.Choice(list(SettingValueStore)),
default=SettingValueStore.MELTANO_YML,
)
@click.pass_context
def unset(ctx, setting_name, store):
settings = ctx.obj["settings"]
session = ctx.obj["session"]
path = list(setting_name)
settings.unset(path, store=store, session=session)
@config.command()
@click.option(
"--store",
type=click.Choice(list(SettingValueStore)),
default=SettingValueStore.MELTANO_YML,
)
@click.pass_context
def reset(ctx, store):
settings = ctx.obj["settings"]
session = ctx.obj["session"]
settings.reset(store=store, session=session)
@config.command("list")
@click.pass_context
def list_settings(ctx):
settings = ctx.obj["settings"]
session = ctx.obj["session"]
full_config = settings.config_with_metadata(session=session)
for name, config_metadata in full_config.items():
value = config_metadata["value"]
source = config_metadata["source"]
setting_def = config_metadata["setting"]
if setting_def._custom:
click.echo("custom: ", nl=False)
click.secho(name, fg="blue", nl=False)
env_key = settings.setting_env(setting_def)
click.echo(f" [env: {env_key}]", nl=False)
current_value = click.style(f"{value!r}", fg="green")
if source is SettingValueSource.DEFAULT:
click.echo(f" current value: {current_value}", nl=False)
# The default value and the current value may not match
# if env vars have been expanded
if setting_def.value == value:
click.echo(" (from default)")
else:
click.echo(f" (from default: {setting_def.value!r})")
else:
if setting_def.value is not None:
click.echo(f" (default: {setting_def.value!r})", nl=False)
click.echo(f" current value: {current_value} (from {source.label})")
if setting_def.description:
click.echo("\t", nl=False)
if setting_def.label:
click.echo(f"{setting_def.label}: ", nl=False)
click.echo(f"{setting_def.description}")
|
StarcoderdataPython
|
94098
|
<gh_stars>1-10
from django.conf import settings
SLACK_VERIFICATION_TOKEN = settings.SLACK_VERIFICATION_TOKEN
SLACK_BOT_TOKEN = settings.SLACK_BOT_TOKEN
import logging
logging.getLogger().setLevel(logging.INFO)
from pyee import EventEmitter
from slacker import Slacker
CLIENT = Slacker(SLACK_BOT_TOKEN)
class SlackEventAdapter(EventEmitter):
def __init__(self, verification_token):
EventEmitter.__init__(self)
self.verification_token = verification_token
slack_events_adapter = SlackEventAdapter(SLACK_VERIFICATION_TOKEN)
# Example responder to greetings
@slack_events_adapter.on("message")
def handle_message(event_data):
message = event_data["event"]
# If the incoming message contains "hi", then respond with a "Hello" message
if message.get("subtype") is None and "hi" in message.get('text'):
channel = message["channel"]
message = "Hello <@%s>! :tada:" % message["user"]
logging.info("chat.postMessage: channel: %s text: %s" % (channel, message))
CLIENT.chat.post_message(channel, message)
# Example reaction emoji echo
@slack_events_adapter.on("reaction_added")
def reaction_added(event_data):
event = event_data["event"]
emoji = event["reaction"]
channel = event["item"]["channel"]
text = ":%s:" % emoji
logging.info("chat.postMessage: channel: %s text: %s" % (channel, text))
CLIENT.chat.post_message(channel, text)
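# Hypothetical shape of the event_data payload handle_message() receives; the field
# names follow the Slack Events API and the values are illustrative only:
#
#   {"event": {"type": "message", "channel": "C12345", "user": "U67890", "text": "hi there"}}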
|
StarcoderdataPython
|
1703438
|
# static analysis: ignore
from .test_name_check_visitor import TestNameCheckVisitorBase
from .test_node_visitor import assert_passes
from .value import (
NO_RETURN_VALUE,
AnnotatedValue,
AnySource,
AnyValue,
KVPair,
assert_is_value,
CallableValue,
GenericValue,
SequenceIncompleteValue,
KnownValue,
TypedValue,
DictIncompleteValue,
SubclassValue,
MultiValuedValue,
make_weak,
)
class TestSuperCall(TestNameCheckVisitorBase):
@assert_passes()
def test_basic(self):
# there was a bug where we would insert the 'self' argument twice for super methods
class Cachiyacuy(object):
def eat_food(self):
pass
class Acouchy(Cachiyacuy):
def do_it(self):
return self.eat_food()
def eat_food(self):
super(Acouchy, self).eat_food()
@assert_passes()
def test_super_no_args(self):
class Canaanimys:
def __init__(self, a, b):
super().__init__()
@assert_passes()
def test_super_no_args_wrong_args(self):
class Gaudeamus:
def eat(self):
pass
class Canaanimys(Gaudeamus):
def eat(self, grass=None):
super(Canaanimys, self).eat(grass) # E: incompatible_call
@assert_passes()
def test_super_no_args_wrong_args_classmethod(self):
class Gaudeamus:
@classmethod
def eat(cls):
pass
class Canaanimys(Gaudeamus):
@classmethod
def eat(cls, grass):
super().eat(grass) # E: incompatible_call
@assert_passes()
def test_super_no_args_in_comprehension(self):
class Canaanimys:
def __init__(self, a, b):
self.x = [super().__init__() for _ in range(1)] # E: bad_super_call
@assert_passes()
def test_super_no_args_in_gen_exp(self):
class Canaanimys:
def __init__(self, a, b):
self.x = (super().__init__() for _ in range(1)) # E: bad_super_call
@assert_passes()
def test_super_no_args_in_nested_function(self):
class Canaanimys:
def __init__(self, a, b):
def nested():
self.x = super().__init__() # E: bad_super_call
nested()
@assert_passes()
def test_super_init_subclass(self):
class Pithanotomys:
def __init_subclass__(self):
super().__init_subclass__()
@assert_passes()
def test_good_super_call(self):
from pyanalyze.tests import wrap, PropertyObject
@wrap
class Tainotherium(PropertyObject):
def non_async_method(self):
super(Tainotherium.base, self).non_async_method()
@assert_passes()
def test_bad_super_call(self):
from pyanalyze.tests import wrap, PropertyObject
@wrap
class Tainotherium2(PropertyObject):
def non_async_method(self):
super(Tainotherium2, self).non_async_method() # E: bad_super_call
@assert_passes()
def test_first_arg_is_base(self):
class Base1(object):
def method(self):
pass
class Base2(Base1):
def method(self):
pass
class Child(Base2):
def method(self):
super(Base2, self).method() # E: bad_super_call
@assert_passes()
def test_bad_super_call_classmethod(self):
from pyanalyze.tests import wrap, PropertyObject
@wrap
class Tainotherium3(PropertyObject):
@classmethod
def no_args_classmethod(cls):
super(Tainotherium3, cls).no_args_classmethod() # E: bad_super_call
@assert_passes()
def test_super_attribute(self):
class MotherCapybara(object):
def __init__(self, grass):
pass
class ChildCapybara(MotherCapybara):
def __init__(self):
super(ChildCapybara, self).__init__() # E: incompatible_call
@assert_passes()
def test_undefined_super_attribute(self):
class MotherCapybara(object):
pass
class ChildCapybara(MotherCapybara):
@classmethod
def toggle(cls):
super(ChildCapybara, cls).toggle() # E: undefined_attribute
@assert_passes()
def test_metaclass(self):
class CapybaraType(type):
def __init__(self, name, bases, attrs):
super(CapybaraType, self).__init__(name, bases, attrs)
class Capybara(metaclass=CapybaraType):
pass
@assert_passes()
def test_mixin(self):
class Base(object):
@classmethod
def eat(cls):
pass
class Mixin(object):
@classmethod
def eat(cls):
super(Mixin, cls).eat()
class Capybara(Mixin, Base):
pass
class TestSequenceImpl(TestNameCheckVisitorBase):
@assert_passes()
def test(self):
from typing import Sequence
from typing_extensions import Literal
def capybara(x, ints: Sequence[Literal[1, 2]]):
# no arguments
assert_is_value(set(), KnownValue(set()))
assert_is_value(list(), KnownValue([]))
# KnownValue
assert_is_value(tuple([1, 2, 3]), KnownValue((1, 2, 3)))
# Comprehensions
one_two = MultiValuedValue([KnownValue(1), KnownValue(2)])
assert_is_value(tuple(i for i in ints), GenericValue(tuple, [one_two]))
assert_is_value(tuple({i: i for i in ints}), GenericValue(tuple, [one_two]))
# SequenceIncompleteValue
assert_is_value(
tuple([int(x)]), SequenceIncompleteValue(tuple, [TypedValue(int)])
)
# fallback
assert_is_value(
tuple(x), GenericValue(tuple, [AnyValue(AnySource.generic_argument)])
)
# argument that is iterable but does not have __iter__
assert_is_value(tuple(str(x)), GenericValue(tuple, [TypedValue(str)]))
@assert_passes()
def test_not_iterable(self):
def capybara(x):
tuple(3) # E: unsupported_operation
tuple(int(x)) # E: unsupported_operation
class TestFormat(TestNameCheckVisitorBase):
@assert_passes()
def test_basic(self):
def capybara():
assert_is_value("{}".format(0), TypedValue(str))
assert_is_value("{x}".format(x=0), TypedValue(str))
assert_is_value("{} {x.imag!r:.2d}".format(0, x=0), TypedValue(str))
assert_is_value("{x[0]} {y[x]}".format(x=[0], y={"x": 0}), TypedValue(str))
assert_is_value("{{X}} {}".format(0), TypedValue(str))
assert_is_value("{0:.{1:d}e}".format(0, 1), TypedValue(str))
assert_is_value("{:<{width}}".format("", width=1), TypedValue(str))
@assert_passes()
def test_errors(self):
def out_of_range_implicit():
"{} {}".format(0) # E: incompatible_call
def out_of_range_numbered():
"{0} {1}".format(0) # E: incompatible_call
def out_of_range_named():
"{x}".format(y=3) # E: incompatible_call
def unused_numbered():
"{}".format(0, 1) # E: incompatible_call
def unused_names():
"{x}".format(x=0, y=1) # E: incompatible_call
@assert_passes()
def test_union(self):
def capybara(cond):
if cond:
template = "{a} {b}"
else:
template = "{a} {b} {c}"
string = template.format(a="a", b="b", c="c")
assert_is_value(string, TypedValue(str))
class TestTypeMethods(TestNameCheckVisitorBase):
@assert_passes()
def test(self):
class Capybara(object):
def __init__(self, name):
pass
def foo(self):
print(Capybara.__subclasses__())
class TestEncodeDecode(TestNameCheckVisitorBase):
@assert_passes()
def test(self):
def capybara(s: str, b: bytes):
assert_is_value(s.encode("utf-8"), TypedValue(bytes))
assert_is_value(b.decode("utf-8"), TypedValue(str))
@assert_passes()
def test_encode_wrong_type(self):
def capybara():
# TODO this should produce only one error
"".encode(42) # E: incompatible_call # E: incompatible_argument
@assert_passes()
def test_decode_wrong_type(self):
def capybara():
b"".decode(42) # E: incompatible_call # E: incompatible_argument
class TestLen(TestNameCheckVisitorBase):
@assert_passes()
def test(self):
def capybara(x):
assert_is_value(len("a"), KnownValue(1))
assert_is_value(len(list(x)), TypedValue(int))
# if we don't know the type, there should be no error
len(x)
@assert_passes()
def test_narrowing(self):
def capybara(cond):
lst = () if cond else (1,)
assert_is_value(lst, MultiValuedValue([KnownValue(()), KnownValue((1,))]))
if len(lst) == 1:
assert_is_value(lst, KnownValue((1,)))
else:
assert_is_value(lst, KnownValue(()))
if len(lst) > 0:
assert_is_value(lst, KnownValue((1,)))
else:
assert_is_value(lst, KnownValue(()))
@assert_passes()
def test_wrong_type(self):
def capybara():
len(3) # E: incompatible_argument
class TestCast(TestNameCheckVisitorBase):
@assert_passes()
def test(self):
from typing import cast, List
def capybara():
assert_is_value(cast(str, 1), TypedValue(str))
assert_is_value(cast("str", 1), TypedValue(str))
assert_is_value(cast("List[str]", 1), GenericValue(list, [TypedValue(str)]))
@assert_passes()
def test_undefined_name(self):
from typing import cast, List
def capybara():
cast("List[fail]", 1) # E: undefined_name
class TestSubclasses(TestNameCheckVisitorBase):
@assert_passes()
def test(self):
class Parent:
pass
class Child(Parent):
pass
def capybara(typ: type):
assert_is_value(
typ.__subclasses__(), GenericValue(list, [TypedValue(type)])
)
assert_is_value(Parent.__subclasses__(), KnownValue([Child]))
class TestGenericMutators(TestNameCheckVisitorBase):
@assert_passes()
def test_list_append(self):
from typing import List
def capybara(x: int):
lst = [x]
assert_is_value(lst, SequenceIncompleteValue(list, [TypedValue(int)]))
lst.append(1)
assert_is_value(
lst, SequenceIncompleteValue(list, [TypedValue(int), KnownValue(1)])
)
lst: List[str] = ["x"]
assert_is_value(lst, GenericValue(list, [TypedValue(str)]))
lst.append("y")
assert_is_value(lst, GenericValue(list, [TypedValue(str)]))
lst = ["x"]
assert_is_value(lst, KnownValue(["x"]))
lst.append(3)
assert_is_value(lst, KnownValue(["x", 3]))
@assert_passes()
def test_list_append_pos_only(self):
from typing import List
def capybara(lst: List[int]) -> None:
lst.append(object=42) # E: incompatible_call
@assert_passes()
def test_list_append_wrong_type(self):
from typing import List
def capybara():
lst: List[str] = ["x"]
assert_is_value(lst, GenericValue(list, [TypedValue(str)]))
lst.append(1) # E: incompatible_argument
@assert_passes()
def test_set_add(self):
from typing import Set
def capybara(x: int):
lst = {x}
assert_is_value(lst, SequenceIncompleteValue(set, [TypedValue(int)]))
lst.add(1)
assert_is_value(
lst, SequenceIncompleteValue(set, [TypedValue(int), KnownValue(1)])
)
lst: Set[str] = {"x"}
assert_is_value(lst, GenericValue(set, [TypedValue(str)]))
lst.add("y")
assert_is_value(lst, GenericValue(set, [TypedValue(str)]))
@assert_passes()
def test_list_add(self):
from typing import List
def capybara(x: int, y: str) -> None:
assert_is_value(
[x] + [y],
SequenceIncompleteValue(list, [TypedValue(int), TypedValue(str)]),
)
assert_is_value(
[x] + [1],
SequenceIncompleteValue(list, [TypedValue(int), KnownValue(1)]),
)
left: List[int] = []
right: List[str] = []
assert_is_value(
left + right,
GenericValue(
list, [MultiValuedValue([TypedValue(int), TypedValue(str)])]
),
)
assert_is_value(left + left, GenericValue(list, [TypedValue(int)]))
union_list1 = left if x else right
union_list2 = left if y else right
assert_is_value(
# need to call list.__add__ directly because we just give up on unions
# in the binop implementation
list.__add__(union_list1, union_list2),
MultiValuedValue(
[
GenericValue(list, [TypedValue(int)]),
GenericValue(
list, [MultiValuedValue([TypedValue(int), TypedValue(str)])]
),
GenericValue(
list, [MultiValuedValue([TypedValue(str), TypedValue(int)])]
),
GenericValue(list, [TypedValue(str)]),
]
),
)
@assert_passes()
def test_list_extend(self):
from typing import List
def capybara(x: int, y: str) -> None:
lst = [x]
assert_is_value(lst, SequenceIncompleteValue(list, [TypedValue(int)]))
lst.extend([y])
assert_is_value(
lst, SequenceIncompleteValue(list, [TypedValue(int), TypedValue(str)])
)
# If we extend with a set, don't use a SequenceIncompleteValue any more,
# because we don't know how many values were added or in what order.
# (Technically we do know for a one-element set, but that doesn't seem worth
# writing a special case for.)
lst.extend({float(1.0)})
assert_is_value(
lst,
make_weak(
GenericValue(
list,
[
MultiValuedValue(
[TypedValue(int), TypedValue(str), TypedValue(float)]
)
],
)
),
)
lst: List[int] = [3]
assert_is_value(lst, GenericValue(list, [TypedValue(int)]))
lst.extend([x])
assert_is_value(lst, GenericValue(list, [TypedValue(int)]))
@assert_passes()
def test_list_iadd(self):
from typing import List
def capybara(x: int, y: str) -> None:
lst = [x]
assert_is_value(lst, SequenceIncompleteValue(list, [TypedValue(int)]))
lst += [y]
assert_is_value(
lst, SequenceIncompleteValue(list, [TypedValue(int), TypedValue(str)])
)
# If we extend with a set, don't use a SequenceIncompleteValue any more,
# because we don't know how many values were added or in what order.
# (Technically we do know for a one-element set, but that doesn't seem worth
# writing a special case for.)
lst += {float(1.0)}
assert_is_value(
lst,
make_weak(
GenericValue(
list,
[
MultiValuedValue(
[TypedValue(int), TypedValue(str), TypedValue(float)]
)
],
)
),
)
lst: List[int] = [3]
assert_is_value(lst, GenericValue(list, [TypedValue(int)]))
lst += [x]
assert_is_value(lst, GenericValue(list, [TypedValue(int)]))
@assert_passes()
def test_list_iadd_never(self):
def render_feedback_text():
z = []
detail_text = None
if detail_text:
assert_is_value(detail_text, NO_RETURN_VALUE)
z += detail_text
return z
@assert_passes()
def test_weak_value(self):
from typing import List
from typing_extensions import Literal
def func() -> List[Literal["c", "d"]]:
return ["d", "c"]
def capybara() -> None:
lst = ["a", "b"]
assert_is_value(lst, KnownValue(["a", "b"]))
lst.extend(func())
assert_is_value(
lst,
make_weak(
GenericValue(
list,
[
MultiValuedValue(
[
KnownValue("a"),
KnownValue("b"),
KnownValue("c"),
KnownValue("d"),
]
)
],
)
),
)
lst.extend(["e"])
assert_is_value(
lst,
make_weak(
GenericValue(
list,
[
MultiValuedValue(
[
KnownValue("a"),
KnownValue("b"),
KnownValue("c"),
KnownValue("d"),
KnownValue("e"),
]
)
],
)
),
)
lst.append("f")
assert_is_value(
lst,
make_weak(
GenericValue(
list,
[
MultiValuedValue(
[
KnownValue("a"),
KnownValue("b"),
KnownValue("c"),
KnownValue("d"),
KnownValue("e"),
KnownValue("f"),
]
)
],
)
),
)
@assert_passes()
def test_starred_weak(self):
from typing import List
from typing_extensions import Literal
def capybara(arg) -> None:
lst1: List[Literal["a"]] = ["a" for _ in arg]
lst2 = [*lst1, "b"]
assert_is_value(
lst2,
make_weak(
GenericValue(
list, [MultiValuedValue([KnownValue("a"), KnownValue("b")])]
)
),
)
lst2.append("c")
assert_is_value(
lst2,
make_weak(
GenericValue(
list,
[
MultiValuedValue(
[KnownValue("a"), KnownValue("b"), KnownValue("c")]
)
],
)
),
)
@assert_passes()
def test_list_extend_wrong_type(self):
from typing import List
def capybara():
lst: List[int] = [3]
lst.extend([str(3)]) # E: incompatible_argument
@assert_passes()
def test_list_extend_union(self):
def capybara(cond):
if cond:
lst = [1 for _ in cond]
else:
lst = [2 for _ in cond]
assert_is_value(
lst,
MultiValuedValue(
[
make_weak(GenericValue(list, [KnownValue(1)])),
make_weak(GenericValue(list, [KnownValue(2)])),
]
),
)
lst.extend([3, 4])
# TODO: this is wrong; it drops all but the last Union member
assert_is_value(
lst,
make_weak(
GenericValue(
list,
[
MultiValuedValue(
[KnownValue(2), KnownValue(3), KnownValue(4)]
)
],
)
),
)
@assert_passes()
def test_dict_get(self):
from typing_extensions import TypedDict, NotRequired
from typing import Dict
class TD(TypedDict):
a: int
b: str
c: NotRequired[str]
def capybara(td: TD, s: str, d: Dict[str, int], untyped: dict):
assert_is_value(td.get("a"), TypedValue(int))
assert_is_value(td.get("c"), TypedValue(str) | KnownValue(None))
assert_is_value(td.get("c", 1), TypedValue(str) | KnownValue(1))
td.get(1) # E: invalid_typeddict_key
known = {"a": "b"}
assert_is_value(known.get("a"), KnownValue("b") | KnownValue(None))
assert_is_value(known.get("b", 1), KnownValue(1))
assert_is_value(known.get(s), KnownValue("b") | KnownValue(None))
incomplete = {**td, "b": 1, "d": s}
assert_is_value(incomplete.get("a"), TypedValue(int) | KnownValue(None))
assert_is_value(incomplete.get("b"), KnownValue(1) | KnownValue(None))
assert_is_value(incomplete.get("d"), TypedValue(str) | KnownValue(None))
assert_is_value(incomplete.get("e"), KnownValue(None))
assert_is_value(d.get("x"), TypedValue(int) | KnownValue(None))
assert_is_value(d.get(s), TypedValue(int) | KnownValue(None))
d.get(1) # E: incompatible_argument
untyped.get([]) # E: unhashable_key
@assert_passes()
def test_setdefault(self):
from typing_extensions import TypedDict
from typing import Dict, Sequence
class TD(TypedDict):
a: int
b: str
def typeddict(td: TD):
td.setdefault({}) # E: unhashable_key
td.setdefault(0) # E: invalid_typeddict_key
td.setdefault("c") # E: invalid_typeddict_key
td.setdefault("a", "s") # E: incompatible_argument
assert_is_value(td.setdefault("b", "x"), TypedValue(str))
def dict_incomplete_value():
incomplete_value = {"a": str(TD)}
assert_is_value(
incomplete_value,
DictIncompleteValue(dict, [KVPair(KnownValue("a"), TypedValue(str))]),
)
assert_is_value(incomplete_value.setdefault("b"), KnownValue(None))
assert_is_value(
incomplete_value,
DictIncompleteValue(
dict,
[
KVPair(KnownValue("a"), TypedValue(str)),
KVPair(KnownValue("b"), KnownValue(None)),
],
),
)
assert_is_value(
incomplete_value.setdefault("a"),
MultiValuedValue([KnownValue(None), TypedValue(str)]),
)
assert_is_value(
incomplete_value,
DictIncompleteValue(
dict,
[
KVPair(KnownValue("a"), TypedValue(str)),
KVPair(KnownValue("b"), KnownValue(None)),
KVPair(KnownValue("a"), KnownValue(None), is_required=False),
],
),
)
def weak_typed(ints: Sequence[int]):
weak_dict = {i: str(i) for i in ints}
assert_is_value(
weak_dict,
make_weak(GenericValue(dict, [TypedValue(int), TypedValue(str)])),
)
assert_is_value(weak_dict.setdefault(3, str(TD)), TypedValue(str))
int_or_3 = MultiValuedValue([TypedValue(int), KnownValue(3)])
assert_is_value(
weak_dict, make_weak(GenericValue(dict, [int_or_3, TypedValue(str)]))
)
assert_is_value(
weak_dict.setdefault(3),
MultiValuedValue([TypedValue(str), KnownValue(None)]),
)
assert_is_value(
weak_dict,
make_weak(
GenericValue(
dict,
[
int_or_3,
MultiValuedValue([TypedValue(str), KnownValue(None)]),
],
)
),
)
def strong_typed(strong_dict: Dict[int, str]):
expected = GenericValue(dict, [TypedValue(int), TypedValue(str)])
assert_is_value(strong_dict, expected)
assert_is_value(strong_dict.setdefault(3, str(TD)), TypedValue(str))
assert_is_value(strong_dict, expected)
assert_is_value(
strong_dict.setdefault(3),
MultiValuedValue([TypedValue(str), KnownValue(None)]),
)
assert_is_value(strong_dict, expected)
@assert_passes()
def test_dict_update(self):
def capybara():
d1 = {}
d1.update({})
d2 = {}
d2.update(a=3, b=4)
assert_is_value(
d2,
DictIncompleteValue(
dict,
[
KVPair(KnownValue("a"), KnownValue(3)),
KVPair(KnownValue("b"), KnownValue(4)),
],
),
)
d2.update([("a", 4), ("b", 5)])
assert_is_value(
d2,
DictIncompleteValue(
dict,
[
KVPair(KnownValue("a"), KnownValue(3)),
KVPair(KnownValue("b"), KnownValue(4)),
KVPair(KnownValue("a"), KnownValue(4)),
KVPair(KnownValue("b"), KnownValue(5)),
],
),
)
@assert_passes()
def test_copy_and_update(self):
from typing import Dict
from pyanalyze.value import WeakExtension
def capybara():
d1: Dict[str, int] = {"x": 1}
d1_val = GenericValue(dict, [TypedValue(str), TypedValue(int)])
assert_is_value(d1, d1_val)
d1[1] = 3 # E: incompatible_argument
d2 = d1.copy()
assert_is_value(d2, AnnotatedValue(d1_val, [WeakExtension()]))
d2[1] = 3
assert_is_value(
d2,
DictIncompleteValue(
dict,
[
KVPair(TypedValue(str), TypedValue(int), is_many=True),
KVPair(KnownValue(1), KnownValue(3)),
],
),
)
class TestSequenceGetItem(TestNameCheckVisitorBase):
@assert_passes()
def test_list(self):
from typing import List
def capybara(lst: List[int], i: int, s: slice, unannotated) -> None:
assert_is_value(lst[0], TypedValue(int))
assert_is_value(lst[-1], TypedValue(int))
assert_is_value(lst[:1], GenericValue(list, [TypedValue(int)]))
assert_is_value(lst[i], TypedValue(int))
assert_is_value(lst[s], GenericValue(list, [TypedValue(int)]))
assert_is_value(lst[unannotated], AnyValue(AnySource.from_another))
empty = []
assert_is_value(empty[0], AnyValue(AnySource.unreachable))
assert_is_value(empty[1:], KnownValue([]))
assert_is_value(empty[i], AnyValue(AnySource.unreachable))
assert_is_value(empty[s], SequenceIncompleteValue(list, []))
assert_is_value(empty[unannotated], AnyValue(AnySource.from_another))
known = [1, 2]
assert_is_value(known[0], KnownValue(1))
assert_is_value(known[-1], KnownValue(2))
assert_is_value(known[-5], KnownValue(1) | KnownValue(2))
assert_is_value(known[1:], KnownValue([2]))
assert_is_value(known[::-1], KnownValue([2, 1]))
assert_is_value(known[i], KnownValue(1) | KnownValue(2))
assert_is_value(
known[s], SequenceIncompleteValue(list, [KnownValue(1), KnownValue(2)])
)
assert_is_value(known[unannotated], AnyValue(AnySource.from_another))
@assert_passes()
def test_tuple(self):
from typing import Tuple
def capybara(tpl: Tuple[int, ...], i: int, s: slice, unannotated) -> None:
assert_is_value(tpl[0], TypedValue(int))
assert_is_value(tpl[-1], TypedValue(int))
assert_is_value(tpl[:1], GenericValue(tuple, [TypedValue(int)]))
assert_is_value(tpl[:], GenericValue(tuple, [TypedValue(int)]))
assert_is_value(tpl[i], TypedValue(int))
assert_is_value(tpl[s], GenericValue(tuple, [TypedValue(int)]))
assert_is_value(tpl[unannotated], AnyValue(AnySource.from_another))
empty = ()
assert_is_value(empty[0], AnyValue(AnySource.error)) # E: incompatible_call
assert_is_value(empty[1:], KnownValue(()))
assert_is_value(empty[i], AnyValue(AnySource.unreachable))
assert_is_value(empty[s], SequenceIncompleteValue(tuple, []))
assert_is_value(empty[unannotated], AnyValue(AnySource.from_another))
known = (1, 2)
assert_is_value(known[0], KnownValue(1))
assert_is_value(known[-1], KnownValue(2))
assert_is_value(
known[-5], AnyValue(AnySource.error) # E: incompatible_call
)
assert_is_value(known[1:], KnownValue((2,)))
assert_is_value(known[::-1], KnownValue((2, 1)))
assert_is_value(known[i], KnownValue(1) | KnownValue(2))
assert_is_value(
known[s], SequenceIncompleteValue(tuple, [KnownValue(1), KnownValue(2)])
)
assert_is_value(known[unannotated], AnyValue(AnySource.from_another))
@assert_passes()
def test_list_index(self):
def capybara(x):
lst = ["a", "b", int(x)]
assert_is_value(lst[0], KnownValue("a"))
assert_is_value(lst[2], TypedValue(int))
assert_is_value(lst[-2], KnownValue("b"))
assert_is_value(lst[5], KnownValue("a") | KnownValue("b") | TypedValue(int))
@assert_passes()
def test_tuple_index(self):
def capybara(x):
tpl = ("a", "b", int(x))
assert_is_value(tpl[0], KnownValue("a"))
assert_is_value(tpl[2], TypedValue(int))
assert_is_value(tpl[-2], KnownValue("b"))
assert_is_value(tpl[5], AnyValue(AnySource.error)) # E: incompatible_call
@assert_passes()
def test_tuple_annotation(self):
from typing import Tuple
def capybara(tpl: Tuple[int, str, float]) -> None:
assert_is_value(tpl[0], TypedValue(int))
assert_is_value(tpl[-2], TypedValue(str))
assert_is_value(tpl[2], TypedValue(float))
@assert_passes()
def test_list_in_lambda(self):
from typing import List
def capybara(words: List[str]):
sorted_indexes = sorted(range(len(words)), key=lambda i: words[i])
return sorted_indexes
@assert_passes()
def test_subclasses(self):
import time
class MyList(list):
pass
class MyTuple(tuple):
pass
def capybara(t: time.struct_time, ml: MyList, mt: MyTuple):
assert_is_value(t[0], TypedValue(int))
assert_is_value(t[:], TypedValue(tuple))
assert_is_value(t[:6], TypedValue(tuple))
assert_is_value(ml[0], AnyValue(AnySource.generic_argument))
assert_is_value(ml[:], TypedValue(list))
assert_is_value(mt[0], AnyValue(AnySource.generic_argument))
assert_is_value(mt[:], TypedValue(tuple))
class TestDictGetItem(TestNameCheckVisitorBase):
@assert_passes()
def test_unhashable(self):
def capybara():
d = {}
d[{}] # E: unhashable_key
@assert_passes()
def test_invalid_typeddict_key(self):
from typing_extensions import TypedDict
class TD(TypedDict):
a: int
def capybara(td: TD):
td[1] # E: invalid_typeddict_key
@assert_passes()
def test_incomplete_value(self):
def capybara(a: int, unresolved):
incomplete_value = {a: 1, "b": 2, "c": "s"}
assert_is_value(
incomplete_value,
DictIncompleteValue(
dict,
[
KVPair(TypedValue(int), KnownValue(1)),
KVPair(KnownValue("b"), KnownValue(2)),
KVPair(KnownValue("c"), KnownValue("s")),
],
),
)
assert_is_value(incomplete_value["b"], KnownValue(2))
assert_is_value(incomplete_value[1], KnownValue(1))
assert_is_value(
incomplete_value[unresolved],
MultiValuedValue([KnownValue(1), KnownValue(2), KnownValue("s")]),
)
# unknown key
assert_is_value(incomplete_value["other string"], AnyValue(AnySource.error))
# MultiValuedValue
key = "b" if unresolved else "c"
assert_is_value(
incomplete_value[key],
MultiValuedValue([KnownValue(2), KnownValue("s")]),
)
@assert_passes()
def test_complex_incomplete(self):
from typing import Sequence
from typing_extensions import NotRequired, TypedDict
class TD(TypedDict):
a: float
b: NotRequired[bool]
def capybara(i: int, seq: Sequence[int], td: TD, s: str):
d1 = {"a": i, "b": i + 1}
d2 = {i: 1 for i in seq}
d3 = {"a": 1, **d1, "b": 2, **d2}
assert_is_value(
d3,
DictIncompleteValue(
dict,
[
KVPair(KnownValue("a"), KnownValue(1)),
KVPair(KnownValue("a"), TypedValue(int)),
KVPair(KnownValue("b"), TypedValue(int)),
KVPair(KnownValue("b"), KnownValue(2)),
KVPair(TypedValue(int), KnownValue(1), is_many=True),
],
),
)
assert_is_value(d3[1], KnownValue(1))
assert_is_value(d3["a"], TypedValue(int))
assert_is_value(d3["b"], KnownValue(2))
assert_is_value(d3[s], TypedValue(int) | KnownValue(2))
d4 = {**d3, **td}
assert_is_value(d4[1], KnownValue(1))
assert_is_value(d4["a"], TypedValue(float))
assert_is_value(d4["b"], KnownValue(2) | TypedValue(bool))
assert_is_value(d4[s], TypedValue(float) | KnownValue(2) | TypedValue(bool))
@assert_passes()
def test(self):
from typing import Dict, Generic, TypeVar
from typing_extensions import TypedDict
K = TypeVar("K")
V = TypeVar("V")
class ReversedDict(Generic[V, K], Dict[K, V]):
pass
class NormalDict(Generic[K, V], Dict[K, V]):
pass
class TD(TypedDict):
a: int
def capybara(
td: TD,
dct: Dict[str, int],
rev: ReversedDict[str, int],
nd: NormalDict[int, str],
untyped: dict,
):
d = {1: 2}
assert_is_value(d[1], KnownValue(2))
assert_is_value(td["a"], TypedValue(int))
assert_is_value(dct["key"], TypedValue(int))
assert_is_value(nd[1], TypedValue(str))
assert_is_value(rev[1], TypedValue(str))
untyped[[]] # E: unhashable_key
dct[1] # E: incompatible_argument
@assert_passes()
def test_type_as_key(self):
from typing import Type
def capybara(d: dict, t: Type[int]):
d[int]
d[t]
class TestDictSetItem(TestNameCheckVisitorBase):
@assert_passes()
def test_typeddict_setitem_valid(self):
from typing_extensions import TypedDict
class TD(TypedDict):
x: int
def capybara(td: TD) -> None:
td["x"] = 42
@assert_passes()
def test_typeddict_non_literal_key(self):
from typing_extensions import TypedDict
class TD(TypedDict):
x: int
def capybara(td: TD) -> None:
td[41] = 42 # E: invalid_typeddict_key
@assert_passes()
def test_typeddict_unrecognized_key(self):
from typing_extensions import TypedDict
class TD(TypedDict):
x: int
def capybara(td: TD) -> None:
td["y"] = 42 # E: invalid_typeddict_key
@assert_passes()
def test_typeddict_bad_value(self):
from typing_extensions import TypedDict
class TD(TypedDict):
x: int
def capybara(td: TD) -> None:
td["x"] = "y" # E: incompatible_argument
@assert_passes()
def test_incomplete_value(self):
def capybara(x: int, y: str) -> None:
dct = {}
assert_is_value(dct, KnownValue({}))
dct["x"] = x
assert_is_value(
dct,
DictIncompleteValue(dict, [KVPair(KnownValue("x"), TypedValue(int))]),
)
dct[y] = "x"
assert_is_value(
dct,
DictIncompleteValue(
dict,
[
KVPair(KnownValue("x"), TypedValue(int)),
KVPair(TypedValue(str), KnownValue("x")),
],
),
)
@assert_passes()
def test_bad_key_type(self):
from typing import Dict
def capybara(untyped: dict) -> None:
dct: Dict[str, int] = {}
dct[1] = 1 # E: incompatible_argument
untyped[[]] = 1 # E: unhashable_key
@assert_passes()
def test_bad_value_type(self):
from typing import Dict
def capybara() -> None:
dct: Dict[str, int] = {}
dct["1"] = "1" # E: incompatible_argument
@assert_passes()
def test_weak(self):
from pyanalyze.value import WeakExtension
def capybara(arg):
dct = {int(k): 1 for k in arg}
assert_is_value(
dct,
AnnotatedValue(
GenericValue(dict, [TypedValue(int), KnownValue(1)]),
[WeakExtension()],
),
)
dct["x"] = "y"
assert_is_value(
dct,
DictIncompleteValue(
dict,
[
KVPair(TypedValue(int), KnownValue(1), is_many=True),
KVPair(KnownValue("x"), KnownValue("y")),
],
),
)
class TestIssubclass(TestNameCheckVisitorBase):
@assert_passes()
def test(self) -> None:
def capybara(x: type, y):
assert_is_value(x, TypedValue(type))
if issubclass(x, str):
assert_is_value(x, SubclassValue(TypedValue(str)))
if issubclass(y, (int, str)):
assert_is_value(
y,
MultiValuedValue(
[SubclassValue(TypedValue(int)), SubclassValue(TypedValue(str))]
),
)
@assert_passes()
def test_negative_narrowing(self) -> None:
from typing import Type, Union
def capybara(x: Union[Type[str], Type[int]]) -> None:
assert_is_value(
x, SubclassValue(TypedValue(str)) | SubclassValue(TypedValue(int))
)
if issubclass(x, str):
assert_is_value(x, SubclassValue(TypedValue(str)))
else:
assert_is_value(x, SubclassValue(TypedValue(int)))
class TestCallableGuards(TestNameCheckVisitorBase):
@assert_passes()
def test_callable(self):
from pyanalyze.signature import ANY_SIGNATURE
def capybara(o: object) -> None:
assert_is_value(o, TypedValue(object))
if callable(o):
assert_is_value(o, CallableValue(ANY_SIGNATURE))
@assert_passes()
def test_isfunction(self):
from types import FunctionType
import inspect
def capybara(o: object) -> None:
assert_is_value(o, TypedValue(object))
if inspect.isfunction(o):
assert_is_value(o, TypedValue(FunctionType))
|
StarcoderdataPython
|
1792204
|
import os
import matplotlib.pyplot as plt
from ArmMovementPredictionStudien.Preprocessing.utils.utils import open_dataset_pandas
import pandas as pd
ROOT_DIR = os.path.dirname(__file__) + "/../../"
base_directory = ROOT_DIR + "DATA/"
raw_directory = base_directory + "0_raw/"
truncated_directory = base_directory + "3_truncated/"
filelist_raw = os.listdir(raw_directory)
filelist_truncated = os.listdir(truncated_directory)
length_raw = {}
for file in filelist_raw:
raw_df = open_dataset_pandas(file, raw_directory)
length_raw.update({file: len(raw_df)})
length_truncated = {}
for file in filelist_truncated:
truncated_df = open_dataset_pandas(file, truncated_directory)
length_truncated.update({file: len(truncated_df)})
fig, ax = plt.subplots(1, 2)
pd.DataFrame({"length_raw": list(length_raw.values())}).hist(ax=ax[0], bins=20)
pd.DataFrame({"length_truncated": list(length_truncated.values())}).hist(ax=ax[1], bins=20)
print(dict(filter(lambda e: e[1] > 160, length_truncated.items())))
plt.show()
|
StarcoderdataPython
|
3396282
|
<gh_stars>0
import stdio
import random
random_values = tuple(map(lambda x: random.random(), range(5)))
stdio.writeln("Random values: " + "\n" +
"-" * 30 + "\n"
"{}".format(random_values) + "\n" +
"-" * 100 + "\n" +
"mean: {}".format(sum(random_values) / 5) + "\n"
"min: {}".format(min(random_values)) + "\n" +
"max: {}".format(max(random_values)) + "\n")
|
StarcoderdataPython
|
114235
|
<reponame>sungho-joo/leetcode2github<gh_stars>0
# @l2g 200 python3
# [200] Number of Islands
# Difficulty: Medium
# https://leetcode.com/problems/number-of-islands
#
# Given an m x n 2D binary grid grid which represents a map of '1's (land) and '0's (water),
# return the number of islands.
# An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically.
# You may assume all four edges of the grid are all surrounded by water.
#
# Example 1:
#
# Input: grid = [
# ["1","1","1","1","0"],
# ["1","1","0","1","0"],
# ["1","1","0","0","0"],
# ["0","0","0","0","0"]
# ]
# Output: 1
#
# Example 2:
#
# Input: grid = [
# ["1","1","0","0","0"],
# ["1","1","0","0","0"],
# ["0","0","1","0","0"],
# ["0","0","0","1","1"]
# ]
# Output: 3
#
#
# Constraints:
#
# m == grid.length
# n == grid[i].length
# 1 <= m, n <= 300
# grid[i][j] is '0' or '1'.
#
#
import collections
from typing import List
class Solution:
def numIslands(self, grid: List[List[str]]) -> int:
n, m = len(grid), len(grid[0])
boundary_checker = lambda x, y: 0 <= x < n and 0 <= y < m
dirs = [[1, 0], [-1, 0], [0, 1], [0, -1]]
def bfs(pos):
q = collections.deque()
q.append(pos)
grid[pos[0]][pos[1]] = -1
while q:
x, y = q.popleft()
for dx, dy in dirs:
nx, ny = x + dx, y + dy
if boundary_checker(nx, ny) and grid[nx][ny] == "1":
q.append([nx, ny])
grid[nx][ny] = -1
ans = 0
for i in range(n):
for j in range(m):
if grid[i][j] == "1":
bfs([i, j])
ans += 1
return ans
if __name__ == "__main__":
import os
import pytest
pytest.main([os.path.join("tests", "test_200.py")])
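# Quick manual check mirroring Example 2 from the problem statement above:
#
#   grid = [["1", "1", "0", "0", "0"],
#           ["1", "1", "0", "0", "0"],
#           ["0", "0", "1", "0", "0"],
#           ["0", "0", "0", "1", "1"]]
#   assert Solution().numIslands(grid) == 3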
|
StarcoderdataPython
|
113188
|
<reponame>Max-PJB/python-learning2
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@ Author : Max_Pengjb
@ date : 2018/9/23 22:37
@ IDE : PyCharm
@ GitHub : https://github.com/JackyPJB
@ Contact : <EMAIL>
-------------------------------------------------
Description :
-------------------------------------------------
"""
import time
__author__ = 'Max_Pengjb'
start = time.time()
# Write the code block below
"""
生成器 (Generator)
生成器是一类特殊的迭代器,它是一种更为高级、更为优雅的迭代器。
在Python中有两种类型的生成器:生成器函数以及生成器表达式。
生成器函数
生成器函数与一个普通函数很像,但是当它要返回一个值时,用的不是return,而是yield。只要函数中使用了yield关键字,他就变成了一个生成器函数:
def Fun1():
yield 1
yield 2
上面例子中的Fun1就是一个生成器函数,它使用了两条yield语句。
如果熟悉return语句的含义,就会对上面的函数感到奇怪:函数每次执行都会在执行完yield 1之后返回,下面的yield 2有什么意义呢?
这正是yield与return语句不同的地方。一般的函数在调用时会立刻执行函数体,直到遇到return语句返回一个值(当然也可以不返回任何值,此处只讨论有返回值的情况)。
而生成器函数不同,他会返回一个生成器,并不立刻执行函数,只有当对生成器进行next操作时,才会真正开始执行函数,我们称这种特性为延迟计算(Lazy Evaluation)
并且,每一次进行next操作,函数会从上一个yield语句(或者函数第一条语句)执行到下一个yield语句(或者函数结尾),然后返回yield的值(或者引发StopIteration异常):
def Fun1():
print("yield 1") #语句1
yield 1 #语句2
print("yield 2") #语句3
yield 2 #语句4
g = Fun1() #获得一个生成器
print("before next")
print(next(g))
print(next(g))
上面的结果是:
before next
yield 1
1
yield 2
2
可以看到直到程序运行到print(next(g))时,Fun1函数才真正开始执行,第一条next使函数从语句1运行到了语句2,第二条next使函数从语句3运行到了语句4,就像函数发生了暂停,下一次调用时就从暂停的位置继续运行。
生成器函数强大的地方在于,它不仅能保存执行流程,执行过程中的局部变量也是能保存的:
def Fun1():
n = 0
yield n
n += 1
yield n
n += 1
yield n
g = Fun1()
for s in g: #生成器也是一个迭代器,所以也可以使用for语句
print(s)
得到的结果是:
0
1
2
每次yield语句之后局部变量n的值都会保存下来,下次next时能够继续使用上一次的n值。
有了这些特性,我们就可以更加优雅的创建迭代器了:
def Fun1(max):
start = 0
while start < max:
yield start
start += 1
g = Fun1(3) #得到一个能返回0-2的生成器
for s in g:
print(s)
得到的结果:
0
1
2
通常,我们将这种能够中断执行,之后又能从断点以上一次的状态继续执行的函数叫做协程(Coroutine)。
列表生成式
要介绍生成器表达式,首先需要了解一下列表生成式。
列表生成式是一种创建列表的方式,当我们要创建一个满足特定条件的列表时,使用它就非常方便。
比如我们要创建一个包含0到10的平方的列表,传统的做法是:
data = []
for x in range(11):
data.append(x * x)
而使用列表生成式,就可以一行语句创建:
data =[x*x for x in range(11)]
方括号里的左边部分是一个表达式x*x,代表列表元素怎么计算得来,右边是用for来表示的范围。
如果要进行条件筛选,可以在for后面带上if语句:
data =[x*x for x in range(11) if x % 2 == 0]
#生成包含0-10中偶数的平方的列表
不仅如此,for语句还能嵌套:
data = [x*y for x in range(11) if x %2 == 0 for y in range(11)]
#列表中的元素是第一个for代表的[0,2,4,6,8,10]与第二个for代表的[0,1,2.....,9,10]中每个元素分别相乘的结果,共6*11=66项
生成器表达式
生成器表达式与列表生成式语法基本一样,只是把方括号[]换成圆括号():
data = (x*x for x in range(11))
此时data是一个生成器,还没有包含0-10的平方项的元素。只有使用next函数调用时,才会一项一项的返回元素:
data = (x*x for x in range(11))
print(next(data))
print(next(data))
print(next(data))
得到的结果:
0
1
4
可见,它也满足延迟计算的特点。
与使用列表生成式马上创建一个包含指定元素的列表的方法相比,使用生成器表达式更节省内存空间,尤其当我们只是需要遍历一个满足特殊要求的序列时:
data = [x*x for x in range(100000)] #一下子需要生成100000个元素,内存占用很大
data2 = (x*x for x in range(100000)) #只创建一个生成器,每次只保存当前x的值,需要时就计算下一个值,节省内存空间
编程要求
根据提示,在右侧编辑器补充代码,实现myrange的功能。
myrange函数接受三个参数start,stop,step,就如同Python内置的函数range一样,start代表起始值,stop代表结束值(不包括),step代表步长。返回值是一个包含这一范围内的元素的迭代器。
测试说明
每组测试有4个数据输入,代表start,stop,step,n,其中start,stop,step为任意整数,n大于等于0。4个输入的数据由测试代码读取,不需要学员处理。
测试代码会将前三个数据作为参数调用myrange函数,然后使用for来打印返回的迭代器中最多n个值。
测试输入:1 2 1 5
输出:
1
测试输入:0 7 1 3
输出:
0
1
2
这一组数据虽然会生成一个有7个数据的迭代器,但由于n是3,所以只输出前3个数据。"""
def myrange(start, stop, step):
    # Complete this generator function's code to implement the required behavior
k = start
if step < 0:
while k > stop:
yield k
k += step
else:
while k < stop:
yield k
k += step
# Write the code block above (between the two marker comments)
end = time.time()
print('Running time: %s Seconds' % (end - start))
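# Usage sketch, mirroring the second sample in the docstring (start=0, stop=7, step=1, n=3):
#
#   g = myrange(0, 7, 1)
#   for _ in range(3):
#       print(next(g))  # prints 0, 1, 2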
|
StarcoderdataPython
|
1785464
|
v = float(input('Enter the amount: '))
print('You can buy US${:.2f}'.format(v/3.27))
|
StarcoderdataPython
|
3381042
|
"""
pynet data augmentation overview
================================
Credit: <NAME>
pynet contains a set of tools to efficiently augment 3D medical images that
is crucial for deep learning applications. It includes random affine/non-linear
transformations, simulation of intensity artifacts due to MRI magnetic field
inhomogeneity or k-space motion artifacts, and others.
Load the data
-------------
We load a toy dataset and select a T1-weighted MRI brain image.
"""
import os
import sys
if "CI_MODE" in os.environ:
sys.exit()
import time
import numpy as np
import nibabel
import random
from pynet.datasets import DataManager, fetch_toy, fetch_brats
from pynet.preprocessing import rescale, downsample
datasetdir = "/tmp/toy"
data = fetch_toy(datasetdir=datasetdir)
image = nibabel.load(data.t1w_path)
image = rescale(downsample(image.get_data(), scale=4), dynamic=(0, 255))
#############################################################################
# Define deformations
# -------------------
#
# We now declare MRI brain deformation functions. The deformation can be
# combined with the Transformer class.
from pynet.augmentation import add_blur
from pynet.augmentation import add_noise
from pynet.augmentation import add_ghosting
from pynet.augmentation import add_spike
from pynet.augmentation import add_biasfield
from pynet.augmentation import add_motion
from pynet.augmentation import add_offset
from pynet.augmentation import flip
from pynet.augmentation import affine
from pynet.augmentation import deformation
from pynet.augmentation import Transformer
compose_transforms = Transformer(with_channel=False)
compose_transforms.register(
flip, probability=0.5, axis=0, apply_to=["all"])
compose_transforms.register(
add_blur, probability=1, sigma=4, apply_to=["all"])
transforms = {
"add_blur": (add_blur, {"sigma": 4}),
"add_noise": (add_noise, {"snr": 5., "noise_type": "rician"}),
"flip": (flip, {"axis": 0}),
"affine": (affine, {"rotation": 5, "translation": 0, "zoom": 0.05}),
"add_ghosting": (add_ghosting, {"n_ghosts": (4, 10), "axis": 2,
"intensity": (0.5, 1)}),
"add_spike": (add_spike, {"n_spikes": 1, "intensity": (0.1, 1)}),
"add_biasfield": (add_biasfield, {"coefficients": 0.5}),
"deformation": (deformation, {"max_displacement": 4, "alpha": 3}),
"add_motion": (add_motion, {"rotation": 10, "translation": 10,
"n_transforms": 2, "perturbation": 0.3}),
"add_offset": (add_offset, {"factor": (0.05, 0.1)}),
"compose_transforms": (compose_transforms, {}),
}
#############################################################################
# Test transformations
# --------------------
#
# We now apply the transformations on the loaded image. Results are
# directly displayed in your browser at http://localhost:8097.
from pynet.plotting import Board
board = Board(port=8097, host="http://localhost", env="data-augmentation")
for cnt in range(10):
print("Iteration: ", cnt)
for key, (fct, kwargs) in transforms.items():
images = np.asarray([image, np.clip(fct(image, **kwargs), 0, 255)])
images = images[..., images.shape[-1] // 2]
images = np.expand_dims(images, axis=1)
board.viewer.images(
images, opts={"title": key, "caption": key}, win=key)
time.sleep(1)
#############################################################################
# Data augmentation
# -----------------
#
# We now illustrate how we can use the Transformer in combination with
# the DataManager to perform data augmentation during training. Results are
# directly displayed in your browser at http://localhost:8097.
datasetdir = "/neurospin/nsap/processed/deepbrain/tumor/data/brats"
data = fetch_brats(datasetdir=datasetdir)
board = Board(port=8097, host="http://localhost", env="data-augmentation")
compose_transforms = Transformer()
compose_transforms.register(
flip, probability=0.5, axis=0, apply_to=["input", "output"])
compose_transforms.register(
add_blur, probability=1, sigma=4, apply_to=["input"])
manager = DataManager(
input_path=data.input_path,
metadata_path=data.metadata_path,
output_path=data.output_path,
number_of_folds=2,
batch_size=2,
test_size=0.1,
sample_size=0.1,
sampler=None,
add_input=True,
data_augmentation_transforms=[compose_transforms])
loaders = manager.get_dataloader(
train=True,
validation=False,
fold_index=0)
for dataitem in loaders.train:
print("-" * 50)
print(dataitem.inputs.shape, dataitem.outputs.shape, dataitem.labels)
images = [dataitem.inputs[0, 0].numpy(), dataitem.inputs[0, 1].numpy(),
dataitem.outputs[0, 0].numpy(), dataitem.outputs[0, 1].numpy(),
dataitem.outputs[0, 4].numpy(), dataitem.outputs[0, 5].numpy()]
images = np.asarray(images)
images = np.expand_dims(images, axis=1)
images = images[..., images.shape[-1] // 2]
images = rescale(images, dynamic=(0, 255))
board.viewer.images(
images, opts={"title": "transformer", "caption": "transformer"},
win="transformer")
time.sleep(2)
|
StarcoderdataPython
|
3221582
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 30 2020
@author: Cassio (chmendonca)
Description: This class will have the alien characteristics and almost all
behaviors
"""
import pygame
from pygame.sprite import Sprite
class Alien(Sprite):
"""A class that represents a single alien from the fleet"""
def __init__(self,ai_settings,screen):
"""Initializes the alien and defines its initial position"""
super(Alien, self).__init__()
self.screen = screen
self.ai_settings = ai_settings
#Loads the alien image and defines its rectangle (rect)
self.image = pygame.image.load('images/alien.bmp')
self.rect = self.image.get_rect()
#Starts each alien at the left upper corner of the screen
self.rect.x = self.rect.width
self.rect.y = self.rect.height
        #Stores the alien's exact position
self.x = float(self.rect.x)
def blitme(self):
"""Draws the alien on its original position"""
self.screen.blit(self.image,self.rect)
def check_edges(self):
"""Returns True if one alien is at any edge of the screen"""
screen_rect = self.screen.get_rect()
if self.rect.right >= screen_rect.right:
return True
elif self.rect.left <=0:
return True
def update(self):
"""Moves the alien to the right or to the left"""
self.x += (self.ai_settings.alien_speed_factor *
self.ai_settings.fleet_direction)
self.rect.x = self.x
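#--- Usage sketch (added for illustration; ai_settings is assumed to expose
#alien_speed_factor and fleet_direction, exactly as used in update() above) ---
#A typical game loop manages the fleet with pygame.sprite.Group:
#
#    aliens = pygame.sprite.Group()
#    aliens.add(Alien(ai_settings, screen))
#    ...
#    aliens.update()
#    for alien in aliens.sprites():
#        alien.blitme()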
|
StarcoderdataPython
|
1699930
|
<filename>ERation/customer/models.py
from django.db import models
from django.core import validators as v
import eadmin.models as admin
# Create your models here.
class Customer(models.Model):
name = models.CharField(
verbose_name='Name',
name='name',
max_length=50,
null=False,
validators=(
v.MinLengthValidator(3),
)
)
phone = models.CharField(
verbose_name='Phone',
name='phone',
unique=True,
null=False,
max_length=10,
validators=(
v.MinLengthValidator(10),
v.MaxLengthValidator(10)
)
)
address = models.CharField(
verbose_name="Address",
name='address',
blank=False,
max_length=150,
validators=(
v.MinLengthValidator(10),
v.MaxLengthValidator(150)
)
)
card_number = models.CharField(
verbose_name="Ration card number",
name='card_number',
blank=False,
max_length=50,
unique=True,
validators=(
v.MinLengthValidator(10),
v.MaxLengthValidator(15)
)
)
shop_id = models.ForeignKey(
to='shop.Shop',
to_field='shop_id',
on_delete=models.CASCADE
)
card_type = models.ForeignKey(
verbose_name='Card type',
name='card_type',
max_length=10,
blank=False,
to='eadmin.Cards',
to_field='card_name',
on_delete=models.CASCADE
)
email = models.CharField(
max_length=50,
verbose_name='Email',
)
aadhar = models.BigIntegerField(
verbose_name='Aadhar Number',
validators=[
v.MinValueValidator(111111111111),
v.MaxValueValidator(9999999999999999)
]
)
class Orders(models.Model):
STATUS = (
('Pending', 'Pending'),
('Approved', 'Approved'),
('Delivered', 'Delivered'),
('Rejected', 'Rejected'),
('Out for delivery', 'Out for delivery')
)
customer = models.ForeignKey(
Customer,
on_delete=models.CASCADE
)
product = models.ForeignKey(
'eadmin.Products',
on_delete=models.CASCADE
)
price=models.IntegerField(
verbose_name='Price',
default=0,
validators=[
v.MaxValueValidator(10000, 'Price cannot exceed 10000'),
v.MinValueValidator(0, 'The minimum price should be 0')
]
)
day=models.IntegerField(
verbose_name='Day',
blank=False,
)
month=models.IntegerField(
verbose_name='Month',
blank=False
)
year=models.IntegerField(
verbose_name='Year',
blank=False
)
dday=models.IntegerField(
verbose_name='Day',
blank=False,
default=0
)
dmonth=models.IntegerField(
verbose_name='Month',
blank=False,
default=0
)
dyear=models.IntegerField(
verbose_name='Year',
blank=False,
default=0
)
otp=models.IntegerField(
verbose_name='OTP',
validators=[
v.MinValueValidator(0, "Invalid OTP Value"),
v.MaxValueValidator(9999, "Invalid OTP")
]
)
status=models.CharField(
choices=STATUS,
max_length=50
)
delivery_staff=models.CharField(
max_length=50,
verbose_name = 'Delivery Staff',
default = "0",
)
quantity=models.IntegerField(
verbose_name='Quantity',
default=1
)
|
StarcoderdataPython
|
4809435
|
<reponame>juliendelaunay35000/APE-Adapted_Post-Hoc_Explanations<filename>tabular_experiments.py
from sklearn import tree, svm
from sklearn.neural_network import MLPClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier, VotingClassifier
from sklearn.linear_model import LogisticRegression, RidgeClassifier
from sklearn.naive_bayes import GaussianNB
import matplotlib.pyplot as plt
import numpy as np
from generate_dataset import generate_dataset, preparing_dataset
from storeExperimentalInformations import store_experimental_informations
import ape_tabular
import warnings
#from keras.models import Sequential
#from keras.layers import Dense
def evaluate_test_unimodality(condition, precision_ls, precision_anchor):
    if condition:
        if precision_ls >= precision_anchor:
            return 1
        else:
            return 0
    else:
        if precision_ls <= precision_anchor:
            return 1
        else:
            return 0
if __name__ == "__main__":
# Filter the warning from matplotlib
warnings.filterwarnings("ignore")
# Datasets used for the experiments
# "generate_circles", "generate_moons", "generate_blob", "diabete", "generate_blobs",
dataset_names = ['titanic']#["adult", "compas", "titanic", "mortality", 'categorical_generate_blobs', "blood"]
# array of the models used for the experiments
models = [VotingClassifier(estimators=[('lr', LogisticRegression()), ('gnb', GaussianNB()), ('svm', svm.SVC(probability=True))], voting='soft'),#('rc', RidgeClassifier())], voting="soft"),
GradientBoostingClassifier(n_estimators=20, learning_rate=1.0, random_state=1),
#MLPClassifier(random_state=1, activation="logistic"),
RandomForestClassifier(n_estimators=20, random_state=1),
MLPClassifier(random_state=1),
svm.SVC(probability=True, random_state=1)]
#RidgeClassifier(random_state=1)]
#Sequential(),
#GaussianNB
#KNeighborsClassifier
#LinearDiscriminantAnalysis
# Number of instances explained by each model on each dataset
max_instance_to_explain = 30
# Print explanation result
illustrative_example = False
""" All the variable necessaries for generating the graph results """
# Store results inside graph if set to True
graph = True
verbose = False
growing_sphere = False
if growing_sphere:
label_graph = "growing spheres "
growing_method = "GS"
else:
label_graph = ""
growing_method = "GF"
# Threshold for explanation method precision
threshold_interpretability = 0.99
linear_separability_index = 1
linear_models_name = ['local surrogate', 'lime extending', 'lime regression', 'lime not binarize', 'lime traditional']
interpretability_name = ['LS', 'LSe log', 'LSe lin', 'Anchors', 'APE SI', 'APE CF', 'APE FOLD', 'APE FULL', 'APE FULL pvalue', 'DT']
#interpretability_name = ['ls log reg', 'ls raw data']
# Initialize all the variable needed to store the result in graph
for dataset_name in dataset_names:
if graph: experimental_informations = store_experimental_informations(len(models), len(interpretability_name), interpretability_name, len(models))
models_name = []
# Store dataset inside x and y (x data and y labels), with aditional information
x, y, class_names, regression, multiclass, continuous_features, categorical_features, categorical_values, categorical_names, \
feature_names, transformations = generate_dataset(dataset_name)
for nb_model, black_box in enumerate(models):
model_name = type(black_box).__name__
if "MLP" in model_name and nb_model <=2 :
model_name += "logistic"
if growing_sphere:
filename = "./results/"+dataset_name+"/growing_spheres/"+model_name+"/"+str(threshold_interpretability)+"/"
filename_all = "./results/"+dataset_name+"/growing_spheres/"+str(threshold_interpretability)+"/"
else:
filename="./results/"+dataset_name+"/"+model_name+"/"+str(threshold_interpretability)+"/"
filename_all="./results/"+dataset_name+"/"+str(threshold_interpretability)+"/"
if graph: experimental_informations.initialize_per_models(filename)
models_name.append(model_name)
# Split the dataset inside train and test set (50% each set)
x_train, x_test, y_train, y_test = preparing_dataset(x, y, dataset_name)
print()
print()
print("###", model_name, "training on", dataset_name, "dataset.")
if 'Sequential' in model_name:
# Train a neural network classifier with 2 relu and a sigmoid activation function
black_box.add(Dense(12, input_dim=len(x_train[0]), activation='relu'))
black_box.add(Dense(8, activation='relu'))
black_box.add(Dense(1, activation='sigmoid'))
black_box.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
black_box.fit(x_train, y_train, epochs=50, batch_size=10)
def predict(x):
if x.shape[0] > 1:
return np.asarray([prediction[0] for prediction in black_box.predict_classes(x)])
return black_box.predict_classes(x)[0]
def score(x, y):
return sum(predict(x) == y)/len(y)
else:
black_box = black_box.fit(x_train, y_train)
predict = black_box.predict
score = black_box.score
print('### Accuracy:', score(x_test, y_test))
cnt = 0
explainer = ape_tabular.ApeTabularExplainer(x_train, class_names, predict, black_box.predict_proba,
continuous_features=continuous_features,
categorical_features=categorical_features, categorical_values=categorical_values,
feature_names=feature_names, categorical_names=categorical_names,
verbose=verbose, threshold_precision=threshold_interpretability,
linear_separability_index=linear_separability_index,
transformations=transformations)
for instance_to_explain in x_test:
if cnt == max_instance_to_explain:
break
print("### Instance number:", cnt + 1, "over", max_instance_to_explain)
print("### Models ", nb_model + 1, "over", len(models))
print("instance to explain:", instance_to_explain)
#print("class", black_box.predict_proba(instance_to_explain.reshape(1, -1))[0])
try:
#test+=2
#except:
precision, coverage, f2, multimodal_result, radius, real_precisions = explainer.explain_instance(instance_to_explain,
growing_method=growing_method,
all_explanations_model=True)
print("precision", precision)
print("real precision", real_precisions)
"""print("coverage", coverage)
print("f2", f2)
print("multimodal", multimodal_result)
print("radius", radius)
print("separability", explainer.separability_index)
print("friends pvalue", explainer.friends_pvalue)
print("counterfactual pvalue", explainer.counterfactual_pvalue)
print("friends folding statistic", explainer.friends_folding_statistics)
print("counterfactual folding statistic", explainer.counterfactual_folding_statistics)
# Evaluate whether the linear separability index returns truely the case where the precision of LS is better than Anchors
si_bon = evaluate_test_unimodality(explainer.separability_index >= linear_separability_index, precision[1], precision[2])
# Evaluate whether the unimodality test returns truely the case where the precision of LS is better than Anchors
fold_bon = evaluate_test_unimodality(explainer.friends_folding_statistics >= 1 and explainer.counterfactual_folding_statistics >=1,
precision[1], precision[3])
cf_bon = evaluate_test_unimodality(explainer.counterfactual_folding_statistics >=1, precision[1], )
"""
cf_bon = 1 if (precision[5] >= precision[1] and precision[5] >= precision[3]) else 0
ape_bon = 1 if (precision[4] >= precision[1] and precision[4] >= precision[3]) else 0
si_bon = 1 if (precision[7] >= precision[1] and precision[7] >= precision[3]) else 0
fold_bon = 1 if (precision[6] >= precision[1] and precision[6] >= precision[3]) else 0
ape_pvalue_bon = 1 if (precision[8] >= precision[1] and precision[8] >= precision[3]) else 0
print("separability index bon", si_bon)
print("counterfactual folding bon", cf_bon)
print("fold bon", fold_bon)
print("ape bon", ape_bon)
print("ape pvalue bon", ape_pvalue_bon)
if graph: experimental_informations.store_experiments_information_instance(precision, 'precision.csv', coverage,
'coverage.csv', f2, 'f2.csv', real_precisions, 'real_precisions.csv',
multimodal=[precision[0], precision[1], precision[2], precision[3], precision[4], precision[5],
precision[6], precision[7], precision[8], precision[9],
multimodal_result, radius, explainer.friends_pvalue,
explainer.counterfactual_pvalue,
explainer.separability_index, explainer.friends_folding_statistics,
explainer.counterfactual_folding_statistics, si_bon, cf_bon,
fold_bon, ape_bon, ape_pvalue_bon, model_name])
cnt += 1
except Exception as inst:
print(inst)
if graph: experimental_informations.store_experiments_information(max_instance_to_explain, nb_model, filename1='precision.csv',
filename2='coverage.csv', filename3='f2.csv', filename4='real_precisions.csv', filename_multimodal="multimodal.csv",
filename_all=filename_all)
|
StarcoderdataPython
|
3305553
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
# This plugin is licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Authors: <NAME> <<EMAIL>>
from gluon import *
def multiselect_widget(field, value, **attributes):
requires = field.requires
if not isinstance(requires, (list, tuple)):
requires = [requires]
if requires:
for require in requires:
if hasattr(require, 'options'):
options = require.options()
break
else:
raise SyntaxError('widget cannot determine options of %s' % field)
selected_opts = {}
unselected_opts = []
    _value = list(map(str, value)) if value else []
for (k, v) in options:
opt = OPTION(v, _value=k)
if _value and k in _value:
selected_opts[k] = opt
else:
unselected_opts.append(opt)
if _value:
selected_opts = [selected_opts[k] for k in _value if k in selected_opts] # preserve the sort order
else:
selected_opts = []
unselected_el_id = "unselected_%s" % field.name
select_el_id = field.name
script_el = SCRIPT("""
function plugin_multiselect_widget_move(select, target) {
jQuery('#' + select).children().each(function() {
if (this.selected) {
jQuery('#' + target).append(this);
jQuery(this).attr({selected: false});
}
});
}
jQuery(document).ready(function() {
jQuery("form input[type=submit]").click(function() {
jQuery('#' +'%s').children().attr({selected: true});
});
});""" % select_el_id)
width = attributes.get('width', 320)
size = attributes.get('size', 6)
unselected_el = SELECT(_id=unselected_el_id, _size=size, _style="width:%spx" % width, _multiple=True,
*unselected_opts)
select_el = SELECT(_id=select_el_id, _size=size, _style="width:%spx" % width, _multiple=True,
_name=field.name, requires=field.requires,
*selected_opts)
attributes['_style'] = attributes.get('_style', 'padding-bottom:10px;')
arrangement = attributes.get('arrangement', 'vertical')
reversed = attributes.get('reversed', False)
if arrangement == 'vertical':
if not reversed:
return DIV(script_el, unselected_el, BR(),
CENTER(
INPUT(_type='button',
_value=attributes.get('label_register', '↓ %s ↓' % current.T('register')),
_onclick=('plugin_multiselect_widget_move("%s", "%s");' %
(unselected_el_id, select_el_id))), ' ',
INPUT(_type='button',
_value=attributes.get('label_delete', '↑ %s ↑' % current.T('delete')),
_onclick=('plugin_multiselect_widget_move("%s", "%s");' %
(select_el_id, unselected_el_id))),
_style='padding:5px 0px;width:%spx;' % width),
select_el,
_id='%s_%s' % (field._tablename, field.name),
**attributes)
else:
return DIV(script_el, select_el, BR(),
CENTER(INPUT(_type='button',
_value=attributes.get('label_register', '↑ %s ↑' % current.T('register')),
_onclick=('plugin_multiselect_widget_move("%s", "%s");' %
(unselected_el_id, select_el_id))), ' ',
INPUT(_type='button',
_value=attributes.get('label_delete', '↓ %s ↓' % current.T('delete')),
_onclick=('plugin_multiselect_widget_move("%s", "%s");' %
(select_el_id, unselected_el_id))),
_style='padding:5px 0px;width:%spx;' % width),
unselected_el,
_id='%s_%s' % (field._tablename, field.name),
**attributes)
elif arrangement == 'horizontal':
if not reversed:
return DIV(script_el, TABLE(TR(
TD(unselected_el),
TD(
INPUT(_type='button',
_value=attributes.get('label_register', '%s →' % current.T('register')),
_onclick=('plugin_multiselect_widget_move("%s", "%s");' %
(unselected_el_id, select_el_id))), BR(), BR(),
INPUT(_type='button',
_value=attributes.get('label_delete', '← %s' % current.T('delete')),
_onclick=('plugin_multiselect_widget_move("%s", "%s");' %
(select_el_id, unselected_el_id))),
_style='vertical-align:middle;padding-right: 10px;text-align:center;'
),
TD(select_el),
)),
_id='%s_%s' % (field._tablename, field.name),
**attributes)
else:
return DIV(script_el, TABLE(TR(
TD(select_el),
TD(
INPUT(_type='button',
_value=attributes.get('label_register', '← %s' % current.T('register')),
_onclick=('plugin_multiselect_widget_move("%s", "%s");' %
(unselected_el_id, select_el_id))), BR(), BR(),
INPUT(_type='button',
_value=attributes.get('label_delete', '%s →' % current.T('delete')),
_onclick=('plugin_multiselect_widget_move("%s", "%s");' %
(select_el_id, unselected_el_id))),
_style='vertical-align:middle;padding-right: 10px;text-align:center;'
),
TD(unselected_el),
)),
_id='%s_%s' % (field._tablename, field.name),
**attributes)
def vmultiselect_widget(field, value, **attributes):
attributes['arrangement'] = 'vertical'
return multiselect_widget(field, value, **attributes)
def hmultiselect_widget(field, value, **attributes):
attributes['arrangement'] = 'horizontal'
attributes['width'] = 150
return multiselect_widget(field, value, **attributes)
def rvmultiselect_widget(field, value, **attributes):
attributes['reversed'] = True
return vmultiselect_widget(field, value, **attributes)
def rhmultiselect_widget(field, value, **attributes):
attributes['reversed'] = True
return hmultiselect_widget(field, value, **attributes)
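# --- Usage sketch (added for illustration; 'db.post.tags' is a placeholder
# field). The widget reads its options from field.requires, so the field is
# assumed to carry a validator exposing .options(), e.g. IS_IN_SET(..., multiple=True):
#
# db.post.tags.widget = hmultiselect_widget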
|
StarcoderdataPython
|
3241074
|
from ..models import Experiment, Group, Subject
import dash
import dash_table
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
from dash.dash import no_update
from .utils.data_table import DatatableComponent
from .utils.graph_tsne import GraphTsne
from .utils.table_subjects import SubjectsTable
from pandas_profiling import ProfileReport
import pandas as pd
import json
import base64
import random
import string
import io
class PageSimplevis(html.Div):
def __init__(self, parent_app, id=None):
super().__init__([])
self.parent_app = parent_app
# Parent defined or randomly generated unique id
if id is not None:
self.id = id
else:
self.id = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(6))
self.button_load_data = dcc.Upload(
id='button-load-data',
children=dbc.Button("Load data", color='dark')
)
self.profile_summary = html.Iframe(
# src='static/output.html',
id="iframe-profile-summary",
style={"border": 0, "width": "100%", "height": "900px", "overflow": "auto"}
)
# Tabs
subjects = Subject.query.all()
        # Build the subjects table once from the full list of subjects
        subjects_table = SubjectsTable(parent_app=parent_app, subjects=subjects)
tab1_content = dbc.Card(
dbc.CardBody(
id='tab1',
children=[dbc.Row([
dbc.Col(
[
dcc.Loading(
id="loading-tab1",
type="circle",
color='#343a40',
children=html.Div(children=[subjects_table], id="tab1-data-table")
),
],
)
], justify="center")],
),
)
@self.parent_app.callback(
[
Output('tab1-data-table', 'children'),
Output('iframe-profile-summary', 'src'),
],
[Input('button-load-data', 'contents')],
)
def update_output(content):
if content is not None:
# Read and decode content
content_type, content_string = content.split(',')
decoded = base64.b64decode(content_string)
self.df = pd.read_csv(io.StringIO(decoded.decode('utf-8')), index_col=0)
# Make data table for tab 1
table = dash_table.DataTable(
id='table',
columns=[{"name": i, "id": i, "hideable": True, "renamable": True} for i in self.df.columns],
data=self.df.head(100).to_dict('records'),
style_table={
'overflowX': 'auto', 'height': '300px', 'overflowY': 'auto',
},
style_cell={
'minWidth': '180px', 'width': '180px', 'maxWidth': '180px',
},
)
# Make profile summary for tab 2
# profile = df.profile_report(title="")
# profile.to_file("static/output.html")
return [table, 'static/output.html']
return [no_update, no_update]
tab2_content = dbc.Card(
dbc.CardBody(
id='tab2',
children=[
dbc.Row([
dbc.Col(
[
html.Div(
children=[self.profile_summary],
id="tab2-profile-summary",
)
],
)
], justify="center")
]
),
)
tab3_content = dbc.Card(
dbc.CardBody(
id='tab3',
children=[
dbc.Button("Run t-SNE", color='dark', id='button-run-tsne'),
html.Br(),
html.Br(),
dbc.Row([
dbc.Col(
[
dcc.Loading(
id="loading-tab3",
type="circle",
color='#343a40',
children=html.Div(children=[], id="tab3-graph_tsne")
),
],
)
], justify="center")
]
),
)
@self.parent_app.callback(
Output("tab3-graph_tsne", "children"),
[Input('button-run-tsne', "n_clicks")]
)
def function_run_tsne(n_clicks):
if n_clicks is not None:
self.tsne_vis = GraphTsne(parent_app=parent_app, df=self.df, id='simple-vis-tsne')
return [self.tsne_vis]
return no_update
# Tabs layout
tabs = dbc.Tabs(
[
dbc.Tab(tab1_content, label="Data table"),
dbc.Tab(tab2_content, label="Summary"),
dbc.Tab(tab3_content, label="t-SNE"),
]
)
# Children - main layout
self.children = dbc.Container([
self.button_load_data,
html.Br(),
tabs,
html.Br(),
html.Br()
])
# @self.parent_app.callback(
# Output("graphs-page-content", "children"),
# [
# Input("graphs-timeline-link", "n_clicks"),
# Input("graphs-distribution-link", "n_clicks"),
# Input("graphs-map-link", "n_clicks"),
# ]
# )
# def render_graphs_page_content(*c1):
# ctx = dash.callback_context
# trigger_source = ctx.triggered[0]['prop_id'].split('.')[0]
# if trigger_source == 'graphs-timeline-link':
# return self.graph_timeline
# elif trigger_source == 'graphs-timeline-lin':
# return html.P("Another graph to be produced")
# elif trigger_source == 'graphs-timeline-lin':
# return html.P("a map")
# return self.graph_timeline
|
StarcoderdataPython
|
1788045
|
<reponame>MarcSaric/variant-filtration-tool<filename>gdc_filtration_tools/tools/format_gdc_vcf.py
"""
This script formats a VCF file header to contain various GDC-specific
metadata attributes:
* fileDate - the date of the processing
* center - The NCI Genomic Data Commons (processing center not sequencing)
* reference - The reference name (GRCh38.d1.vd1.fa)
* INDIVIDUAL - The patient barcode and case id
* SAMPLE - the normal/tumor barcode, aliquot uuid and bam uuid (there will be multiple)
@author: <NAME> <<EMAIL>>
"""
import datetime
from typing import NewType
import pysam
from gdc_filtration_tools.logger import Logger
from gdc_filtration_tools.utils import get_pysam_outmode
VariantFileT = NewType("VariantFileT", pysam.VariantFile)
VcfHeaderT = NewType("VcfHeaderT", pysam.VariantHeader)
def build_header(
reader: VariantFileT,
patient_barcode: str,
case_id: str,
tumor_barcode: str,
tumor_aliquot_uuid: str,
tumor_bam_uuid: str,
normal_barcode: str,
normal_aliquot_uuid: str,
normal_bam_uuid: str,
reference_name: str,
) -> VcfHeaderT:
"""
Takes the user arguments and the input VCF to generate the GDC
formatted header entries and returns the header object.
"""
# First, load the old header, skipping ones that we will update
lst = []
for record in reader.header.records:
if (
record.key == "fileDate"
or record.key == "fileformat"
or record.key == "reference"
):
continue
lst.append(str(record))
# Add GDC specific metadata
lst.extend(
[
"##fileDate={0}".format(datetime.date.today().strftime("%Y%m%d")),
'##center="NCI Genomic Data Commons (GDC)"',
"##reference={0}".format(reference_name),
"##INDIVIDUAL=<NAME={0},ID={1}>".format(patient_barcode, case_id),
"##SAMPLE=<ID=NORMAL,NAME={0},ALIQUOT_ID={1},BAM_ID={2}>".format(
normal_barcode, normal_aliquot_uuid, normal_bam_uuid
),
"##SAMPLE=<ID=TUMOR,NAME={0},ALIQUOT_ID={1},BAM_ID={2}>".format(
tumor_barcode, tumor_aliquot_uuid, tumor_bam_uuid
),
]
)
# Initialize new header object
new_head = pysam.VariantHeader()
for line in lst:
new_head.add_line(line)
# Add samples
for sample in reader.header.samples:
new_head.add_sample(sample)
# Return updated header
return new_head
def format_gdc_vcf(
input_vcf: str,
output_vcf: str,
patient_barcode: str,
case_id: str,
tumor_barcode: str,
tumor_aliquot_uuid: str,
tumor_bam_uuid: str,
normal_barcode: str,
normal_aliquot_uuid: str,
normal_bam_uuid: str,
*,
reference_name: str = "GRCh38.d1.vd1.fa",
) -> None:
"""
Adds VCF header metadata specific to the GDC.
:param input_vcf: The input VCF file to format.
:param output_vcf: The output formatted VCF file to create. BGzip and tabix-index created if ends with '.gz'.
:param patient_barcode: The case submitter id.
:param case_id: The case uuid.
:param tumor_barcode: The tumor aliquot submitter id.
:param tumor_aliquot_uuid: The tumor aliquot uuid.
:param tumor_bam_uuid: The tumor bam uuid.
:param normal_barcode: The normal aliquot submitter id.
:param normal_aliquot_uuid: The normal aliquot uuid.
:param normal_bam_uuid: The normal bam uuid.
:param reference_name: Reference name to use in header.
"""
logger = Logger.get_logger("format_gdc_vcf")
logger.info("Format GDC tumor/normal paired VCFs.")
# setup
reader = pysam.VariantFile(input_vcf)
mode = get_pysam_outmode(output_vcf)
# Load new header
new_header = build_header(
reader,
patient_barcode,
case_id,
tumor_barcode,
tumor_aliquot_uuid,
tumor_bam_uuid,
normal_barcode,
normal_aliquot_uuid,
normal_bam_uuid,
reference_name,
)
writer = pysam.VariantFile(output_vcf, mode=mode, header=new_header)
# Process
try:
for record in reader.fetch():
writer.write(record)
finally:
reader.close()
writer.close()
if mode == "wz":
logger.info("Creating tabix index...")
tbx = pysam.tabix_index(output_vcf, preset="vcf", force=True)
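# --- Usage sketch (added for illustration; the paths and IDs below are
# placeholders, not real GDC identifiers) ---
# Calling format_gdc_vcf directly with the keyword arguments defined above.
# A '.gz' output suffix triggers BGzip output plus tabix indexing, as documented.
if __name__ == "__main__":
    format_gdc_vcf(
        input_vcf="raw.vcf",
        output_vcf="formatted.vcf.gz",
        patient_barcode="TCGA-XX-0000",
        case_id="case-uuid",
        tumor_barcode="TCGA-XX-0000-01A",
        tumor_aliquot_uuid="tumor-aliquot-uuid",
        tumor_bam_uuid="tumor-bam-uuid",
        normal_barcode="TCGA-XX-0000-10A",
        normal_aliquot_uuid="normal-aliquot-uuid",
        normal_bam_uuid="normal-bam-uuid",
    )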
|
StarcoderdataPython
|
1715938
|
<gh_stars>1-10
import Enum
import os
import Validate
from msvcrt import getch
from colorama import init
init(convert=True)
from colorama import Fore, Back, Style
from Enum import StringFore
def Clear(): return os.system('cls')
def GetInput():
while True:
keycode = ord(getch())
if keycode == 13: #Enter
return Enum.Enter
elif keycode == 80: #Down arrow
return Enum.DownArrow
elif keycode == 72: #Up arrow
return Enum.UpArrow
def Print(content):
print(content, end="")
def PrintError(content):
print(f"{Fore.RED}{content}{Fore.RED}{Fore.WHITE}")
def GetPersonName(IsClear=False,regex=None):
Print(Enum.EnterName if not IsClear else "")
name = input()
res = None
if len(name) < 3 or len(name) > 50:
PrintError(Enum.InvNameLen)
return GetPersonName(IsClear,regex)
elif regex == None:
res = Validate.Name(name)
else:
res = Validate.Name(name,regex)
if not res:
PrintError(Enum.InvalidName)
return GetPersonName(IsClear,regex)
return name
def GetCityName(IsClear=False,regex=None):
Print(Enum.EnterCity if not IsClear else "")
city = input()
res = None
if len(city) < 3 or len(city) > 50:
PrintError(Enum.InvCityLen)
return GetCityName(IsClear,regex)
if regex==None:
res = Validate.City(city)
else:
res = Validate.City(city,regex)
if not res:
PrintError(Enum.InvalidCity)
return GetCityName(IsClear,regex)
return city
def GetPhoneNumber(IsClear=False,regex=None):
Print(Enum.EnterPhoneNumber if not IsClear else "")
number = input()
res = None
if len(number) < 3 or len(number) > 50:
PrintError(Enum.InvPhoneLen)
return GetPhoneNumber(IsClear,regex)
if regex==None:
res = Validate.Number(number)
else:
res = Validate.Number(number,regex)
if not res:
PrintError(Enum.InvalidPhone)
return GetPhoneNumber(IsClear,regex)
return number
def SearchByInputInfo(selection):
if selection == Enum.SearchByName:
Print(Enum.EnterName)
elif selection == Enum.SearchByCity:
Print(Enum.EnterCity)
elif selection == Enum.SearchByNumber:
Print(Enum.EnterPhoneNumber)
def IsntSuchAContact(sOption):
string = f"There isn't such a contact with given {sOption}!"
print(StringFore.LIGHTCYAN_EX(string))
def PrintSelectedSearch(db,selectionIndex):
for i in range(len(db)):
if i == selectionIndex:
print(StringFore.GREEN(db[i]))
else:
print(db[i])
def PrintSelectedContact(db,selectionIndex):
for i in range(len(db)):
if i == selectionIndex:
PrintSelectedPerson(db[i])
else:
PrintPerson(db[i])
def PrintPerson(personObj):
print(f"{personObj.name} - {personObj.phoneNumber} | {personObj.city}")
def PrintSelectedPerson(personObj):
print(f"{Fore.GREEN}{personObj.name} - {personObj.phoneNumber} | {personObj.city}{Fore.GREEN}{Fore.WHITE}")
def WaitForEnter():
while True:
key = GetInput()
if key == Enum.Enter:
return
def ConfirmDelete(person):
string = f"{person.name} / {person.phoneNumber} ({person.city})"
print(f"Do you really want to delete contact:\n {StringFore.LIGHTCYAN_EX(string)} ? [yes/no]",end="")
while True:
        decision = input().lower()
        if decision in ["yes",'y']:
            return True
        elif decision in ["no",'n']:
return False
else:
print(Enum.InvalidComfirmation)
def ConfirmUpdate(person,name,city,phoneNumber):
print("=" * 20)
print(f"Do you really want to update contact:{Enum.NewLine}")
print(f"Name from {StringFore.LIGHTCYAN_EX(person.name)} to * {StringFore.CYAN(name)} *")
print(f"City from {StringFore.LIGHTCYAN_EX(person.city)} to * {StringFore.CYAN(city)} *")
print(f"Phone Number from {StringFore.LIGHTCYAN_EX(person.phoneNumber)} to * {StringFore.CYAN(phoneNumber)} *")
print(f"Type [yes/no] ",end="")
while True:
        decision = input().lower()
        if decision in ["yes",'y']:
            return True
        elif decision in ["no",'n']:
return False
else:
print(Enum.InvalidComfirmation)
|
StarcoderdataPython
|
68027
|
import unittest
from synful import synapse
class TestSynapse(unittest.TestCase):
def test_cluster_synapses(self):
syn_1 = synapse.Synapse(id=1, location_pre=(1, 2, 3),
location_post=(10, 10, 0), id_segm_pre=1,
id_segm_post=10)
syn_2 = synapse.Synapse(id=2, location_pre=(3, 4, 5),
location_post=(12, 14, 0), id_segm_pre=1,
id_segm_post=10)
syn_3 = synapse.Synapse(id=3, location_pre=(0, 0, 0),
location_post=(30, 30, 0), id_segm_pre=1,
id_segm_post=10)
syn_4 = synapse.Synapse(id=4, location_pre=(0, 0, 0),
location_post=(32, 32, 0), id_segm_pre=1,
id_segm_post=10)
syn_5 = synapse.Synapse(id=5, location_pre=(0, 0, 0),
location_post=(10, 10, 0), id_segm_pre=1,
id_segm_post=5)
synapses, removed_ids = synapse.cluster_synapses(
[syn_1, syn_2, syn_3, syn_4, syn_5],
5)
ids = [syn.id for syn in synapses]
self.assertTrue(1 in ids)
self.assertTrue(3 in ids)
self.assertTrue(5 in ids)
self.assertFalse(2 in ids)
self.assertFalse(4 in ids)
self.assertEqual(tuple(synapses[0].location_post), (11, 12, 0))
self.assertEqual(tuple(synapses[0].location_pre), (2, 3, 4))
self.assertTrue(2 in removed_ids)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
4806537
|
class Solution(object):
def addToArrayForm(self, num, k):
"""
:type num: List[int]
:type k: int
:rtype: List[int]
"""
# Runtime: 232 ms
# Memory: 13.6 MB
last = 0
ptr = len(num) - 1
while k != 0 or last != 0:
if ptr < 0:
num.insert(0, 0)
ptr = 0
k, mod = divmod(k, 10)
last, num[ptr] = divmod(num[ptr] + mod + last, 10)
ptr -= 1
return num
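# --- Usage sketch (added for illustration, not part of the original solution) ---
# The expected outputs follow from ordinary integer addition:
# 1200 + 34 = 1234 and 99 + 2 = 101.
if __name__ == "__main__":
    solver = Solution()
    print(solver.addToArrayForm([1, 2, 0, 0], 34))  # [1, 2, 3, 4]
    print(solver.addToArrayForm([9, 9], 2))         # [1, 0, 1]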
|
StarcoderdataPython
|
176719
|
"""
This file is part of the Semantic Quality Benchmark for Word Embeddings Tool in Python (SeaQuBe).
Copyright (c) 2021 by <NAME>
:author: <NAME>
"""
import copy
import time
from googletrans import Translator
from seaqube.augmentation.base import SingleprocessingAugmentation
from seaqube.nlp.tools import tokenize_corpus
from seaqube.package_config import log
class TranslationAugmentation(SingleprocessingAugmentation):
"""
Based on the idea from the author (<NAME> and <NAME> and <NAME> and <NAME> and <NAME> and
<NAME> and <NAME>) of the paper
"QANet: Combining Local Convolution with Global Self-Attention for Reading Comprehension"
    The idea is to translate a text into one or several languages and then translate it back to the original one.
    The idea behind this is to keep the content of a text but change its surface form.
    The translator engine is still Google Translate, as long as it works.
@misc{yu2018qanet,
title={QANet: Combining Local Convolution with Global Self-Attention for Reading Comprehension},
author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
year={2018},
eprint={1804.09541},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
def __init__(self, base_lang='en', max_length: int = 100, remove_duplicates: bool = False, timeout: int = None,
multiprocess: bool = True, seed: int = None):
"""
Set up the translator for a given start / base language, default is en
Args:
            base_lang: the language to translate from and back to
            max_length: cut the produced text at a limit to prevent overflow
            remove_duplicates: remove duplicates after augmentation
            timeout: number of seconds to wait before firing the next translation
            multiprocess: if the augmentation class implements the multiprocessing call, it can be turned off again
                with this flag, mostly for testing purposes
            seed: fix the randomness with a seed for testing purposes
"""
super(TranslationAugmentation, self).__init__()
self.translator = Translator()
self.base_lang = base_lang
self.max_length = max_length
self.remove_duplicates = remove_duplicates
self.multiprocess = multiprocess
self.seed = seed
self.timeout = timeout
self.last_call = 0.0
def get_config(self):
"""
        Gives a dict with all relevant variables the object can be recreated with (init parameters)
Returns: dict of object config
"""
return dict(base_lang=self.base_lang, max_length=self.max_length,
remove_duplicates=self.remove_duplicates, seed=self.seed, class_name=str(self))
def shortname(self):
return "googletranslate"
def input_type(self):
"""
        Which input type is supported
Returns: doc or text
"""
return "text"
def augmentation_implementation(self, sentence):
return self.translate_doc(sentence)
def __handle_timeout(self):
if self.timeout is None:
return
diff = time.time() - self.last_call - self.timeout
if diff < 0:
time.sleep(abs(diff))
self.last_call = time.time()
return
def translate_doc(self, text):
translation_pipelines = [
['fr'],
['de'],
['sv'],
['de', 'fr'],
['ja'],
['la'],
['ko'],
['nl']
]
texts = []
for translation_pipeline in translation_pipelines:
translation_pipeline = [self.base_lang] + translation_pipeline + [self.base_lang]
tmp_text = copy.deepcopy(text)
for i, lang in enumerate(translation_pipeline):
next_lang = translation_pipeline[i+1]
try:
self.__handle_timeout()
tmp_text = self.translator.translate(tmp_text, dest=next_lang, src=lang).text
except Exception:
log.info("Some translation did not work, we try it later again")
if next_lang == self.base_lang:
# Chain is finished
break
texts.append(tmp_text)
return tokenize_corpus(texts[0: self.max_length], verbose=False)
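# --- Usage sketch (added for illustration) ---
# The public entry point of SingleprocessingAugmentation is not shown in this
# file, so this only exercises translate_doc() defined above. It needs network
# access and a working googletrans install, and may hit rate limits.
if __name__ == "__main__":
    augmenter = TranslationAugmentation(base_lang="en", timeout=1)
    print(augmenter.translate_doc("The quick brown fox jumps over the lazy dog."))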
|
StarcoderdataPython
|
1692803
|
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Anomaly and artefact detection and annotation generation."""
import logging
from itertools import chain
from neurom.core.dataformat import COLS
L = logging.getLogger(__name__)
def generate_annotation(result, settings):
"""Generate the annotation for a given checker.
Arguments:
result: the result of the checker
settings: the display settings for NeuroLucida
Returns
An S-expression-like string representing the annotation
"""
if result.status:
return ""
header = ("\n\n"
"({label} ; MUK_ANNOTATION\n"
" (Color {color}) ; MUK_ANNOTATION\n"
" (Name \"{name}\") ; MUK_ANNOTATION").format(**settings)
points = [p for _, _points in result.info for p in _points]
annotations = (" ({0} {1} {2} 0.50) ; MUK_ANNOTATION".format(
p[COLS.X], p[COLS.Y], p[COLS.Z]) for p in points)
footer = ") ; MUK_ANNOTATION\n"
return '\n'.join(chain.from_iterable(([header], annotations, [footer])))
def annotate(results, settings):
"""Concatenate the annotations of all checkers."""
annotations = (generate_annotation(result, setting)
for result, setting in zip(results, settings))
return '\n'.join(annot for annot in annotations if annot)
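# --- Illustration (added): the shape of the settings dict consumed by
# generate_annotation above. The keys must match the placeholders in the header
# template ({label}, {color}, {name}); the values here are arbitrary examples.
EXAMPLE_SETTINGS = {
    "label": "Circle1",
    "color": "Blue",
    "name": "dangling branch",
}
# annotate(results, [EXAMPLE_SETTINGS] * len(results)) would then emit one
# annotation block per failed checker result.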
|
StarcoderdataPython
|
1759311
|
<filename>src/uwds3_core/underworlds_core.py<gh_stars>1-10
import rospy
import numpy as np
import sensor_msgs
import tf2_ros
import math
import cv2
import message_filters
from cv_bridge import CvBridge
import geometry_msgs
from tf2_ros import Buffer, TransformListener, TransformBroadcaster
from .utils.transformations import *
from .detection.opencv_dnn_detector import OpenCVDNNDetector
from .detection.hog_face_detector import HOGFaceDetector
from .storage.internal_simulator import InternalSimulator
from .estimation.dense_optical_flow_estimator import DenseOpticalFlowEstimator
from .estimation.facial_landmarks_estimator import FacialLandmarksEstimator, NOSE, POINT_OF_SIGHT
from .estimation.head_pose_estimator import HeadPoseEstimator
from .features.visual_features_extractor import VisualFeaturesExtractor
from .tracking.tracker import Tracker
from .tracking.human_tracker import HumanTracker
from .tracking.linear_assignment import iou_distance
def transformation_matrix(t, q):
translation_mat = translation_matrix(t)
rotation_mat = quaternion_matrix(q)
return np.dot(translation_mat, rotation_mat)
class UnderworldsCore(object):
def __init__(self):
self.tf_buffer = Buffer()
self.tf_listener = TransformListener(self.tf_buffer)
self.tf_broadcaster = TransformBroadcaster()
self.rgb_image_topic = rospy.get_param("~rgb_image_topic", "/camera/rgb/image_raw")
self.depth_image_topic = rospy.get_param("~depth_image_topic", "/camera/depth/image_raw")
self.camera_info_topic = rospy.get_param("~camera_info_topic", "/camera/rgb/camera_info")
self.base_frame_id = rospy.get_param("~base_frame_id", "base_link")
self.global_frame_id = rospy.get_param("~global_frame_id", "map")
self.use_gui = rospy.get_param("~use_gui", True)
rospy.loginfo("Subscribing to /{} topic...".format(self.camera_info_topic))
self.camera_info = None
self.camera_frame_id = None
self.camera_info_subscriber = rospy.Subscriber(self.camera_info_topic, sensor_msgs.msg.CameraInfo, self.camera_info_callback)
self.detector_model_filename = rospy.get_param("~detector_model_filename", "")
self.detector_weights_filename = rospy.get_param("~detector_weights_filename", "")
self.detector_config_filename = rospy.get_param("~detector_config_filename", "")
self.detector = OpenCVDNNDetector(self.detector_model_filename,
self.detector_weights_filename,
self.detector_config_filename,
300)
self.face_detector = HOGFaceDetector()
self.visual_features_extractor = VisualFeaturesExtractor("MobileNetV2", weights="imagenet")
self.internal_simulator = InternalSimulator()
self.shape_predictor_config_filename = rospy.get_param("~shape_predictor_config_filename", "")
self.optical_flow_estimator = DenseOpticalFlowEstimator()
self.flow = None
self.bridge = CvBridge()
self.body_parts = ["person", "face", "right_hand", "left_hand"]
self.object_tracker = Tracker(iou_distance, min_distance=0.7)
self.human_tracker = HumanTracker(self.shape_predictor_config_filename)
self.use_depth = rospy.get_param("~use_depth", False)
self.n_frame = rospy.get_param("~n_frame", 2)
self.frame_count = 0
self.only_faces = rospy.get_param("~only_faces", True)
self.visualization_publisher = rospy.Publisher("uwds3_core/visualization_image", sensor_msgs.msg.Image, queue_size=1)
self.previous_head_pose = {}
if self.use_depth is True:
self.rgb_image_sub = message_filters.Subscriber(self.rgb_image_topic, sensor_msgs.msg.Image)
self.depth_image_sub = message_filters.Subscriber(self.depth_image_topic, sensor_msgs.msg.Image)
self.sync = message_filters.TimeSynchronizer([self.rgb_image_sub, self.depth_image_sub], 10)
self.sync.registerCallback(self.observation_callback_with_depth)
else:
self.rgb_image_sub = rospy.Subscriber(self.rgb_image_topic, sensor_msgs.msg.Image, self.observation_callback, queue_size=1)
#self.depth_estimator = DepthEstimator()
#self.sync = message_filters.TimeSynchronizer([self.tracks_subscriber, self.rgb_image_subscriber, self.depth_image_subscriber], 10)
#self.sync.registerCallback(self.observation_callback)
def camera_info_callback(self, msg):
if self.camera_info is None:
rospy.loginfo("Camera info received !")
self.camera_info = msg
self.camera_frame_id = msg.header.frame_id
self.camera_matrix = np.array(msg.K).reshape((3, 3))
self.dist_coeffs = np.array(msg.D)
def get_last_transform_from_tf2(self, source_frame, target_frame):
try:
trans = self.tf_buffer.lookup_transform(source_frame, target_frame, rospy.Time(0))
x = trans.transform.translation.x
y = trans.transform.translation.y
z = trans.transform.translation.z
rx = trans.transform.rotation.x
ry = trans.transform.rotation.y
rz = trans.transform.rotation.z
rw = trans.transform.rotation.w
return True, [x, y, z], [rx, ry, rz, rw]
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
return False, [0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]
def observation_callback(self, rgb_image_msg):
if self.camera_info is not None:
bgr_image = self.bridge.imgmsg_to_cv2(rgb_image_msg)
rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
viz_frame = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
timer1 = cv2.getTickCount()
detections = []
if self.frame_count % self.n_frame == 0:
detections = self.face_detector.detect(rgb_image)
if self.frame_count % self.n_frame == 1:
detections = self.detector.detect(rgb_image)
self.frame_count += 1
human_detections = [d for d in detections if d.class_label in self.body_parts]
object_detections = [d for d in detections if d.class_label not in self.body_parts]
object_tracks = self.object_tracker.update(rgb_image, object_detections)
human_tracks = self.human_tracker.update(rgb_image, human_detections, self.camera_matrix, self.dist_coeffs)
fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer1)
detection_fps = "Detection and track fps : %0.4fhz" % fps
#print(detection_fps)
cv2.putText(viz_frame, detection_fps, (5, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
for track in self.human_tracker.tracks:
#if track.is_confirmed():
tl_corner = (int(track.bbox.left()), int(track.bbox.top()))
br_corner = (int(track.bbox.right()), int(track.bbox.bottom()))
# if track.class_label == "face":
# shape = self.landmark_estimator.estimate(rgb_image, track)
# for (x, y) in shape:
# cv2.circle(viz_frame, (x, y), 1, (0, 255, 0), -1)
if track.class_label == "face":
rot = track.rotation
trans = track.translation
#rot, trans = track.get_head_pose()
if rot is not None and trans is not None:
transform = geometry_msgs.msg.TransformStamped()
transform.header.stamp = rospy.Time.now()
transform.header.frame_id = self.camera_frame_id
transform.child_frame_id = "gaze"
transform.transform.translation.x = trans[0]
transform.transform.translation.y = trans[1]
transform.transform.translation.z = trans[2]
q_rot = quaternion_from_euler(rot[0], rot[1], rot[2], "rxyz")
transform.transform.rotation.x = q_rot[0]
transform.transform.rotation.y = q_rot[1]
transform.transform.rotation.z = q_rot[2]
transform.transform.rotation.w = q_rot[3]
self.tf_broadcaster.sendTransform(transform)
if track.uuid not in self.internal_simulator:
self.internal_simulator.load_urdf(track.uuid, "face.urdf", trans, q_rot)
self.internal_simulator.update_entity(track.uuid, trans, q_rot)
self.internal_simulator.get_human_visibilities(trans, q_rot)
cv2.drawFrameAxes(viz_frame, self.camera_matrix, self.dist_coeffs, np.array(rot).reshape((3,1)), np.array(trans).reshape(3,1), 0.03)
cv2.putText(viz_frame, track.uuid[:6], (tl_corner[0]+5, tl_corner[1]+25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (240, 0, 0), 2)
cv2.putText(viz_frame, track.class_label, (tl_corner[0]+5, tl_corner[1]+45), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (240, 0, 0), 2)
cv2.rectangle(viz_frame, tl_corner, br_corner, (255, 255, 0), 2)
viz_img_msg = self.bridge.cv2_to_imgmsg(viz_frame)
self.visualization_publisher.publish(viz_img_msg)
|
StarcoderdataPython
|
131512
|
<gh_stars>0
from mpi4pyve import MPI
import mpiunittest as unittest
class BaseTestMessageZero(object):
null_b = [None, MPI.INT]
null_v = [None, (0, None), MPI.INT]
def testPointToPoint(self):
comm = self.COMM
comm.Sendrecv(sendbuf=self.null_b, dest=comm.rank,
recvbuf=self.null_b, source=comm.rank)
r2 = comm.Irecv(self.null_b, comm.rank)
r1 = comm.Isend(self.null_b, comm.rank)
MPI.Request.Waitall([r1, r2])
def testCollectivesBlock(self):
comm = self.COMM
comm.Bcast(self.null_b)
comm.Gather(self.null_b, self.null_b)
comm.Scatter(self.null_b, self.null_b)
comm.Allgather(self.null_b, self.null_b)
comm.Alltoall(self.null_b, self.null_b)
def testCollectivesVector(self):
comm = self.COMM
comm.Gatherv(self.null_b, self.null_v)
comm.Scatterv(self.null_v, self.null_b)
comm.Allgatherv(self.null_b, self.null_v)
comm.Alltoallv(self.null_v, self.null_v)
@unittest.skip('necmpi')
@unittest.skipMPI('openmpi')
@unittest.skipMPI('SpectrumMPI')
def testReductions(self):
comm = self.COMM
comm.Reduce(self.null_b, self.null_b)
comm.Allreduce(self.null_b, self.null_b)
comm.Reduce_scatter_block(self.null_b, self.null_b)
rcnt = [0]*comm.Get_size()
comm.Reduce_scatter(self.null_b, self.null_b, rcnt)
try: comm.Scan(self.null_b, self.null_b)
except NotImplementedError: pass
try: comm.Exscan(self.null_b, self.null_b)
except NotImplementedError: pass
class TestMessageZeroSelf(BaseTestMessageZero, unittest.TestCase):
COMM = MPI.COMM_SELF
class TestMessageZeroWorld(BaseTestMessageZero, unittest.TestCase):
COMM = MPI.COMM_WORLD
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
33200
|
print("linear search")
si=int(input("\nEnter the size:"))
data=list()
for i in range(0,si):
n=int(input())
data.append(n)
cot=0
print("\nEnter the number you want to search:")
val=int(input())
for i in range(0,len(data)):
if(data[i]==val):
break;
else:
cot=cot+1
print(cot)#linear search result=4
#binary search
print("\nBinary Search")
cot=0
beg=0
end=len(data)
mid=(beg+end)/2
mid=int(mid)
while beg<end and val!=data[mid]:
if val>data[mid]:
beg=mid+1
else:
end=mid-1
mid=int((beg+end)/2)
cot=cot+1
if val==data[mid]:
print("\nDATA FOUND")
print(cot)
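#--- Illustration (added): the same lookup written with the standard library.
#Binary search assumes sorted input; bisect_left returns the insertion point,
#so a final equality check is still needed.
import bisect
def binary_search(sorted_data, target):
    idx = bisect.bisect_left(sorted_data, target)
    if idx < len(sorted_data) and sorted_data[idx] == target:
        return idx  #index of the target in sorted_data
    return -1  #target not present
print(binary_search(sorted(data), val))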
|
StarcoderdataPython
|
38044
|
""" Functions for working with tabix dosages in pandas dataframes
"""
import gzip
import numpy as np
import pandas as pd
import pysam
import statsmodels.api as sm
class Dosage(object):
def __init__(self, dosages, annotations, gene_name):
# Match up the annotation dataframe with the dosage dataframe
mindex = np.intersect1d(np.asarray(dosages.index, dtype=str),
np.asarray(annotations.index, dtype=str))
self.annot = annotations.loc[mindex, :]
ordering = self.annot.ix[:, 'pos'].argsort()
self.annot = self.annot.iloc[ordering, :]
self.dosages = dosages.ix[mindex, :]
self.dosages = self.dosages.iloc[ordering, :]
self.gene_name = gene_name
def run_eQTL(self, count_matrix, covariates, extra_snps=None):
#self.pvalues = self.dosages.apply()
pvalues = self.dosages.apply(eQTL_func, axis=1, args=(covariates,
count_matrix.ix[self.gene_name, :]))
self.pvalues = pvalues
def get_dosages_by_range(chrm, start, end, gene_name, annotation_file,
dosage_df, mapping=None):
"""
Fuzzy mapping between annotation and genotypes
Returns Dosage instance.
"""
ann_file = pysam.Tabixfile(annotation_file)
ann_v = ann_file.fetch(chrm, start, end)
rsIDs = []
pos = []
ref = []
alt = []
for i in ann_v:
i = i.split("\t")
rsIDs.append(i[3])
pos.append(int(i[1]))
ref.append(i[6])
alt.append(i[7])
annot = pd.DataFrame({'pos': pos, 'ref': ref, 'alt': alt}, index=pd.Index(rsIDs))
comb_iter = []
for dos in dosage_df:
mindex = np.intersect1d(np.asarray(dos.index, dtype=str),
np.asarray(annot.index, dtype=str))
if len(mindex) > 0:
comb_iter.append(dos.ix[mindex, :])
else:
pass
out_dos = pd.concat(comb_iter)
'''
dosages = pd.read_csv(dosage_path + path, sep=" ", header=None,
index_col = 0, skiprows=roughly_first,
nrows=roughly_end-roughly_first, names=col_names.columns)
'''
print(annot.shape, out_dos.shape, gene_name)
return Dosage(out_dos, annot, gene_name)
def generate_dosage_mapping(dosage_file, mapping_file = None, interval=50):
"""
Returns dictionary of rsIDs: fileposition from a dosage file
"""
if not mapping_file:
with open(dosage_file) as fh:
            next(fh)  # skip the header line
t = 0
debug = 0
f_i = {}
for i, j in enumerate(fh):
                if i % interval == 0:
f_i[j.split(" ")[0]] = i - 1
else: pass
return(f_i)
def eQTL_func(snps, cov, expression):
"""
"""
cov = cov.T
cov['snps'] = snps
cov = sm.add_constant(cov)
model = sm.OLS(expression, cov)
return(model.fit().pvalues['snps'])
class eQTL(object):
""" Python class for completing eQTLs. Does lazy loading of all large
files.
"""
    def __init__(self, dosages_path, expression, vannotations):
        self.dosage = dosages_path
        self.expression = expression
        self.vannotations = vannotations
    def generate_mapping(self):
pass
"""
if mapping:
for i in ann_v:
rsID = i.split("\t")[3]
try:
roughly_first = mapping[rsID]
rsIDs.append(rsID)
pos.append(int(i.split("\t")[1]))
break
except KeyError:
pass
for i in ann_v:
i = i.split("\t")
try:
roughly_end = mapping[i[3]]
except KeyError:
pass
pos.append(int(i[1]))
rsIDs.append(i[3])
"""
def get_annotation(annotation, chrm):
ann_file = pysam.Tabixfile(annotation)
ann_v = ann_file.fetch(chrm)
rsIDs = []
pos = []
ref = []
alt = []
for i in ann_v:
i = i.split("\t")
rsIDs.append(i[3])
pos.append(int(i[1]))
ref.append(i[6])
alt.append(i[7])
annot = pd.DataFrame({'pos': pos, 'ref': ref, 'alt': alt}, index=pd.Index(rsIDs))
return(annot)
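# --- Usage sketch (added for illustration; the file path is a placeholder) ---
# get_annotation expects a bgzipped, tabix-indexed annotation file whose
# tab-separated columns match the indices used above (pos at index 1, rsID at 3,
# ref at 6, alt at 7).
#
# annot = get_annotation("annotations.txt.gz", "22")
# print(annot.head())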
|
StarcoderdataPython
|