code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
---|---|---|
import unittest
from spyd.authentication.services.vanilla.auth_success import VanillaAuthSuccess
class TestVanillaAuthSuccess(unittest.TestCase):
def setUp(self):
self.instance = VanillaAuthSuccess('localhost', 'chasm')
def test_get_group_names(self):
group_names = self.instance.group_provider.get_group_names()
self.assertEqual(group_names, ('localhost.auth', 'chasm@localhost'))
def test_repr(self):
self.assertEqual(repr(self.instance.group_provider), '<VanillaGroupProvider chasm@localhost>')
|
[
"spyd.authentication.services.vanilla.auth_success.VanillaAuthSuccess"
] |
[((193, 233), 'spyd.authentication.services.vanilla.auth_success.VanillaAuthSuccess', 'VanillaAuthSuccess', (['"""localhost"""', '"""chasm"""'], {}), "('localhost', 'chasm')\n", (211, 233), False, 'from spyd.authentication.services.vanilla.auth_success import VanillaAuthSuccess\n')]
|
import tekore as tk
app_token = tk.request_client_token("b516728497b34264afab4e995b4e2569", "<KEY>")
spotify = tk.Spotify(app_token)
def menu():
print("search for:\n1. Artist\n2. Album\n3. Track ")
num = input("type your input there : ")
if num == "1":
artist_name = input("what's the artist name : ")
searh_string = "artist:" + artist_name
artists, = spotify.search(searh_string, types=('track',), limit=50)
print_article(artists)
elif num == "2":
album_name = input("what's the album name : ")
searh_string = "album:" + album_name
album, = spotify.search(searh_string, types=('track',), limit=50)
print_article(album)
elif num == "3":
track_name = input("what's the track name : ")
tracks, = spotify.search(track_name, types=('track',), limit=50)
print_article(tracks)
else:
print("what did you just type? try again!")
menu()
def print_article(element):
print ("{:<10} {:<70} {:<40}".format("popularity", "name", "artist"))
for elem in element.items:
print ("{:<10} {:<70} {:<40}".format(elem.popularity , elem.name, elem.artists[0].name))
if __name__ == '__main__':
menu()
|
[
"tekore.Spotify",
"tekore.request_client_token"
] |
[((33, 101), 'tekore.request_client_token', 'tk.request_client_token', (['"""b516728497b34264afab4e995b4e2569"""', '"""<KEY>"""'], {}), "('b516728497b34264afab4e995b4e2569', '<KEY>')\n", (56, 101), True, 'import tekore as tk\n'), ((112, 133), 'tekore.Spotify', 'tk.Spotify', (['app_token'], {}), '(app_token)\n', (122, 133), True, 'import tekore as tk\n')]
|
"""
Generates a path on the given occupancy grid (map of
the environment)
"""
import networkx as nx
from grid_loader import Grid
import numpy as np
def euclidean(node1, node2):
x1, y1 = node1
x2, y2 = node2
return ((x1-x2)**2+(y1-y2)**2)**0.5
class AStar:
# Constructor
def __init__(self):
self.graph = None
self.grid_res = None # m / pixel
def load_grid(self, grid_obj: Grid, occ_thresh = 0.5):
"""
Load a given Grid object into a networkx graph
The edges are given a weight 1 and the occupied
cells are removed
Parameters:
- grid_obj: Grid
A Grid object that is to be loaded for path
finding
- occ_thresh: float (default: 0.5)
A threshold value for depicting occupied cell
If cell value >= occ_thresh, it is considered
occupied and removed
Returns:
- removed_nodes: int
The number of nodes that were removed from
grid (number of occupied cells)
"""
self.grid_res = grid_obj.grid_res # Useful for translation from px to m and back
self.graph = nx.grid_2d_graph(grid_obj.w, grid_obj.h)
removed_nodes = 0
for i in range(grid_obj.w):
for j in range(grid_obj.h):
if grid_obj.grid_data[i, j] >= occ_thresh: # Occupied
self.graph.remove_node((i, j))
removed_nodes += 1
# Set edge properties of the graph
nx.set_edge_attributes(self.graph, {e: 1 for e in self.graph.edges()}, "cost")
return removed_nodes
# Return a route of [x, y] points
def get_route(self, start, end, heuristic = euclidean, weight = 0.5):
start_px = tuple((np.array(start) / self.grid_res).astype(int))
end_px = tuple((np.array(end) / self.grid_res).astype(int))
astar_path = nx.astar_path(self.graph, start_px, end_px,
heuristic=lambda n1, n2: weight*heuristic(n1, n2), weight="cost")
astar_path = np.array(astar_path)
astar_path_m = astar_path * self.grid_res
return astar_path_m
|
[
"numpy.array",
"networkx.grid_2d_graph"
] |
[((1256, 1296), 'networkx.grid_2d_graph', 'nx.grid_2d_graph', (['grid_obj.w', 'grid_obj.h'], {}), '(grid_obj.w, grid_obj.h)\n', (1272, 1296), True, 'import networkx as nx\n'), ((2139, 2159), 'numpy.array', 'np.array', (['astar_path'], {}), '(astar_path)\n', (2147, 2159), True, 'import numpy as np\n'), ((1861, 1876), 'numpy.array', 'np.array', (['start'], {}), '(start)\n', (1869, 1876), True, 'import numpy as np\n'), ((1931, 1944), 'numpy.array', 'np.array', (['end'], {}), '(end)\n', (1939, 1944), True, 'import numpy as np\n')]
|
'''
Created on Jan 25, 2017
@author: metelko
'''
# riaps:keep_import:begin
from riaps.run.comp import Component
import logging
import time
import os
# riaps:keep_import:end
class TempSensor(Component):
# riaps:keep_constr:begin
def __init__(self):
super(TempSensor, self).__init__()
self.pid = os.getpid()
self.temperature = 65
now = time.ctime(int(time.time()))
self.logger.info("(PID %s)-starting TempSensor, %s" % (str(self.pid),str(now)))
self.logger.info("Initial temp:%d, %s" % (self.temperature,str(now)))
# riaps:keep_constr:end
# riaps:keep_clock:begin
def on_clock(self):
now = time.ctime(int(time.time()))
msg = self.clock.recv_pyobj()
self.temperature = self.temperature + 1
msg = str(self.temperature)
msg = (now,msg)
self.logger.info("on_clock(): Temperature - %s, PID %s, %s" % (str(msg[1]),str(self.pid),str(now)))
self.ready.send_pyobj(msg)
# riaps:keep_clock:end
# riaps:keep_impl:begin
def __destroy__(self):
now = time.time()
self.logger.info("%s - stopping TempSensor, %s" % (str(self.pid),now))
# riaps:keep_impl:end
|
[
"os.getpid",
"time.time"
] |
[((315, 326), 'os.getpid', 'os.getpid', ([], {}), '()\n', (324, 326), False, 'import os\n'), ((1061, 1072), 'time.time', 'time.time', ([], {}), '()\n', (1070, 1072), False, 'import time\n'), ((386, 397), 'time.time', 'time.time', ([], {}), '()\n', (395, 397), False, 'import time\n'), ((669, 680), 'time.time', 'time.time', ([], {}), '()\n', (678, 680), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
from discord import Embed, Member
from discord.ext.commands import command, Cog, Context
import time
from dataclasses import dataclass
@dataclass
class Job:
title: str
salary: float
responsibilites: str
requirements: str
JOBS = [
Job(
"Backend Specialist",
0.0,
"Design, develop, and maintain a persistent data storage solution for the `-_-` bot. "
"Also, seek a more reliable hosting platform on which to deploy the bot.",
"Must have some experience with backend development, whether in a web development "
"context or otherwise. Must have solid understanding of Python programming and "
"working understanding of git and GitHub. Experience with discord.py and asynchronous "
"programming is beneficial but not required; on-the-job training is available.",
),
Job(
"Discord Bot Developer",
0.0,
"Skip the tutorial - work right at the cutting edge! Develop the newest and coolest "
"features for our very own `-_-` bot. Enjoy both an educational and rewarding work "
"environment. Aditionally, perform a basic beta testing and quality assurance role. ",
"Must have proficient level of Python understanding and basic level of git/GitHub "
"experience. No experience with discord.py necessary. Significant on-the-job training "
"is available. Specify additional qualifications upon application.",
),
Job(
"Senior Marketing Manager",
0.0,
"Encourage more server members to use the `-_-` bot on a regular basis. Coordinate with "
"frequent users to gather and prioritize requested features. Recruit more developer to "
"fill vacancies on the development team. Communicate results directly with the "
"development team.",
"Must have excellent communication and teamwork skills. No technical skills required. "
"An excellent entry-level position for aspiring members.",
),
]
class Develop(Cog):
"""Tools and utilites for bot developers."""
def __init__(self, bot):
self.bot = bot
self.start_time = time.time()
@property
def latency(self):
"""Returns the latency in milliseconds"""
return round(1000 * self.bot.latency, 3)
@command()
async def ping(self, ctx: Context):
"""Ping the bot for a response and latency."""
await ctx.send(embed=Embed(
title="Pong!",
description=f"Latency: {self.latency} ms",
))
@command()
async def stats(self, ctx: Context):
"""Returns some stats about this bot."""
time_delta = time.time() - self.start_time
result = Embed(
title="-_- Bot Stats",
description=f"Up time: {round(time_delta, 3)} s\n"
f"Latency: {self.latency} ms\n"
f"Guilds: {len(self.bot.guilds)}"
)
await ctx.send(embed=result)
@command()
async def demoji(self, ctx: Context, emoji: str):
"""Get the Unicode codepoint of an emoji (or any character)."""
hexpoint = str(hex(ord(emoji)))[2:]
codepoint = "\\U" + "0" * (8 - len(hexpoint)) + hexpoint
await ctx.send(f"`{codepoint}`")
@command()
async def develop(self, ctx: Context, member: Member = None):
# default to person who called the command
if member is None:
if not isinstance(ctx.author, Member):
await ctx.send("That's a user but not a member. "
"Please try again or report a bug.")
return
member = ctx.author
developer_role = ctx.guild.get_role(731262064391356487)
# member already has role
if developer_role in member.roles:
await ctx.send(f"{member.mention}, you're already a {developer_role.mention}! "
f"Congratulations!")
return
await member.add_roles(developer_role)
await ctx.send(f"Congratulations, {member.mention}, you are now an official "
f"{developer_role.mention} member! Please see `CONTRIBUTING.md` "
f"in `-_- source` to get started. Please also reach out to another "
f"developer at your earliest convenience. ")
@command(aliases=['job'])
async def jobs(self, ctx: Context):
if JOBS:
description = ("Exciting job offers are currently available!\n"
"To apply, do `-_- develop`, then contact any developer.\n\n")
description += "\n\n".join([
f"**{job.title}**\n"
f"*Salary*: ${job.salary}/hr\n"
f"*Responsibilities*: {job.responsibilites}\n"
f"*Requirements*: {job.requirements}"
for job in JOBS
])
else:
description = ("No jobs are available at this time.\n"
"Check back later for updates!")
await ctx.send(embed=Embed(
title="-_- Job Opportunities",
description=description,
))
def setup(bot):
bot.add_cog(Develop(bot))
|
[
"discord.ext.commands.command",
"discord.Embed",
"time.time"
] |
[((2336, 2345), 'discord.ext.commands.command', 'command', ([], {}), '()\n', (2343, 2345), False, 'from discord.ext.commands import command, Cog, Context\n'), ((2576, 2585), 'discord.ext.commands.command', 'command', ([], {}), '()\n', (2583, 2585), False, 'from discord.ext.commands import command, Cog, Context\n'), ((3016, 3025), 'discord.ext.commands.command', 'command', ([], {}), '()\n', (3023, 3025), False, 'from discord.ext.commands import command, Cog, Context\n'), ((3308, 3317), 'discord.ext.commands.command', 'command', ([], {}), '()\n', (3315, 3317), False, 'from discord.ext.commands import command, Cog, Context\n'), ((4392, 4416), 'discord.ext.commands.command', 'command', ([], {'aliases': "['job']"}), "(aliases=['job'])\n", (4399, 4416), False, 'from discord.ext.commands import command, Cog, Context\n'), ((2181, 2192), 'time.time', 'time.time', ([], {}), '()\n', (2190, 2192), False, 'import time\n'), ((2697, 2708), 'time.time', 'time.time', ([], {}), '()\n', (2706, 2708), False, 'import time\n'), ((2470, 2533), 'discord.Embed', 'Embed', ([], {'title': '"""Pong!"""', 'description': 'f"""Latency: {self.latency} ms"""'}), "(title='Pong!', description=f'Latency: {self.latency} ms')\n", (2475, 2533), False, 'from discord import Embed, Member\n'), ((5100, 5161), 'discord.Embed', 'Embed', ([], {'title': '"""-_- Job Opportunities"""', 'description': 'description'}), "(title='-_- Job Opportunities', description=description)\n", (5105, 5161), False, 'from discord import Embed, Member\n')]
|
import cv2
from model import PersonSegmentation
if __name__ == '__main__':
# change 'cpu' to 'cuda' if you have pytorch cuda and your discrete GPU has enough VRAM
# output size will autoscale to fit input image aspect ratio
# if you want full image resolution set 'is_resize=False'
ps = PersonSegmentation('cpu', is_resize=True, resize_size=480)
filename = r"test_image.png"
seg_map = ps.person_segment(filename)
frame, frame_original = ps.decode_segmap(seg_map, filename)
# skin_frame, skin2img_ratio = ps.skin_segment(frame)
skin_frame, skin2img_ratio = ps.skin_segment_pro(frame)
print(f"Skin to Image Percentage: {100 * skin2img_ratio:.2f}%")
cv2.imshow("Original vs Person Seg vs Skin segmented", cv2.vconcat([frame_original, frame, skin_frame]))
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"cv2.waitKey",
"cv2.destroyAllWindows",
"model.PersonSegmentation",
"cv2.vconcat"
] |
[((304, 362), 'model.PersonSegmentation', 'PersonSegmentation', (['"""cpu"""'], {'is_resize': '(True)', 'resize_size': '(480)'}), "('cpu', is_resize=True, resize_size=480)\n", (322, 362), False, 'from model import PersonSegmentation\n'), ((802, 816), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (813, 816), False, 'import cv2\n'), ((821, 844), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (842, 844), False, 'import cv2\n'), ((748, 796), 'cv2.vconcat', 'cv2.vconcat', (['[frame_original, frame, skin_frame]'], {}), '([frame_original, frame, skin_frame])\n', (759, 796), False, 'import cv2\n')]
|
"""
AnalysisConfig.py:
Centralized configuration for the nu_e CCQE analysis:
signal definitions (nu vs. anti-nu),
file locations,
etc.
Original author: <NAME> (<EMAIL>)
January 2014
"""
import ROOT
ROOT.PyConfig.IgnoreCommandLineOptions = True
#ROOT.gErrorIgnoreLevel = ROOT.kWarning
ROOT.gROOT.SetBatch()
import math
import argparse
import os
import sys
import pprint
import re
BLUEARC = "/minerva/data/users/{}/nu_e".format(os.environ["USER"])
GIRDOUTPUT ="/pnfs/minerva/persistent/"
SIDEBANDS=["Excess_High_Inline","Excess_Low_Inline","Pi0"]
class _AnalysisConfig(object):
Defaults = {
"data_types": ["data", "mc",],
}
ALLOWED_HELICITIES = ["FHC", "RHC"]
def __init__(self, **kwargs):
params = _AnalysisConfig.Defaults.copy()
params.update(kwargs)
self._config_keys = set()
for key, value in params.items():
self._config_keys.add(key)
setattr(self, key, value)
retained_dts = []
for dt in self.data_types:
if (self.mc_only and "data" in dt.lower()) or (self.data_only and "mc" in dt.lower()):
continue
retained_dts.append(dt)
self.data_types = retained_dts
def __repr__(self):
my_dict = dict([ (k, getattr(AnalysisConfig, k)) for k in AnalysisConfig._config_keys ])
return pprint.pformat(my_dict)
#### properties ####
#@property
#def bknd_constraint_method(self):
# return BKND_CONSTRAINT_METHOD
@property
def helicity(self):
return self._helicity
@helicity.setter
def helicity(self, value):
value_upper = value.upper()
if value_upper not in _AnalysisConfig.ALLOWED_HELICITIES:
raise NameError("Allowed helicities are '%s', not '%s'" % (value, _AnalysisConfig.ALLOWED_HELICITIES))
if "helicity" not in self._config_keys:
self._config_keys.add("helicity")
self._helicity = value_upper
#@property
#def POT(self):
# if self.processing_pass == "Resurrection":
# return POT
# else:
# raise Exception("Don't have POT data for processings other than Resurrection!")
#@property
#def POT_string(self, precision=2):
# exponent = int(math.log10(self.POT["data"]))
# mantissa = self.POT["data"] / 10**exponent
# fmt_string = "%%.%(precision)df #times 10^{%%d} P.O.T." % {"precision": precision}
# return fmt_string% (mantissa, exponent)
# @property
# def data_MC_POT_ratio(self):
# return self.POT["data"] / self.POT["MC"]
# @property
# def processing_pass(self):
# return self._processing_pass
# @processing_pass.setter
# def processing_pass(self, value):
# value_cap = value.lower().capitalize()
# if value_cap not in _AnalysisConfig.ALLOWED_PROCESSING_PASSES:
# raise NameError("Allowed processing passes are '%s', not '%s'" % (value, _AnalysisConfig.ALLOWED_PROCESSING_PASSES))
# if "processing_pass" not in self._config_keys:
# self._config_keys.add("processing_pass")
# self._processing_pass = value_cap
@property
def right_sign_electron_pdg(self):
""" Sign of the PDG code for "right sign" in this configuration """
return 1 if self.helicity == "FHC" else -1 # electron is +11; positron is -11
#### public interface ####
def DataTypeNtupleList(self):
filename_config = {
"proc_pass": self.processing_pass,
"helicity": self.helicity,
}
for dt in self.data_types:
filename_config.update( { "data_type": dt } )
filename = NTUPLE_FILENAME_FORMAT % filename_config
# print " searching for filename:", filename
fullpaths = []
# print "looking here:", NTUPLE_PATH
for root, dirs, files in os.walk(NTUPLE_PATH):
# print "considering filenames:", files
if filename in files:
fullpaths.append(os.path.join(root, filename))
if len(fullpaths) == 1:
# print "Found ntuple list for specification '%s':" % dt, fullpaths[-1]
yield dt, fullpaths[-1]
elif len(fullpaths) > 1:
raise Exception("Multiple matches for data type specification '%s': %s" % (dt, fullpaths))
else:
continue
def FilePath(self, top_dir,tier_tag, playlist, data_tag, type_tag):
return "{}/{}_{}{}_{}".format(top_dir,tier_tag, data_tag,
playlist, type_tag)
# print signal_defn
# signal_defn = signal_defn if signal_defn is not None else self.signal_defn
# signal_defn = SIGNAL_DEFN_TEXT[signal_defn]
# print signal_defn
# path = os.path.join(TOP_DIR, signal_defn, dirname)
# if subdir:
# path = os.path.join(path, subdir)
# if helicity:
# path = os.path.join(path, self.helicity)
# return path
def SelectionHistoPath(self, playlist, is_data, is_output = True):
return self.FilePath(self.output_dir if is_output else self.input_dir,
"kin_dist"+("test" if self.testing else ""), playlist,
"data" if is_data else "mc",
self.selection_tag+"_"+self.ntuple_tag+
("_"+str(self.count[0]) if self.count[0] is not None else "")+
".root")
def CutStudyPath(self, playlist, is_data, is_output = True):
return self.FilePath(self.output_dir if is_output else self.input_dir,
"cut_study"+("test" if self.testing else ""), playlist,
"data" if is_data else "mc",
self.selection_tag+"_"+self.ntuple_tag+
("_"+str(self.count[0]) if self.count[0] is not None else "")+
".root")
def TruthHistoPath(self,playlist,is_output = True):
return self.FilePath(self.output_dir if is_output else self.input_dir,
"truth_dist"+("test" if self.testing else ""),playlist,
"mc", self.selection_tag+"_"+self.ntuple_tag+("_"+str(self.count[0]) if self.count[0] is not None else "")+
".root")
def UnfoldedHistoPath(self,playlist,tag,is_output=True):
return self.FilePath(self.output_dir if is_output else self.input_dir,
"unfolded"+("test" if self.testing else ""),playlist,
"", self.selection_tag+"_"+self.ntuple_tag+"_"+tag+
".root")
def XSecHistoPath(self,playlist,is_output=True):
return self.FilePath(self.output_dir if is_output else self.input_dir,
"xsec"+("test" if self.testing else ""),playlist,
"", self.selection_tag+"_"+self.ntuple_tag+
".root")
def BackgroundFitPath(self, playlist, tag, is_output = True):
return self.FilePath(self.output_dir if is_output else self.input_dir,
"bkgfit", playlist, "" , tag+"_"+self.selection_tag+"_"+self.ntuple_tag+".root")
def PlotPath(self, plot_name, sideband,tag=""):
return self.FilePath(self.output_dir,"plot/"+plot_name, sideband, "" , self.selection_tag+"_"+tag)
#### entry point ####
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-p", "--playlist",
dest="playlist",
help="Process given playlists."
)
parser.add_argument("--grid",
action="store_true",
default = False,
help = "Run macro on grid, Input/Output path must be updated to avoid direct access to BlueArc"
)
parser.add_argument("-d", "--data_types",
dest="data_types",
action="append",
help="Data types to process. Supply this once for every data type you want to use.",
default=argparse.SUPPRESS,
)
parser.add_argument("--use-sideband", "--use_sideband",
dest="sidebands",
nargs='*',
help="Use this sideband rather than defalut. (If you use this option at all, you must use it to specify ALL the sidebands you want.) ",
default=SIDEBANDS,
)
parser.add_argument("--data_only", "--data-only",
dest="data_only",
action="store_true",
help="Shortcut option to process only data from the 'data_types' option. If you supply both '--data_only' and '--mc_only', '--mc-only' takes precedence.",
default=False,
)
parser.add_argument("--mc_only", "--mc-only",
dest="mc_only",
action="store_true",
help="Shortcut option to process only MC from the 'data_types' option. If you supply both '--data_only' and '--mc_only', '--mc-only' takes precedence.",
default=False,
)
parser.add_argument("--pc",
dest="is_pc",
action="store_true",
help="running over particle cannon sample",
default=False,
)
parser.add_argument("--signal",
dest="signal",
action="store_true",
help="Use only the extracted true signal event samples.",
default=argparse.SUPPRESS,
)
parser.add_argument("-t", "--test", "--testing",
dest="testing",
action="store_true",
help="Use a smaller sample for testing purposes. ",
default=False
)
parser.add_argument("-o", "--output",
dest = "output_dir",
help="Use alternate location for output file.",
default=BLUEARC
)
parser.add_argument("-i", "--input",
dest = "input_dir",
help="Use alternate location for input files other than ntuple.",
default=BLUEARC
)
parser.add_argument("--ntuple_tag", "--ntuple-tag",
help="Use ntuple playlist tagged by given tag.",
default="LLR"
)
parser.add_argument("--selection_tag","--selection-tag",
help="Use event selection histograms tagged by given tag.",
default="collab1"
)
parser.add_argument("--bkgTune_tag","--bkgTune-tag",
help="Use event selection histograms tagged by given tag.",
default="Global"
)
parser.add_argument("--count",
help="process arg1 subruns starting from arg0 entry of playlist.",
type = int,
nargs = 2,
default = [None,None])
parser.add_argument("--cal_POT","--cal-POT",
help="recount POT even if POT info is available",
dest = "POT_cal",
action="store_true",
default=False)
parser.add_argument("--only_cal_POT","--only-cal-POT",
help="do not run selection but only count POT",
dest = "run_reco",
action="store_false",
default=True)
parser.add_argument("--exclude_universes","--exclude-universes",
help="do not want some systematics universes, list their ShortName()",
nargs="*",
)
parser.add_argument("--skip_2p2h","--skip_2p2h",
help="do not want 2p2h events,(use this when you are going to run delicate 2p2h sample.)",
action="store_true",
default=False)
parser.add_argument("--truth",
help="run truth loop: more correct efficiency demominator",
action="store_true",
default=False)
parser.add_argument("--extra_weighter",
help="Name of extra weighter you want to use",
default=None)
options = parser.parse_args()
if options.data_only and options.mc_only:
options.data_only = False
#if options.playlist is None:
#print "Please specify a playlist."
#sys.exit(1)
# If only cal POT, doesn't make sense to read from record.
if not options.run_reco:
options.POT_cal=True
if options.grid:
#override start variable by $PROCESS variable
nth_job = int(os.environ["PROCESS"])
options.count[0]=nth_job*options.count[1]+options.count[0]
if options.testing:
options.count = [0,1]
AnalysisConfig = _AnalysisConfig(**vars(options))
print("Using analysis configuration:")
print(AnalysisConfig)
|
[
"pprint.pformat",
"argparse.ArgumentParser",
"os.walk",
"ROOT.gROOT.SetBatch",
"os.path.join"
] |
[((328, 349), 'ROOT.gROOT.SetBatch', 'ROOT.gROOT.SetBatch', ([], {}), '()\n', (347, 349), False, 'import ROOT\n'), ((7591, 7670), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (7614, 7670), False, 'import argparse\n'), ((1394, 1417), 'pprint.pformat', 'pprint.pformat', (['my_dict'], {}), '(my_dict)\n', (1408, 1417), False, 'import pprint\n'), ((3939, 3959), 'os.walk', 'os.walk', (['NTUPLE_PATH'], {}), '(NTUPLE_PATH)\n', (3946, 3959), False, 'import os\n'), ((4090, 4118), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (4102, 4118), False, 'import os\n')]
|
import numpy as np
import collections
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn.manifold import MDS
from time import time
from warnings import warn
class ForestSim():
def __init__(self, forest):
# TODO : adapt if non sklearn forest used
self.forest = forest
def fit(self, X, y = 2, randomize = False, nb_repet = 1, keep_all_mat = False):
self.X = np.float32(X) #used in tree.apply function
self.y = y
self.n = self.X.shape[0]
self.similarity_matrix = np.zeros((self.n,self.n))
# True to keep all sim matrices
if keep_all_mat:
self.co_ocs = []
# create the target vector if needed
if not isinstance(self.y, collections.Sequence):
self.y_ = np.random.choice(self.y, size = (self.n,))
else:
self.y_ = self.y
t0 = time()
for repet_id in range(nb_repet):
t = time()
print("Fitting - {}/{} iteration".format(repet_id,nb_repet))
# random seed to have changing bootstrapping in forest.fit
np.random.seed(repet_id)
if randomize:
np.random.shuffle(self.y_)
self.forest.fit(self.X,self.y_) # check inplace op
sim = self.calculate_a_sim_mat()
self.similarity_matrix += sim
if keep_all_mat:
self.co_ocs.append(sim)
print("Took {} seconds".format(np.round(time()-t, decimals=2)))
print("Total time : {} seconds".format(np.round(time()-t0, decimals=2)))
self.similarity_matrix /= nb_repet
return (self)
def calculate_a_sim_mat(self):
co_oc = np.zeros((self.n,self.n))
for iter_, dt in enumerate(self.forest.estimators_):
leafs_id = dt.tree_.apply(self.X)
ser = pd.DataFrame(data={"ser":leafs_id, "ones":1})
ser = ser.pivot(columns="ser").fillna(0)
ser = ser.dot(ser.T)
co_oc+= ser.values
# pondération par unique n of leaf a reflechir
co_oc = co_oc/len(self.forest.estimators_)
return (co_oc)
# should we return a copy ?
def get_similarity_matrix(self):
return (self.similarity_matrix)
def get_distance_matrix(self):
return (np.sqrt(1-self.similarity_matrix))
# use sklearn.manifold.MDS kwags
def apply_MDS(self,n_instance=100, dissimilarity = "precomputed",**kwargs):
np.random.seed(0)
if isinstance(n_instance,int) and 0<n_instance and n_instance<=self.n:
idx = np.random.choice(self.n,n_instance,replace=False)
elif isinstance(n_instance,float) and 0<n_instance and n_instance<=1:
idx = np.random.choice(self.n,int(self.n*n_instance),replace=False)
else:
warn("invalid n_instance argument - should be in [0.0;1.0] or [0,self.n]")
idx = np.arange(self.n)
if len(idx) == self.n:
print("Computing MDS on all {} instances.".format(self.n))
else:
print("Computing MDS on {} / {} instances.".format(len(idx),self.n))
kwargs.update({"dissimilarity":dissimilarity})
if "dissimilarity" not in kwargs.keys():
print("Computing non precomputed MDS - set dissimilarity to precomputed to use the distance matrix")
mds = MDS(**kwargs)
self.X_mds = mds.fit_transform(self.X[idx,:])
else:
print("Computing MDS on precomputed dissimilarities.")
mds = MDS(**kwargs)
dist_mat_ = self.get_distance_matrix()[idx][:,idx]
self.X_mds = mds.fit_transform(dist_mat_)
return (self.X_mds)
def project_MDS_2D(self, **kwargs):
# TODO : add saving options
# TODO : add the necessary sampling, then stratified sampling...
plt.figure(figsize=(8,8))
sns.scatterplot(x = self.X_mds[:,0],
y=self.X_mds[:,1]
)
plt.show()
def main():
# should be able to take a standard csv file somewhere, apply one of the two methods, and output the sim mat in a csv file
print("work in progress")
if __name__ == "__main__":
main()
|
[
"pandas.DataFrame",
"numpy.random.seed",
"matplotlib.pyplot.show",
"seaborn.scatterplot",
"numpy.float32",
"numpy.zeros",
"time.time",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.random.choice",
"warnings.warn",
"sklearn.manifold.MDS",
"numpy.random.shuffle",
"numpy.sqrt"
] |
[((405, 418), 'numpy.float32', 'np.float32', (['X'], {}), '(X)\n', (415, 418), True, 'import numpy as np\n'), ((515, 541), 'numpy.zeros', 'np.zeros', (['(self.n, self.n)'], {}), '((self.n, self.n))\n', (523, 541), True, 'import numpy as np\n'), ((796, 802), 'time.time', 'time', ([], {}), '()\n', (800, 802), False, 'from time import time\n'), ((1470, 1496), 'numpy.zeros', 'np.zeros', (['(self.n, self.n)'], {}), '((self.n, self.n))\n', (1478, 1496), True, 'import numpy as np\n'), ((2006, 2041), 'numpy.sqrt', 'np.sqrt', (['(1 - self.similarity_matrix)'], {}), '(1 - self.similarity_matrix)\n', (2013, 2041), True, 'import numpy as np\n'), ((2155, 2172), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2169, 2172), True, 'import numpy as np\n'), ((3349, 3375), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (3359, 3375), True, 'import matplotlib.pyplot as plt\n'), ((3377, 3432), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': 'self.X_mds[:, 0]', 'y': 'self.X_mds[:, 1]'}), '(x=self.X_mds[:, 0], y=self.X_mds[:, 1])\n', (3392, 3432), True, 'import seaborn as sns\n'), ((3442, 3452), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3450, 3452), True, 'import matplotlib.pyplot as plt\n'), ((718, 758), 'numpy.random.choice', 'np.random.choice', (['self.y'], {'size': '(self.n,)'}), '(self.y, size=(self.n,))\n', (734, 758), True, 'import numpy as np\n'), ((845, 851), 'time.time', 'time', ([], {}), '()\n', (849, 851), False, 'from time import time\n'), ((982, 1006), 'numpy.random.seed', 'np.random.seed', (['repet_id'], {}), '(repet_id)\n', (996, 1006), True, 'import numpy as np\n'), ((1604, 1651), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'ser': leafs_id, 'ones': 1}"}), "(data={'ser': leafs_id, 'ones': 1})\n", (1616, 1651), True, 'import pandas as pd\n'), ((2255, 2306), 'numpy.random.choice', 'np.random.choice', (['self.n', 'n_instance'], {'replace': '(False)'}), '(self.n, n_instance, replace=False)\n', (2271, 2306), True, 'import numpy as np\n'), ((2935, 2948), 'sklearn.manifold.MDS', 'MDS', ([], {}), '(**kwargs)\n', (2938, 2948), False, 'from sklearn.manifold import MDS\n'), ((3073, 3086), 'sklearn.manifold.MDS', 'MDS', ([], {}), '(**kwargs)\n', (3076, 3086), False, 'from sklearn.manifold import MDS\n'), ((1028, 1054), 'numpy.random.shuffle', 'np.random.shuffle', (['self.y_'], {}), '(self.y_)\n', (1045, 1054), True, 'import numpy as np\n'), ((2459, 2533), 'warnings.warn', 'warn', (['"""invalid n_instance argument - should be in [0.0;1.0] or [0,self.n]"""'], {}), "('invalid n_instance argument - should be in [0.0;1.0] or [0,self.n]')\n", (2463, 2533), False, 'from warnings import warn\n'), ((2543, 2560), 'numpy.arange', 'np.arange', (['self.n'], {}), '(self.n)\n', (2552, 2560), True, 'import numpy as np\n'), ((1345, 1351), 'time.time', 'time', ([], {}), '()\n', (1349, 1351), False, 'from time import time\n'), ((1271, 1277), 'time.time', 'time', ([], {}), '()\n', (1275, 1277), False, 'from time import time\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""CNN_using_persistence_images_on_patch.py
The aim of this script is to perform the training of a CNN using persistence
images as a input. This script is inspired from this script:
BorgwardtLab/ADNI_MRI_Analysis/blob/mixed_CNN/mixed_CNN/run_Sarah.py
To get real time information into the model training and structure, run
$ tensorboard --logdir logs/fit
once this script has been started.
NOTES:
- One loaded, the "big" 100x100x3 images aren't that big (>400MB in RAM) so
NO GENERATOR NEEDED
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import dotenv
import datetime
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import numpy as np
import pandas as pd
from tqdm import tqdm
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from itertools import islice
import shutil
print(tf.test.gpu_device_name())
DOTENV_KEY2VAL = dotenv.dotenv_values()
tf.random.set_seed(42)
N_BINS = 1000
N_FILTERS = 4
KERNEL_SIZE = 4
DROPOUT_RATE = 0.3
################################################################################
# Functions
################################################################################
persistence_image_location = (
DOTENV_KEY2VAL["DATA_DIR"] + "/global_persistence_images/"
)
partitions_location = DOTENV_KEY2VAL["DATA_DIR"] + "/partitions/"
diagnosis_json = (
DOTENV_KEY2VAL["DATA_DIR"] + "/collected_diagnoses_complete.json"
)
def make_model(input_shape):
"""Makes a keras model.
Args:
input_shape (tuple): input shape of the neural network
num_classes (int): number of classes involved
Returns:
keral.Model: model ready to be trained
"""
inputs = keras.Input(shape=input_shape)
tower_1 = layers.Conv2D(
N_FILTERS, KERNEL_SIZE, padding="same", activation="relu"
)(inputs[:, :, :, 0:1])
tower_1 = layers.BatchNormalization()(tower_1)
tower_1 = layers.MaxPooling2D()(tower_1)
tower_2 = layers.Conv2D(
N_FILTERS, KERNEL_SIZE, padding="same", activation="relu"
)(inputs[:, :, :, 1:2])
tower_2 = layers.BatchNormalization()(tower_2)
tower_2 = layers.MaxPooling2D()(tower_2)
tower_3 = layers.Conv2D(
N_FILTERS, KERNEL_SIZE, padding="same", activation="relu"
)(inputs[:, :, :, 2:])
tower_3 = layers.BatchNormalization()(tower_3)
tower_3 = layers.MaxPooling2D()(tower_3)
merged = layers.concatenate([tower_1, tower_2, tower_3], axis=1)
merged = layers.Flatten()(merged)
x = layers.Dense(500, activation="relu")(merged)
x = layers.Dropout(DROPOUT_RATE)(x)
x = layers.Dense(500, activation="relu")(merged)
x = layers.Dropout(DROPOUT_RATE)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
return keras.Model(inputs, outputs)
def get_partitions(partitions_location):
partition = []
labels = []
for root, dirs, files in os.walk(partitions_location):
for file in files:
if file.split("_")[0] == "partition":
partition.append(
np.load(
partitions_location + file, allow_pickle=True
).item()
)
elif file.split("_")[0] == "labels":
labels.append(
np.load(
partitions_location + file, allow_pickle=True
).item()
)
else:
print(f"File {file} is neither partition nor labels file")
return partition, labels
################################################################################
# Main
################################################################################
def main():
############################################################################
# Data loading and processing
############################################################################
inits = 3
partitions, labels = get_partitions(partitions_location)
histories = []
for partition, label in zip(partitions, labels):
for i in range(inits):
# Make sure there aren't the same patients in train and test
X_train_lst = []
y_train_lst = []
for image in tqdm(partition["train"]):
X_train_lst.append(
np.load(persistence_image_location + image + ".npy")
)
y_train_lst.append(label[image])
X_train, y_train = (
np.stack(X_train_lst, axis=0).reshape(
len(X_train_lst), N_BINS, N_BINS, 3
),
np.vstack(y_train_lst),
)
print("Training data loadede")
X_val_lst = []
y_val_lst = []
for image in tqdm(partition["validation"]):
X_val_lst.append(
np.load(persistence_image_location + image + ".npy")
)
y_val_lst.append(label[image])
X_val, y_val = (
np.stack(X_val_lst, axis=0).reshape(
len(X_val_lst), N_BINS, N_BINS, 3
),
np.vstack(y_val_lst),
)
print("Validation data loadede")
####################################################################
# Model definition
####################################################################
model = make_model(input_shape=(N_BINS, N_BINS, 3))
tf.keras.utils.plot_model(
model,
to_file="model.png",
show_shapes=True,
show_layer_names=True,
rankdir="TB",
expand_nested=True,
dpi=96,
)
####################################################################
# Model training
####################################################################
epochs = 100
tensorboard_logs = "logs/fit"
if os.path.exists(tensorboard_logs):
shutil.rmtree(tensorboard_logs)
log_dir = "logs/fit/" + datetime.datetime.now().strftime(
"%Y%m%d-%H%M%S"
)
callbacks = [
tf.keras.callbacks.TensorBoard(
log_dir=log_dir, histogram_freq=1
),
tf.keras.callbacks.EarlyStopping(
monitor="val_accuracy",
min_delta=0.001,
patience=10,
verbose=0,
mode="auto",
baseline=None,
restore_best_weights=True,
),
tf.keras.callbacks.ModelCheckpoint(
filepath="model_weights",
save_weights_only=True,
monitor="val_accuracy",
mode="max",
save_best_only=True,
),
]
lr = keras.optimizers.schedules.ExponentialDecay(
0.01, decay_steps=30, decay_rate=0.6, staircase=True
)
model.compile(
optimizer=keras.optimizers.Adam(
learning_rate=lr,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-07,
amsgrad=False,
),
loss="binary_crossentropy",
metrics=[
keras.metrics.BinaryAccuracy(name="accuracy"),
keras.metrics.Precision(name="precision"),
keras.metrics.Recall(name="recall"),
keras.metrics.AUC(name="auc"),
],
# run_eagerly=True,
)
history = model.fit(
X_train,
y_train,
epochs=epochs,
callbacks=callbacks,
batch_size=16,
validation_data=(X_val, y_val),
)
histories.append(history)
############################################################################
# Model evaluation
############################################################################
# Mosly already included into the training procedure.
last_acc = []
last_val_acc = []
last_val_prec = []
last_val_rec = []
last_val_auc = []
for hist in histories:
last_acc.append(max(hist.history["accuracy"]))
last_val_acc.append(max(hist.history["val_accuracy"]))
last_val_prec.append(max(hist.history["val_precision"]))
last_val_rec.append(max(hist.history["val_recall"]))
last_val_auc.append(max(hist.history["val_auc"]))
print(
f"The mean training accuracy over the folds is {np.mean(last_acc)}, pm {np.std(last_acc)}"
)
print(
f"The mean validation accuracy over the folds is {np.mean(last_val_acc)}, pm {np.std(last_val_acc)}"
)
print(
f"The mean validation precision over the folds is {np.mean(last_val_prec)}, pm {np.std(last_val_prec)}"
)
print(
f"The mean validation recall over the folds is {np.mean(last_val_rec)}, pm {np.std(last_val_rec)}"
)
print(
f"The mean validation auc over the folds is {np.mean(last_val_auc)}, pm {np.std(last_val_auc)}"
)
############################################################################
# Model evaluation
############################################################################
# Here we actually extract the id of the samples that are misclassified
# y_pred = model.predict(X_train)
# difference = np.round(y_train - y_pred)
# index = np.nonzero(difference)
# y_pred = model.predict(X_val)
# difference = np.round(y_val - y_pred)
# index_2 = np.nonzero(difference)
# df_misclassified_train = pd.DataFrame(
# np.array(partitions[0]["train"])[index[0]]
# )
# df_misclassified_val = pd.DataFrame(
# np.array(partitions[0]["validation"])[index_2[0]]
# )
# df_misclassified = pd.concat(
# [df_misclassified_train, df_misclassified_val]
# )
# df_misclassified.to_csv(
# DOTENV_KEY2VAL["GEN_DATA_DIR"] + "misclassification.csv"
# )
if __name__ == "__main__":
main()
|
[
"tensorflow.random.set_seed",
"numpy.load",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Dense",
"os.walk",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.layers.concatenate",
"numpy.mean",
"tensorflow.keras.metrics.BinaryAccuracy",
"shutil.rmtree",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.BatchNormalization",
"numpy.std",
"tensorflow.keras.Input",
"os.path.exists",
"tensorflow.keras.utils.plot_model",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.metrics.Precision",
"dotenv.dotenv_values",
"datetime.datetime.now",
"numpy.stack",
"tqdm.tqdm",
"tensorflow.keras.metrics.AUC",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.Model",
"tensorflow.test.gpu_device_name",
"tensorflow.keras.optimizers.schedules.ExponentialDecay",
"numpy.vstack",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.metrics.Recall",
"tensorflow.keras.callbacks.TensorBoard"
] |
[((934, 956), 'dotenv.dotenv_values', 'dotenv.dotenv_values', ([], {}), '()\n', (954, 956), False, 'import dotenv\n'), ((957, 979), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(42)'], {}), '(42)\n', (975, 979), True, 'import tensorflow as tf\n'), ((889, 914), 'tensorflow.test.gpu_device_name', 'tf.test.gpu_device_name', ([], {}), '()\n', (912, 914), True, 'import tensorflow as tf\n'), ((1742, 1772), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (1753, 1772), False, 'from tensorflow import keras\n'), ((2446, 2501), 'tensorflow.keras.layers.concatenate', 'layers.concatenate', (['[tower_1, tower_2, tower_3]'], {'axis': '(1)'}), '([tower_1, tower_2, tower_3], axis=1)\n', (2464, 2501), False, 'from tensorflow.keras import layers\n'), ((2792, 2820), 'tensorflow.keras.Model', 'keras.Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (2803, 2820), False, 'from tensorflow import keras\n'), ((2928, 2956), 'os.walk', 'os.walk', (['partitions_location'], {}), '(partitions_location)\n', (2935, 2956), False, 'import os\n'), ((1788, 1860), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['N_FILTERS', 'KERNEL_SIZE'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(N_FILTERS, KERNEL_SIZE, padding='same', activation='relu')\n", (1801, 1860), False, 'from tensorflow.keras import layers\n'), ((1911, 1938), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (1936, 1938), False, 'from tensorflow.keras import layers\n'), ((1962, 1983), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {}), '()\n', (1981, 1983), False, 'from tensorflow.keras import layers\n'), ((2008, 2080), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['N_FILTERS', 'KERNEL_SIZE'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(N_FILTERS, KERNEL_SIZE, padding='same', activation='relu')\n", (2021, 2080), False, 'from tensorflow.keras import layers\n'), ((2131, 2158), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (2156, 2158), False, 'from tensorflow.keras import layers\n'), ((2182, 2203), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {}), '()\n', (2201, 2203), False, 'from tensorflow.keras import layers\n'), ((2228, 2300), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['N_FILTERS', 'KERNEL_SIZE'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(N_FILTERS, KERNEL_SIZE, padding='same', activation='relu')\n", (2241, 2300), False, 'from tensorflow.keras import layers\n'), ((2350, 2377), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (2375, 2377), False, 'from tensorflow.keras import layers\n'), ((2401, 2422), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {}), '()\n', (2420, 2422), False, 'from tensorflow.keras import layers\n'), ((2515, 2531), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (2529, 2531), False, 'from tensorflow.keras import layers\n'), ((2548, 2584), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(500)'], {'activation': '"""relu"""'}), "(500, activation='relu')\n", (2560, 2584), False, 'from tensorflow.keras import layers\n'), ((2601, 2629), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['DROPOUT_RATE'], {}), '(DROPOUT_RATE)\n', (2615, 2629), False, 'from tensorflow.keras import layers\n'), ((2641, 2677), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(500)'], {'activation': '"""relu"""'}), "(500, activation='relu')\n", (2653, 2677), False, 'from tensorflow.keras import layers\n'), ((2694, 2722), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['DROPOUT_RATE'], {}), '(DROPOUT_RATE)\n', (2708, 2722), False, 'from tensorflow.keras import layers\n'), ((2740, 2777), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (2752, 2777), False, 'from tensorflow.keras import layers\n'), ((4278, 4302), 'tqdm.tqdm', 'tqdm', (["partition['train']"], {}), "(partition['train'])\n", (4282, 4302), False, 'from tqdm import tqdm\n'), ((4844, 4873), 'tqdm.tqdm', 'tqdm', (["partition['validation']"], {}), "(partition['validation'])\n", (4848, 4873), False, 'from tqdm import tqdm\n'), ((5595, 5735), 'tensorflow.keras.utils.plot_model', 'tf.keras.utils.plot_model', (['model'], {'to_file': '"""model.png"""', 'show_shapes': '(True)', 'show_layer_names': '(True)', 'rankdir': '"""TB"""', 'expand_nested': '(True)', 'dpi': '(96)'}), "(model, to_file='model.png', show_shapes=True,\n show_layer_names=True, rankdir='TB', expand_nested=True, dpi=96)\n", (5620, 5735), True, 'import tensorflow as tf\n'), ((6136, 6168), 'os.path.exists', 'os.path.exists', (['tensorboard_logs'], {}), '(tensorboard_logs)\n', (6150, 6168), False, 'import os\n'), ((7120, 7221), 'tensorflow.keras.optimizers.schedules.ExponentialDecay', 'keras.optimizers.schedules.ExponentialDecay', (['(0.01)'], {'decay_steps': '(30)', 'decay_rate': '(0.6)', 'staircase': '(True)'}), '(0.01, decay_steps=30,\n decay_rate=0.6, staircase=True)\n', (7163, 7221), False, 'from tensorflow import keras\n'), ((6186, 6217), 'shutil.rmtree', 'shutil.rmtree', (['tensorboard_logs'], {}), '(tensorboard_logs)\n', (6199, 6217), False, 'import shutil\n'), ((6377, 6442), 'tensorflow.keras.callbacks.TensorBoard', 'tf.keras.callbacks.TensorBoard', ([], {'log_dir': 'log_dir', 'histogram_freq': '(1)'}), '(log_dir=log_dir, histogram_freq=1)\n', (6407, 6442), True, 'import tensorflow as tf\n'), ((6498, 6658), 'tensorflow.keras.callbacks.EarlyStopping', 'tf.keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_accuracy"""', 'min_delta': '(0.001)', 'patience': '(10)', 'verbose': '(0)', 'mode': '"""auto"""', 'baseline': 'None', 'restore_best_weights': '(True)'}), "(monitor='val_accuracy', min_delta=0.001,\n patience=10, verbose=0, mode='auto', baseline=None,\n restore_best_weights=True)\n", (6530, 6658), True, 'import tensorflow as tf\n'), ((6827, 6976), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', ([], {'filepath': '"""model_weights"""', 'save_weights_only': '(True)', 'monitor': '"""val_accuracy"""', 'mode': '"""max"""', 'save_best_only': '(True)'}), "(filepath='model_weights',\n save_weights_only=True, monitor='val_accuracy', mode='max',\n save_best_only=True)\n", (6861, 6976), True, 'import tensorflow as tf\n'), ((8923, 8940), 'numpy.mean', 'np.mean', (['last_acc'], {}), '(last_acc)\n', (8930, 8940), True, 'import numpy as np\n'), ((8947, 8963), 'numpy.std', 'np.std', (['last_acc'], {}), '(last_acc)\n', (8953, 8963), True, 'import numpy as np\n'), ((9041, 9062), 'numpy.mean', 'np.mean', (['last_val_acc'], {}), '(last_val_acc)\n', (9048, 9062), True, 'import numpy as np\n'), ((9069, 9089), 'numpy.std', 'np.std', (['last_val_acc'], {}), '(last_val_acc)\n', (9075, 9089), True, 'import numpy as np\n'), ((9168, 9190), 'numpy.mean', 'np.mean', (['last_val_prec'], {}), '(last_val_prec)\n', (9175, 9190), True, 'import numpy as np\n'), ((9197, 9218), 'numpy.std', 'np.std', (['last_val_prec'], {}), '(last_val_prec)\n', (9203, 9218), True, 'import numpy as np\n'), ((9294, 9315), 'numpy.mean', 'np.mean', (['last_val_rec'], {}), '(last_val_rec)\n', (9301, 9315), True, 'import numpy as np\n'), ((9322, 9342), 'numpy.std', 'np.std', (['last_val_rec'], {}), '(last_val_rec)\n', (9328, 9342), True, 'import numpy as np\n'), ((9415, 9436), 'numpy.mean', 'np.mean', (['last_val_auc'], {}), '(last_val_auc)\n', (9422, 9436), True, 'import numpy as np\n'), ((9443, 9463), 'numpy.std', 'np.std', (['last_val_auc'], {}), '(last_val_auc)\n', (9449, 9463), True, 'import numpy as np\n'), ((4360, 4412), 'numpy.load', 'np.load', (["(persistence_image_location + image + '.npy')"], {}), "(persistence_image_location + image + '.npy')\n", (4367, 4412), True, 'import numpy as np\n'), ((4680, 4702), 'numpy.vstack', 'np.vstack', (['y_train_lst'], {}), '(y_train_lst)\n', (4689, 4702), True, 'import numpy as np\n'), ((4929, 4981), 'numpy.load', 'np.load', (["(persistence_image_location + image + '.npy')"], {}), "(persistence_image_location + image + '.npy')\n", (4936, 4981), True, 'import numpy as np\n'), ((5239, 5259), 'numpy.vstack', 'np.vstack', (['y_val_lst'], {}), '(y_val_lst)\n', (5248, 5259), True, 'import numpy as np\n'), ((7301, 7401), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': 'lr', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'epsilon': '(1e-07)', 'amsgrad': '(False)'}), '(learning_rate=lr, beta_1=0.9, beta_2=0.999, epsilon=\n 1e-07, amsgrad=False)\n', (7322, 7401), False, 'from tensorflow import keras\n'), ((6255, 6278), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6276, 6278), False, 'import datetime\n'), ((7607, 7652), 'tensorflow.keras.metrics.BinaryAccuracy', 'keras.metrics.BinaryAccuracy', ([], {'name': '"""accuracy"""'}), "(name='accuracy')\n", (7635, 7652), False, 'from tensorflow import keras\n'), ((7674, 7715), 'tensorflow.keras.metrics.Precision', 'keras.metrics.Precision', ([], {'name': '"""precision"""'}), "(name='precision')\n", (7697, 7715), False, 'from tensorflow import keras\n'), ((7737, 7772), 'tensorflow.keras.metrics.Recall', 'keras.metrics.Recall', ([], {'name': '"""recall"""'}), "(name='recall')\n", (7757, 7772), False, 'from tensorflow import keras\n'), ((7794, 7823), 'tensorflow.keras.metrics.AUC', 'keras.metrics.AUC', ([], {'name': '"""auc"""'}), "(name='auc')\n", (7811, 7823), False, 'from tensorflow import keras\n'), ((3089, 3143), 'numpy.load', 'np.load', (['(partitions_location + file)'], {'allow_pickle': '(True)'}), '(partitions_location + file, allow_pickle=True)\n', (3096, 3143), True, 'import numpy as np\n'), ((4538, 4567), 'numpy.stack', 'np.stack', (['X_train_lst'], {'axis': '(0)'}), '(X_train_lst, axis=0)\n', (4546, 4567), True, 'import numpy as np\n'), ((5101, 5128), 'numpy.stack', 'np.stack', (['X_val_lst'], {'axis': '(0)'}), '(X_val_lst, axis=0)\n', (5109, 5128), True, 'import numpy as np\n'), ((3315, 3369), 'numpy.load', 'np.load', (['(partitions_location + file)'], {'allow_pickle': '(True)'}), '(partitions_location + file, allow_pickle=True)\n', (3322, 3369), True, 'import numpy as np\n')]
|
"""
Create docstring tests for the following functions
"""
import doctest
# Example
def get_area_rect(length, width):
"""Returns area of rectangle
>>> get_area_rect(5 , 5)
25
>>> get_area_rect(5 , 0)
Traceback (most recent call last):
...
ValueError
>>> get_area_rect(5 , -1)
Traceback (most recent call last):
...
ValueError
"""
if(length <= 0) or (width <= 0):
raise ValueError
return length * width
# Your turn
def validate_input(x):
"""Validates that input give is between 1 and 10
>>> validate_input(5)
True
>>> validate_input(-2)
False
>>> validate_input(12)
False
"""
x = int(x)
if 1 <= x <= 10:
return True
else:
return False
def true_if_hello(x):
"""Will return true if and only if passed the str 'hello'
>>> true_if_hello('hello')
True
>>> true_if_hello('olleh')
False
>>> true_if_hello(7)
False
"""
x = str(x)
if x == 'hello':
return True
return False
doctest.testmod()
|
[
"doctest.testmod"
] |
[((1230, 1247), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (1245, 1247), False, 'import doctest\n')]
|
"""A simple, 2D peridynamics simulation example."""
import argparse
import cProfile
from io import StringIO
import numpy as np
import pathlib
from peridynamics import Model
from peridynamics.model import initial_crack_helper
from peridynamics.integrators import Euler
from pstats import SortKey, Stats
mesh_file = pathlib.Path(__file__).parent.absolute() / "test.msh"
@initial_crack_helper
def is_crack(x, y):
"""Determine whether a pair of particles define the crack."""
output = 0
crack_length = 0.3
p1 = x
p2 = y
if x[0] > y[0]:
p2 = x
p1 = y
# 1e-6 makes it fall one side of central line of particles
if p1[0] < 0.5 + 1e-6 and p2[0] > 0.5 + 1e-6:
# draw a straight line between them
m = (p2[1] - p1[1]) / (p2[0] - p1[0])
c = p1[1] - m * p1[0]
# height a x = 0.5
height = m * 0.5 + c
if (height > 0.5 * (1 - crack_length)
and height < 0.5 * (1 + crack_length)):
output = 1
return output
def boundary_function(model, u, step):
"""
Apply a load to the system.
Particles on each of the sides of the system are pulled apart with
increasing time step.
"""
load_rate = 0.00001
u[model.lhs, 1:3] = np.zeros((len(model.lhs), 2))
u[model.rhs, 1:3] = np.zeros((len(model.rhs), 2))
u[model.lhs, 0] = (
-0.5 * step * load_rate * np.ones(len(model.rhs))
)
u[model.rhs, 0] = (
0.5 * step * load_rate * np.ones(len(model.rhs))
)
return u
def main():
"""Conduct a peridynamics simulation."""
parser = argparse.ArgumentParser()
parser.add_argument('--profile', action='store_const', const=True)
args = parser.parse_args()
if args.profile:
profile = cProfile.Profile()
profile.enable()
model = Model(mesh_file, horizon=0.1, critical_strain=0.005,
elastic_modulus=0.05, initial_crack=is_crack)
# Set left-hand side and right-hand side of boundary
indices = np.arange(model.nnodes)
model.lhs = indices[model.coords[:, 0] < 1.5*model.horizon]
model.rhs = indices[model.coords[:, 0] > 1.0 - 1.5*model.horizon]
integrator = Euler(dt=1e-3)
u, damage, *_ = model.simulate(
steps=100,
integrator=integrator,
boundary_function=boundary_function,
write=1000
)
if args.profile:
profile.disable()
s = StringIO()
stats = Stats(profile, stream=s).sort_stats(SortKey.CUMULATIVE)
stats.print_stats()
print(s.getvalue())
if __name__ == "__main__":
main()
|
[
"io.StringIO",
"argparse.ArgumentParser",
"peridynamics.Model",
"pstats.Stats",
"cProfile.Profile",
"pathlib.Path",
"numpy.arange",
"peridynamics.integrators.Euler"
] |
[((1611, 1636), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1634, 1636), False, 'import argparse\n'), ((1836, 1938), 'peridynamics.Model', 'Model', (['mesh_file'], {'horizon': '(0.1)', 'critical_strain': '(0.005)', 'elastic_modulus': '(0.05)', 'initial_crack': 'is_crack'}), '(mesh_file, horizon=0.1, critical_strain=0.005, elastic_modulus=0.05,\n initial_crack=is_crack)\n', (1841, 1938), False, 'from peridynamics import Model\n'), ((2025, 2048), 'numpy.arange', 'np.arange', (['model.nnodes'], {}), '(model.nnodes)\n', (2034, 2048), True, 'import numpy as np\n'), ((2201, 2216), 'peridynamics.integrators.Euler', 'Euler', ([], {'dt': '(0.001)'}), '(dt=0.001)\n', (2206, 2216), False, 'from peridynamics.integrators import Euler\n'), ((1779, 1797), 'cProfile.Profile', 'cProfile.Profile', ([], {}), '()\n', (1795, 1797), False, 'import cProfile\n'), ((2437, 2447), 'io.StringIO', 'StringIO', ([], {}), '()\n', (2445, 2447), False, 'from io import StringIO\n'), ((315, 337), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (327, 337), False, 'import pathlib\n'), ((2464, 2488), 'pstats.Stats', 'Stats', (['profile'], {'stream': 's'}), '(profile, stream=s)\n', (2469, 2488), False, 'from pstats import SortKey, Stats\n')]
|
##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Common WebDAV error handling code.
There are two types of error views. Ones that get caught by the WebDAV protocol
and the other which escapes to the publisher. Both these views implement
different interface which we can control through the WebDAV package via the
IPublication.handleException method.
"""
__docformat__ = 'restructuredtext'
from xml.etree import ElementTree
from zope import interface
from zope import schema
from zope import component
import zope.publisher.interfaces.http
from zope.publisher.interfaces.http import IHTTPException
import zope.publisher.defaultview
import z3c.dav.interfaces
import z3c.dav.utils
class DAVError(object):
interface.implements(z3c.dav.interfaces.IDAVErrorWidget)
component.adapts(interface.Interface,
z3c.dav.interfaces.IWebDAVRequest)
def __init__(self, context, request):
self.context = context
self.request = request
status = None
errors = []
propstatdescription = ""
responsedescription = ""
class ConflictError(DAVError):
status = 409
class ForbiddenError(DAVError):
status = 403
class PropertyNotFoundError(DAVError):
status = 404
class FailedDependencyError(DAVError):
# context is generally None for a failed dependency error.
status = 424
class AlreadyLockedError(DAVError):
status = 423
class UnauthorizedError(DAVError):
status = 401
################################################################################
#
# Multi-status error view
#
################################################################################
class MultiStatusErrorView(object):
component.adapts(z3c.dav.interfaces.IWebDAVErrors,
z3c.dav.interfaces.IWebDAVRequest)
interface.implements(IHTTPException)
def __init__(self, error, request):
self.error = error
self.request = request
def __call__(self):
multistatus = z3c.dav.utils.MultiStatus()
if len(self.error.errors) == 1 and \
self.error.errors[0].resource == self.error.context:
# If have only one error and the context on which we raised the
# exception, then we just try and view the default view of the
# error.
error = self.error.errors[0]
name = zope.publisher.defaultview.queryDefaultViewName(
error, self.request)
if name is not None:
view = component.queryMultiAdapter(
(error, self.request), name = name)
return view()
seenContext = False
for error in self.error.errors:
if error.resource == self.error.context:
seenContext = True
davwidget = component.getMultiAdapter(
(error, self.request), z3c.dav.interfaces.IDAVErrorWidget)
response = z3c.dav.utils.Response(
z3c.dav.utils.getObjectURL(error.resource, self.request))
response.status = davwidget.status
# we don't generate a propstat elements during this view so
# we just ignore the propstatdescription.
response.responsedescription += davwidget.responsedescription
multistatus.responses.append(response)
if not seenContext:
response = z3c.dav.utils.Response(
z3c.dav.utils.getObjectURL(self.error.context, self.request))
response.status = 424 # Failed Dependency
multistatus.responses.append(response)
self.request.response.setStatus(207)
self.request.response.setHeader("content-type", "application/xml")
return ElementTree.tostring(multistatus(), encoding = "utf-8")
class WebDAVPropstatErrorView(object):
interface.implements(IHTTPException)
component.adapts(z3c.dav.interfaces.IWebDAVPropstatErrors,
z3c.dav.interfaces.IWebDAVRequest)
def __init__(self, error, request):
self.error = error
self.request = request
def __call__(self):
multistatus = z3c.dav.utils.MultiStatus()
response = z3c.dav.utils.Response(
z3c.dav.utils.getObjectURL(self.error.context, self.request))
multistatus.responses.append(response)
for prop, error in self.error.items():
error_view = component.getMultiAdapter(
(error, self.request), z3c.dav.interfaces.IDAVErrorWidget)
propstat = response.getPropstat(error_view.status)
if z3c.dav.interfaces.IDAVProperty.providedBy(prop):
## XXX - not tested - but is it needed?
prop = "{%s}%s" %(prop.namespace, prop.__name__)
propstat.properties.append(ElementTree.Element(prop))
## XXX - needs testing.
propstat.responsedescription += error_view.propstatdescription
response.responsedescription += error_view.responsedescription
self.request.response.setStatus(207)
self.request.response.setHeader("content-type", "application/xml")
return ElementTree.tostring(multistatus(), encoding = "utf-8")
################################################################################
#
# Some more generic exception view.
#
################################################################################
class HTTPForbiddenError(object):
interface.implements(IHTTPException)
component.adapts(z3c.dav.interfaces.IForbiddenError,
zope.publisher.interfaces.http.IHTTPRequest)
def __init__(self, error, request):
self.error = error
self.request = request
def __call__(self):
self.request.response.setStatus(403)
return ""
class HTTPConflictError(object):
interface.implements(IHTTPException)
component.adapts(z3c.dav.interfaces.IConflictError,
zope.publisher.interfaces.http.IHTTPRequest)
def __init__(self, error, request):
self.error = error
self.request = request
def __call__(self):
self.request.response.setStatus(409)
return ""
class PreconditionFailed(object):
interface.implements(IHTTPException)
component.adapts(z3c.dav.interfaces.IPreconditionFailed,
zope.publisher.interfaces.http.IHTTPRequest)
def __init__(self, error, request):
self.error = error
self.request = request
def __call__(self):
self.request.response.setStatus(412)
return ""
class HTTPUnsupportedMediaTypeError(object):
interface.implements(IHTTPException)
component.adapts(z3c.dav.interfaces.IUnsupportedMediaType,
zope.publisher.interfaces.http.IHTTPRequest)
def __init__(self, error, request):
self.error = error
self.request = request
def __call__(self):
self.request.response.setStatus(415)
return ""
class UnprocessableError(object):
interface.implements(IHTTPException)
component.adapts(z3c.dav.interfaces.IUnprocessableError,
zope.publisher.interfaces.http.IHTTPRequest)
def __init__(self, context, request):
self.context = context
self.request = request
def __call__(self):
self.request.response.setStatus(422)
return ""
class AlreadyLockedErrorView(object):
interface.implements(IHTTPException)
component.adapts(z3c.dav.interfaces.IAlreadyLocked,
zope.publisher.interfaces.http.IHTTPRequest)
def __init__(self, context, request):
self.request = request
def __call__(self):
self.request.response.setStatus(423)
return ""
class BadGateway(object):
interface.implements(IHTTPException)
component.adapts(z3c.dav.interfaces.IBadGateway,
zope.publisher.interfaces.http.IHTTPRequest)
def __init__(self, error, request):
self.error = error
self.request = request
def __call__(self):
self.request.response.setStatus(502)
return ""
|
[
"zope.component.queryMultiAdapter",
"zope.interface.implements",
"xml.etree.ElementTree.Element",
"zope.component.adapts",
"zope.component.getMultiAdapter"
] |
[((1301, 1357), 'zope.interface.implements', 'interface.implements', (['z3c.dav.interfaces.IDAVErrorWidget'], {}), '(z3c.dav.interfaces.IDAVErrorWidget)\n', (1321, 1357), False, 'from zope import interface\n'), ((1362, 1434), 'zope.component.adapts', 'component.adapts', (['interface.Interface', 'z3c.dav.interfaces.IWebDAVRequest'], {}), '(interface.Interface, z3c.dav.interfaces.IWebDAVRequest)\n', (1378, 1434), False, 'from zope import component\n'), ((2280, 2370), 'zope.component.adapts', 'component.adapts', (['z3c.dav.interfaces.IWebDAVErrors', 'z3c.dav.interfaces.IWebDAVRequest'], {}), '(z3c.dav.interfaces.IWebDAVErrors, z3c.dav.interfaces.\n IWebDAVRequest)\n', (2296, 2370), False, 'from zope import component\n'), ((2391, 2427), 'zope.interface.implements', 'interface.implements', (['IHTTPException'], {}), '(IHTTPException)\n', (2411, 2427), False, 'from zope import interface\n'), ((4406, 4442), 'zope.interface.implements', 'interface.implements', (['IHTTPException'], {}), '(IHTTPException)\n', (4426, 4442), False, 'from zope import interface\n'), ((4447, 4545), 'zope.component.adapts', 'component.adapts', (['z3c.dav.interfaces.IWebDAVPropstatErrors', 'z3c.dav.interfaces.IWebDAVRequest'], {}), '(z3c.dav.interfaces.IWebDAVPropstatErrors, z3c.dav.\n interfaces.IWebDAVRequest)\n', (4463, 4545), False, 'from zope import component\n'), ((6013, 6049), 'zope.interface.implements', 'interface.implements', (['IHTTPException'], {}), '(IHTTPException)\n', (6033, 6049), False, 'from zope import interface\n'), ((6054, 6156), 'zope.component.adapts', 'component.adapts', (['z3c.dav.interfaces.IForbiddenError', 'zope.publisher.interfaces.http.IHTTPRequest'], {}), '(z3c.dav.interfaces.IForbiddenError, zope.publisher.\n interfaces.http.IHTTPRequest)\n', (6070, 6156), False, 'from zope import component\n'), ((6399, 6435), 'zope.interface.implements', 'interface.implements', (['IHTTPException'], {}), '(IHTTPException)\n', (6419, 6435), False, 'from zope import interface\n'), ((6440, 6541), 'zope.component.adapts', 'component.adapts', (['z3c.dav.interfaces.IConflictError', 'zope.publisher.interfaces.http.IHTTPRequest'], {}), '(z3c.dav.interfaces.IConflictError, zope.publisher.\n interfaces.http.IHTTPRequest)\n', (6456, 6541), False, 'from zope import component\n'), ((6785, 6821), 'zope.interface.implements', 'interface.implements', (['IHTTPException'], {}), '(IHTTPException)\n', (6805, 6821), False, 'from zope import interface\n'), ((6826, 6932), 'zope.component.adapts', 'component.adapts', (['z3c.dav.interfaces.IPreconditionFailed', 'zope.publisher.interfaces.http.IHTTPRequest'], {}), '(z3c.dav.interfaces.IPreconditionFailed, zope.publisher.\n interfaces.http.IHTTPRequest)\n', (6842, 6932), False, 'from zope import component\n'), ((7187, 7223), 'zope.interface.implements', 'interface.implements', (['IHTTPException'], {}), '(IHTTPException)\n', (7207, 7223), False, 'from zope import interface\n'), ((7228, 7336), 'zope.component.adapts', 'component.adapts', (['z3c.dav.interfaces.IUnsupportedMediaType', 'zope.publisher.interfaces.http.IHTTPRequest'], {}), '(z3c.dav.interfaces.IUnsupportedMediaType, zope.publisher.\n interfaces.http.IHTTPRequest)\n', (7244, 7336), False, 'from zope import component\n'), ((7580, 7616), 'zope.interface.implements', 'interface.implements', (['IHTTPException'], {}), '(IHTTPException)\n', (7600, 7616), False, 'from zope import interface\n'), ((7621, 7727), 'zope.component.adapts', 'component.adapts', (['z3c.dav.interfaces.IUnprocessableError', 'zope.publisher.interfaces.http.IHTTPRequest'], {}), '(z3c.dav.interfaces.IUnprocessableError, zope.publisher.\n interfaces.http.IHTTPRequest)\n', (7637, 7727), False, 'from zope import component\n'), ((7981, 8017), 'zope.interface.implements', 'interface.implements', (['IHTTPException'], {}), '(IHTTPException)\n', (8001, 8017), False, 'from zope import interface\n'), ((8022, 8123), 'zope.component.adapts', 'component.adapts', (['z3c.dav.interfaces.IAlreadyLocked', 'zope.publisher.interfaces.http.IHTTPRequest'], {}), '(z3c.dav.interfaces.IAlreadyLocked, zope.publisher.\n interfaces.http.IHTTPRequest)\n', (8038, 8123), False, 'from zope import component\n'), ((8334, 8370), 'zope.interface.implements', 'interface.implements', (['IHTTPException'], {}), '(IHTTPException)\n', (8354, 8370), False, 'from zope import interface\n'), ((8375, 8473), 'zope.component.adapts', 'component.adapts', (['z3c.dav.interfaces.IBadGateway', 'zope.publisher.interfaces.http.IHTTPRequest'], {}), '(z3c.dav.interfaces.IBadGateway, zope.publisher.interfaces.\n http.IHTTPRequest)\n', (8391, 8473), False, 'from zope import component\n'), ((3387, 3476), 'zope.component.getMultiAdapter', 'component.getMultiAdapter', (['(error, self.request)', 'z3c.dav.interfaces.IDAVErrorWidget'], {}), '((error, self.request), z3c.dav.interfaces.\n IDAVErrorWidget)\n', (3412, 3476), False, 'from zope import component\n'), ((4974, 5063), 'zope.component.getMultiAdapter', 'component.getMultiAdapter', (['(error, self.request)', 'z3c.dav.interfaces.IDAVErrorWidget'], {}), '((error, self.request), z3c.dav.interfaces.\n IDAVErrorWidget)\n', (4999, 5063), False, 'from zope import component\n'), ((3090, 3151), 'zope.component.queryMultiAdapter', 'component.queryMultiAdapter', (['(error, self.request)'], {'name': 'name'}), '((error, self.request), name=name)\n', (3117, 3151), False, 'from zope import component\n'), ((5366, 5391), 'xml.etree.ElementTree.Element', 'ElementTree.Element', (['prop'], {}), '(prop)\n', (5385, 5391), False, 'from xml.etree import ElementTree\n')]
|
import xml.etree.ElementTree as ET
from os import sys
#Return *.c for 1, *.o for 2
#tipo = sys.argv[1]
#names_files = []
names_files = []
names_functions = []
hosts_ips = []
names = ""
root = ET.parse('startup.xml').getroot()
arquivo = open("Makefile","w")
functions_file = open("functions_names","w")
hosts = open("comm/hosts.cfg","w")
config = open("comm/config","w")
rank = 0
cont_rank = 0
cont = 0
for neighbor in root.iter('ipmachine'):
cont_rank = 0
hosts.write(neighbor.get('ip')+" slots="+neighbor.get('ngpus')+"\n")
while(cont_rank < int(neighbor.get('ngpus'))):
config.write(str(rank)+"\t"+str(cont_rank)+"\n")
cont_rank = cont_rank + 1
rank = rank + 1
for neighbor in root.iter('file'):
names_files.append(neighbor.get('name'))
for neighbor in root.iter('func'):
cont +=1
names_functions.append(neighbor.get('funcName'))
for neighbor in root.iter('file'):
names = names +" "+ neighbor.get('name')
functions_file.write(str(cont)+"\n")
for i in names_functions:
functions_file.write(i+"\n")
#for i in hosts_ips:
# hosts.write(i+"\n");
objs = ""
for i in names_files:
objs = objs+i.strip(".cu")+".o"+" "
arquivo.write("CUFILES= "+names+"\n")
arquivo.write("OBJS= "+objs+"\n")
arquivo.write("TARGET= "+objs+" link.o libfw.so"+"\n")
arquivo.write("\n")
arquivo.write("\n")
arquivo.write("\n")
arquivo.write("all: $(TARGET)"+"\n")
arquivo.write("\n")
arquivo.write("\n")
arquivo.write("\n")
arquivo.write("libfw.so: link.o \n")
arquivo.write("\tg++ -shared -Wl,-soname,libfw.so -o libfw.so " + objs +" comm/comm.o link.o -L/usr/local/cuda-8.0/lib64 -lcudart \n")
arquivo.write("\n")
arquivo.write("\n")
arquivo.write("link.o: "+names+"\n")
arquivo.write("\tnvcc -m64 -arch=sm_20 -dlink -Xcompiler -fPIC comm/comm.o "+objs+" -o link.o\n")
arquivo.write("\n")
arquivo.write("\n")
for i in names_files:
arquivo.write(i.strip(".cu")+".o: "+i+" "+i.strip(".cu")+".h comm/comm.h\n")
arquivo.write("\tnvcc -m64 -arch=sm_20 -dc -Xcompiler -fPIC -c "+i+" \n")
arquivo.write("\n")
arquivo.write("\n")
arquivo.write("clean: \n")
arquivo.write("\trm -f link.o libfw.so "+ objs +" \n")
|
[
"xml.etree.ElementTree.parse"
] |
[((194, 217), 'xml.etree.ElementTree.parse', 'ET.parse', (['"""startup.xml"""'], {}), "('startup.xml')\n", (202, 217), True, 'import xml.etree.ElementTree as ET\n')]
|
import pytest
from pysyncgateway.exceptions import DoesNotExist
def test(recipe_document, database):
reload_document = database.get_document('butter_chicken')
result = reload_document.retrieve()
assert result is True
for key in list(reload_document.data.keys()):
assert isinstance(key, str)
assert sorted(list(reload_document.data)) == ['ingredients', 'recipe']
assert reload_document.data['ingredients'] == ['chicken', 'butter']
assert reload_document.data['recipe'] == 'Mix the chicken and the butter. Voila!'
assert isinstance(reload_document.data['recipe'], str)
assert reload_document.rev == recipe_document.rev
assert reload_document.channels == ()
def test_channels(permissions_document, database):
"""
Document with no data can be retrieved, channels are updated
"""
reload_document = database.get_document('permission-list')
result = reload_document.retrieve()
assert result is True
assert reload_document.data == {}
assert reload_document.rev == permissions_document.rev
assert reload_document.channels == ('acc.1234', 'acc.7882')
# --- FAILURES ---
def test_missing(empty_document):
with pytest.raises(DoesNotExist):
empty_document.retrieve()
|
[
"pytest.raises"
] |
[((1200, 1227), 'pytest.raises', 'pytest.raises', (['DoesNotExist'], {}), '(DoesNotExist)\n', (1213, 1227), False, 'import pytest\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import py_trees
import random
class Foo(py_trees.behaviour.Behaviour):
def __init__(self, name):
"""
Minimal one-time initialisation. A good rule of thumb is
to only include the initialisation relevant for being able
to insert this behaviour in a tree for offline rendering to
dot graphs.
Other one-time initialisation requirements should be met via
the setup() method.
"""
super(Foo, self).__init__(name)
def setup(self):
"""
When is this called?
This function should be either manually called by your program
to setup this behaviour alone, or more commonly, via
:meth:`~py_trees.behaviour.Behaviour.setup_with_descendants`
or :meth:`~py_trees.trees.BehaviourTree.setup`, both of which
will iterate over this behaviour, it's children (it's children's
children ...) calling :meth:`~py_trees.behaviour.Behaviour.setup`
on each in turn.
If you have vital initialisation necessary to the success
execution of your behaviour, put a guard in your
:meth:`~py_trees.behaviour.Behaviour.initialise` method
to protect against entry without having been setup.
What to do here?
Delayed one-time initialisation that would otherwise interfere
with offline rendering of this behaviour in a tree to dot graph
or validation of the behaviour's configuration.
Good examples include:
- Hardware or driver initialisation
- Middleware initialisation (e.g. ROS pubs/subs/services)
- A parallel checking for a valid policy configuration after
children have been added or removed
"""
self.logger.debug(" %s [Foo::setup()]" % self.name)
def initialise(self):
"""
When is this called?
The first time your behaviour is ticked and anytime the
status is not RUNNING thereafter.
What to do here?
Any initialisation you need before putting your behaviour
to work.
"""
self.logger.debug(" %s [Foo::initialise()]" % self.name)
def update(self):
"""
When is this called?
Every time your behaviour is ticked.
What to do here?
- Triggering, checking, monitoring. Anything...but do not block!
- Set a feedback message
- return a py_trees.common.Status.[RUNNING, SUCCESS, FAILURE]
"""
self.logger.debug(" %s [Foo::update()]" % self.name)
ready_to_make_a_decision = random.choice([True, False])
decision = random.choice([True, False])
if not ready_to_make_a_decision:
return py_trees.common.Status.RUNNING
elif decision:
self.feedback_message = "We are not bar!"
return py_trees.common.Status.SUCCESS
else:
self.feedback_message = "Uh oh"
return py_trees.common.Status.FAILURE
def terminate(self, new_status):
"""
When is this called?
Whenever your behaviour switches to a non-running state.
- SUCCESS || FAILURE : your behaviour's work cycle has finished
- INVALID : a higher priority branch has interrupted, or shutting down
"""
self.logger.debug(" %s [Foo::terminate().terminate()][%s->%s]" % (self.name, self.status, new_status))
|
[
"random.choice"
] |
[((2678, 2706), 'random.choice', 'random.choice', (['[True, False]'], {}), '([True, False])\n', (2691, 2706), False, 'import random\n'), ((2726, 2754), 'random.choice', 'random.choice', (['[True, False]'], {}), '([True, False])\n', (2739, 2754), False, 'import random\n')]
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/tslasso.main.ipynb (unless otherwise specified).
__all__ = ['run_exp']
# Cell
from ..atomgeom.features import get_features,get_D_feats_feats
from ..atomgeom.utils import get_atoms_4
from ..simulations.rigidethanol import get_rigid_ethanol_data
from ..utils.utils import get_234_indices, get_atoms3_full, get_atoms4_full, data_stream_custom_range, get_cosines
from ..geometry.geometry import get_geom, get_wlpca_tangent_sel, get_rm_tangent_sel
from ..statistics.normalization import normalize_L212
from ..optimization.gradientgrouplasso import get_sr_lambda_parallel
from ..optimization.utils import get_selected_function_ids,get_selected_functions_lm2
from ..utils.replicates import Replicate, get_supports_brute_tslasso,get_supports_lasso
from megaman.embedding import SpectralEmbedding
import dill as pickle
import os
import sys
import numpy as np
import itertools
from itertools import permutations,combinations
from sklearn.decomposition import TruncatedSVD
import pathos
from pathos.multiprocessing import ProcessingPool as Pool
# Cell
def run_exp(positions, hparams):
d = hparams.d
n_components = hparams.n_components
atoms2_feat = hparams.atoms2_feat
atoms3_feat = hparams.atoms3_feat
atoms4_feat = hparams.atoms4_feat
atoms2_dict = hparams.atoms2_dict
atoms3_dict = hparams.atoms3_dict
atoms4_dict = hparams.atoms4_dict
diagram = hparams.diagram
ii = np.asarray(hparams.ii)
jj = np.asarray(hparams.jj)
outfile = hparams.outdir + '/' + hparams.name + 'results_tslasso'
#load geometric features
natoms = positions.shape[1]
n = positions.shape[0]
atoms2 = np.asarray(list(itertools.combinations(range(natoms), 2)))
atoms2full = atoms2
atoms3 = np.asarray(list(itertools.combinations(range(natoms), 3)))
atoms4 = np.asarray(list(itertools.combinations(range(natoms), 4)))
atoms3full = get_atoms3_full(atoms3)
atoms4full = get_atoms4_full(atoms4)
if atoms2_feat:
atoms2_feats = atoms2full
else:
atoms2_feats = np.asarray([])
if atoms3_feat:
atoms3_feats = atoms3full
else:
atoms3_feats = np.asarray([])
if atoms4_feat:
atoms4_feats = atoms4full
else:
atoms4_feats = np.asarray([])
#compute rotation/translation invariant featureization
cores = pathos.multiprocessing.cpu_count() - 1
pool = Pool(cores)
print('feature dimensions',atoms2_feats.shape, atoms3_feats.shape,atoms4_feats.shape)
#import pdb;pdb.set_trace
results = pool.map(lambda i: get_features(positions[i],
atoms2 = atoms2_feats,
atoms3 = atoms3_feats,
atoms4 = atoms4_feats),
data_stream_custom_range(list(range(n))))
data = np.vstack([np.hstack(results[i]) for i in range(n)])
data = data - np.mean(data, axis = 0)
#apply SVD
svd = TruncatedSVD(n_components=50)
data_svd = svd.fit_transform(data)
#compute geometry
radius = hparams.radius
n_neighbors = hparams.n_neighbors
geom = get_geom(data_svd, radius, n_neighbors)
print('computing embedding (for comparison)')
spectral_embedding = SpectralEmbedding(n_components=n_components,eigen_solver='arpack',geom=geom)
embed_spectral = spectral_embedding.fit_transform(data_svd)
embedding = embed_spectral
#obtain gradients
if atoms2_dict:
atoms2_dicts = atoms2full
else:
atoms2_dicts = np.asarray([])
if atoms3_dict:
atoms3_dicts = atoms3full
else:
atoms3_dicts = np.asarray([])
if atoms4_dict and not diagram:
atoms4_dicts = atoms4full
elif atoms4_dict:
atoms4_dicts= get_atoms_4(natoms, ii, jj)[0]
else:
atoms4_dicts = np.asarray([])
p = len(atoms2_dicts) + len(atoms3_dicts) + len(atoms4_dicts)
#get gradients
replicates = {}
nreps = hparams.nreps
nsel = hparams.nsel
for r in range(nreps):
#print(i)
replicates[r] = Replicate(nsel = nsel, n = 10000)
replicates[r].tangent_bases_M = get_wlpca_tangent_sel(data_svd, geom, replicates[r].selected_points, d)
D_feats_feats = np.asarray([get_D_feats_feats(positions[replicates[r].selected_points[i]],
atoms2in = atoms2_feats,
atoms3in = atoms3_feats,
atoms4in = atoms4_feats,
atoms2out = atoms2_dicts,
atoms3out = atoms3_dicts,
atoms4out = atoms4_dicts) for i in range(nsel)])
replicates[r].dg_x = np.asarray([svd.transform(D_feats_feats[i].transpose()).transpose() for i in range(nsel)])
replicates[r].dg_x_normalized = normalize_L212(replicates[r].dg_x)
replicates[r].dg_M = np.einsum('i b p, i b d -> i d p', replicates[r].dg_x_normalized, replicates[r].tangent_bases_M)
#run ts lasso
gl_itermax= hparams.gl_itermax
reg_l2 = hparams.reg_l2
max_search = hparams.max_search
d = hparams.d
tol = hparams.tol
learning_rate = hparams.learning_rate
for r in range(nreps):
replicates[r].results = get_sr_lambda_parallel(np.asarray([np.identity(d) for i in range(nsel)]), replicates[r].dg_M, gl_itermax,reg_l2, max_search, d, tol,learning_rate)
replicates[r].get_ordered_axes()
replicates[r].sel_l = replicates[r].get_selection_lambda()
#get manifold lasso support
selected_functions_unique = np.asarray(np.unique(get_selected_function_ids(replicates,d)), dtype = int)
support_tensor_lasso, supports_lasso = get_supports_lasso(replicates,p,d)
#get two stage support
selected_functions_lm2 = get_selected_functions_lm2(replicates)
support_tensor_ts, supports_ts = get_supports_brute_tslasso(replicates,nreps,p,d,selected_functions_lm2)
selected_functions_unique_twostage = np.asarray(np.unique(supports_ts), dtype = int)#np.unique(np.asarray(np.where(support_tensor_ts > 0.)[0], dtype = int))
pool.close()
pool.restart()
#compute function values for plotting... needs 'order234' for full computation
print('computing selected function values lasso, ' + str(selected_functions_unique))
selected_function_values = pool.map(
lambda i: get_features(positions[i],
atoms2 = np.asarray([]),
atoms3 = np.asarray([]),
atoms4 = atoms4_dicts[selected_functions_unique]),
data_stream_custom_range(list(range(n))))
selected_function_values_array = np.vstack([np.hstack(selected_function_values[i]) for i in range(n)])
print('computing selected function values two stage, ' + str(selected_functions_unique_twostage))
selected_function_values_brute = pool.map(
lambda i: get_features(positions[i],
atoms2 = np.asarray([]),
atoms3 = np.asarray([]),
atoms4 = atoms4_dicts[selected_functions_unique_twostage]),
data_stream_custom_range(list(range(n))))
selected_function_values_array_brute = np.vstack([np.hstack(selected_function_values_brute[i]) for i in range(n)])
#remove large gradient arrays
print('prep save')
replicates_small = {}
for r in range(nreps):
replicates_small[r] = Replicate(nsel=nsel, n=n,
selected_points=replicates[r].selected_points)
replicates_small[r].dg_M = replicates[r].dg_M
replicates_small[r].cs_reorder = replicates[r].cs_reorder
replicates_small[r].xaxis_reorder = replicates[r].xaxis_reorder
print('getting cosines')
cosine = get_cosines(replicates[0].dg_M)
replicates_small[0].cosine_abs = np.mean(np.abs(cosine), axis = 0)
#prepare to save
results = {}
results['replicates_small'] = replicates_small
results['data'] = data_svd
results['embed'] = embedding
results['supports_ts'] = support_tensor_ts, supports_ts
results['supports_lasso'] = support_tensor_lasso, supports_lasso
results['supports_lasso_values'] = selected_function_values
results['supports_ts_values'] = selected_function_values_brute
results['selected_lasso'] = selected_functions_unique
results['selected_ts'] = selected_functions_unique_twostage
results['geom'] = geom
results['dictionary'] = {}
results['dictionary']['atoms2'] = atoms2_dicts
results['dictionary']['atoms3'] = atoms3_dicts
results['dictionary']['atoms4'] = atoms4_dicts
#save
with open(outfile,'wb') as output:
pickle.dump(results, output, pickle.HIGHEST_PROTOCOL)
|
[
"numpy.abs",
"pathos.multiprocessing.cpu_count",
"sklearn.decomposition.TruncatedSVD",
"numpy.asarray",
"numpy.einsum",
"numpy.identity",
"numpy.hstack",
"megaman.embedding.SpectralEmbedding",
"numpy.mean",
"dill.dump",
"pathos.multiprocessing.ProcessingPool",
"numpy.unique"
] |
[((1456, 1478), 'numpy.asarray', 'np.asarray', (['hparams.ii'], {}), '(hparams.ii)\n', (1466, 1478), True, 'import numpy as np\n'), ((1488, 1510), 'numpy.asarray', 'np.asarray', (['hparams.jj'], {}), '(hparams.jj)\n', (1498, 1510), True, 'import numpy as np\n'), ((2422, 2433), 'pathos.multiprocessing.ProcessingPool', 'Pool', (['cores'], {}), '(cores)\n', (2426, 2433), True, 'from pathos.multiprocessing import ProcessingPool as Pool\n'), ((2959, 2988), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'n_components': '(50)'}), '(n_components=50)\n', (2971, 2988), False, 'from sklearn.decomposition import TruncatedSVD\n'), ((3245, 3323), 'megaman.embedding.SpectralEmbedding', 'SpectralEmbedding', ([], {'n_components': 'n_components', 'eigen_solver': '"""arpack"""', 'geom': 'geom'}), "(n_components=n_components, eigen_solver='arpack', geom=geom)\n", (3262, 3323), False, 'from megaman.embedding import SpectralEmbedding\n'), ((2079, 2093), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (2089, 2093), True, 'import numpy as np\n'), ((2182, 2196), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (2192, 2196), True, 'import numpy as np\n'), ((2285, 2299), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (2295, 2299), True, 'import numpy as np\n'), ((2372, 2406), 'pathos.multiprocessing.cpu_count', 'pathos.multiprocessing.cpu_count', ([], {}), '()\n', (2404, 2406), False, 'import pathos\n'), ((2909, 2930), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (2916, 2930), True, 'import numpy as np\n'), ((3527, 3541), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (3537, 3541), True, 'import numpy as np\n'), ((3629, 3643), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (3639, 3643), True, 'import numpy as np\n'), ((4821, 4921), 'numpy.einsum', 'np.einsum', (['"""i b p, i b d -> i d p"""', 'replicates[r].dg_x_normalized', 'replicates[r].tangent_bases_M'], {}), "('i b p, i b d -> i d p', replicates[r].dg_x_normalized,\n replicates[r].tangent_bases_M)\n", (4830, 4921), True, 'import numpy as np\n'), ((5911, 5933), 'numpy.unique', 'np.unique', (['supports_ts'], {}), '(supports_ts)\n', (5920, 5933), True, 'import numpy as np\n'), ((7922, 7936), 'numpy.abs', 'np.abs', (['cosine'], {}), '(cosine)\n', (7928, 7936), True, 'import numpy as np\n'), ((8753, 8806), 'dill.dump', 'pickle.dump', (['results', 'output', 'pickle.HIGHEST_PROTOCOL'], {}), '(results, output, pickle.HIGHEST_PROTOCOL)\n', (8764, 8806), True, 'import dill as pickle\n'), ((2849, 2870), 'numpy.hstack', 'np.hstack', (['results[i]'], {}), '(results[i])\n', (2858, 2870), True, 'import numpy as np\n'), ((3822, 3836), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (3832, 3836), True, 'import numpy as np\n'), ((6669, 6707), 'numpy.hstack', 'np.hstack', (['selected_function_values[i]'], {}), '(selected_function_values[i])\n', (6678, 6707), True, 'import numpy as np\n'), ((7291, 7335), 'numpy.hstack', 'np.hstack', (['selected_function_values_brute[i]'], {}), '(selected_function_values_brute[i])\n', (7300, 7335), True, 'import numpy as np\n'), ((5213, 5227), 'numpy.identity', 'np.identity', (['d'], {}), '(d)\n', (5224, 5227), True, 'import numpy as np\n'), ((6380, 6394), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (6390, 6394), True, 'import numpy as np\n'), ((6448, 6462), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (6458, 6462), True, 'import numpy as np\n'), ((6987, 7001), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (6997, 7001), True, 'import numpy as np\n'), ((7055, 7069), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (7065, 7069), True, 'import numpy as np\n')]
|
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import attr
from attr import ib as Field
from addonpayments.validators import RequestValidator
from addonpayments.utils import GenerationUtils
from addonpayments.mixins import HashMixin
from addonpayments.api.mixins import XmlMixin
@attr.s
class ApiRequest(HashMixin, XmlMixin):
"""
Super class representing a request to be sent to API.
This class contains all common attributes and functions for all other classes.
You can consult the specific documentation of all request fields on the website
https://desarrolladores.addonpayments.com
Subclasses values (fields to be defined in the subclasses):
request_type Type of the Addonpayments request (auth, receipt-in, payer-new, card-new, ...)
Mixin HashMixin attributes:
hash_fields Hash a string made up of the request values
Mixin XMLMixin attributes:
xml_root_tag If the object is a Request the root tag is <request attributes></ request>.
xml_root_attributes Normalized request objects always have timestamp and type attributes in the root tag
"""
# Mandatory field
merchantid = Field(validator=RequestValidator.merchant_id)
type = Field(default=None)
# Mandatory fields with auto-generation
timestamp = Field(default=None, validator=RequestValidator.timestamp)
orderid = Field(default=None, validator=RequestValidator.order_id)
# Mandatory fields generated later
sha1hash = Field(default=None, validator=RequestValidator.sha1hash)
# Optional field
account = Field(default='', validator=RequestValidator.account)
# Static variables
# Defined in subclasses
request_type = ''
# Default values for XmlMixin, all XML requests starts with <request type='' timestamp=''>
xml_root_tag = 'request'
xml_root_attributes = ['timestamp', 'type']
def __attrs_post_init__(self):
"""
This method will be called after the class is fully initialized.
Uses method to set auto-generate values if they have not been initialized and request type
"""
self.type = self.request_type
gen_utl = GenerationUtils()
if not self.timestamp:
self.timestamp = gen_utl.generate_timestamp()
if not self.orderid:
self.orderid = gen_utl.generate_order_id()
def hash(self, secret):
"""
Set and validate sha1hash
:param secret: string
"""
self.sha1hash = self.generate_hash(secret)
# Validate hash
attr.validate(self)
return self.sha1hash
|
[
"attr.validate",
"attr.ib",
"addonpayments.utils.GenerationUtils"
] |
[((1235, 1280), 'attr.ib', 'Field', ([], {'validator': 'RequestValidator.merchant_id'}), '(validator=RequestValidator.merchant_id)\n', (1240, 1280), True, 'from attr import ib as Field\n'), ((1292, 1311), 'attr.ib', 'Field', ([], {'default': 'None'}), '(default=None)\n', (1297, 1311), True, 'from attr import ib as Field\n'), ((1373, 1430), 'attr.ib', 'Field', ([], {'default': 'None', 'validator': 'RequestValidator.timestamp'}), '(default=None, validator=RequestValidator.timestamp)\n', (1378, 1430), True, 'from attr import ib as Field\n'), ((1445, 1501), 'attr.ib', 'Field', ([], {'default': 'None', 'validator': 'RequestValidator.order_id'}), '(default=None, validator=RequestValidator.order_id)\n', (1450, 1501), True, 'from attr import ib as Field\n'), ((1557, 1613), 'attr.ib', 'Field', ([], {'default': 'None', 'validator': 'RequestValidator.sha1hash'}), '(default=None, validator=RequestValidator.sha1hash)\n', (1562, 1613), True, 'from attr import ib as Field\n'), ((1650, 1703), 'attr.ib', 'Field', ([], {'default': '""""""', 'validator': 'RequestValidator.account'}), "(default='', validator=RequestValidator.account)\n", (1655, 1703), True, 'from attr import ib as Field\n'), ((2238, 2255), 'addonpayments.utils.GenerationUtils', 'GenerationUtils', ([], {}), '()\n', (2253, 2255), False, 'from addonpayments.utils import GenerationUtils\n'), ((2629, 2648), 'attr.validate', 'attr.validate', (['self'], {}), '(self)\n', (2642, 2648), False, 'import attr\n')]
|
__author__ = 'hofmann'
import os
import glob
import time
import shutil
import tempfile
from scripts.Validator.validator import Validator
class MGCluster(Validator):
"""
Alignment and clustering of marker genes with references
"""
_label = "MGCluster"
_cluster_method_choices = ['average', 'furthest', 'nearest']
_file_name_map = "map.tsv"
_silva_ref_files = ["mothur_ref_distances", "mothur_ref_names", "mothur_alignment_ref.fasta", _file_name_map]
# mothur command: cluster.split
_mothur_cmd_cluster_split = "; ".join([
'unique.seqs(fasta={mg_fasta})',
'align.seqs(candidate=current, template={ref_align}, align=gotoh, flip=t, processors={processors})',
'remove.seqs(accnos={filename}.unique.flip.accnos, fasta=current, name=current)',
'merge.files(input={filename}.names-{ref_names}, output={filename}.merged.names)',
'merge.files(input={filename}.pick.names-{ref_names}, output={filename}.merged.names)',
'set.current(name={filename}.merged.names, column={local_dist})',
'dist.seqs(oldfasta={ref_align}, column=current, cutoff={cutoff}, processors={processors}, calc=onegap, countends=F)',
'set.current(column={local_dist})',
'cluster.split(cutoff={cutoff}, method={method}, precision={precision}, column={local_dist}, name={filename}.merged.names)'
])
# mothur command: cluster
_mothur_cmd_cluster = "; ".join([
"unique.seqs(fasta={mg_fasta})",
"align.seqs(candidate=current, template={ref_align}, align=gotoh, flip=t, processors={processors})",
"remove.seqs(accnos={filename}.unique.flip.accnos, fasta=current, name=current)",
"merge.files(input={filename}.names-{ref_names}, output={filename}.merged.names)",
"merge.files(input={filename}.pick.names-{ref_names}, output={filename}.merged.names)",
"set.current(name={filename}.merged.names, column={local_dist})",
"dist.seqs(oldfasta={ref_align}, column=current, cutoff={cutoff}, processors={processors}, calc=onegap, countends=F)",
"set.current(column={local_dist})",
"cluster(cutoff={cutoff}, method={method}, precision={precision}, column={local_dist}, name={filename}.merged.names)"
])
def __init__(
self, mothur_executable, directory_silva_reference, max_processors=1, temp_directory=None,
logfile=None, verbose=False, debug=False):
"""
Constructor
@param mothur_executable: File path to mothur binary
@type mothur_executable: str | unicode
@param directory_silva_reference: Path to directory with SILVA reference database files
@type directory_silva_reference: str | unicode
@param max_processors: Maximum number of available processors
@type max_processors: int | long
@param temp_directory: Directory for temporary data
@type temp_directory: str | unicode
@param logfile: file handler or file path to a log file
@type logfile: file | FileIO | StringIO | basestring
@param verbose: Not verbose means that only warnings and errors will be past to stream
@type verbose: bool
@param debug: Display debug messages
@type debug: bool
"""
assert self.validate_file(mothur_executable, executable=True)
assert self.validate_dir(directory_silva_reference, file_names=self._silva_ref_files)
assert self.validate_number(max_processors, minimum=1)
assert self.validate_dir(temp_directory)
super(MGCluster, self).__init__(logfile=logfile, verbose=verbose, debug=False)
self._mothur_executable = mothur_executable
self._tmp_dir = tempfile.mkdtemp(dir=temp_directory)
self._max_processors = max_processors
self._debug = debug
ref_silva_distances = self._get_symbolic_link_path(os.path.join(directory_silva_reference, "mothur_ref_distances"))
ref_silva_names = self._get_symbolic_link_path(os.path.join(directory_silva_reference, "mothur_ref_names")) # unique
ref_silva_alignment = self._get_symbolic_link_path(os.path.join(directory_silva_reference, "mothur_alignment_ref.fasta"))
self._ref_silva_distances = ref_silva_distances
self._ref_silva_names = ref_silva_names
self._ref_silva_alignment = ref_silva_alignment
# local_distance = os.path.join(self._working_dir, "ref.align.dist")
self._local_distance = "ref.align.dist"
def __exit__(self, type, value, traceback):
super(MGCluster, self).__exit__(type, value, traceback)
if not self._debug:
shutil.rmtree(self._tmp_dir)
def cluster(self, marker_gene_fasta, output_cluster_file, distance_cutoff, precision=1000, method="average"):
"""
CLuster Markergenes
@param marker_gene_fasta: Fasta formatted file with marker genes
@type marker_gene_fasta: str | unicode
@param output_cluster_file: Output of mg clustering
@type output_cluster_file: str | unicode
@param distance_cutoff: Exclude irrelevant higher distances before clustering
@type distance_cutoff: int | long
@param precision: Cluster are made in steps: 10: 0.1, 100: 0.01, 1000: 0.001
@type precision: int | long
@param method: Cluster algorithm 'average', 'furthest', 'nearest'
@type method: str | unicode
@rtype: None
"""
assert self.validate_file(marker_gene_fasta)
assert self.validate_dir(output_cluster_file, only_parent=True)
assert self.validate_number(distance_cutoff, minimum=0, maximum=1)
assert self.validate_number(precision, minimum=0)
assert method in self._cluster_method_choices
self._logger.info("Starting clustering process")
start = time.time()
old_dir = os.getcwd()
# local paths required or mothur messes up the dist.seqs command, do not use absolut paths!!!
os.chdir(self._tmp_dir)
local_marker_gene_fasta = self._get_symbolic_link_path(marker_gene_fasta)
shutil.copy2(self._ref_silva_distances, self._local_distance)
mothur_cmd = self._get_mothur_cmd(local_marker_gene_fasta, distance_cutoff, precision, method=method)
cmd = "{mothur_executable} '#{mothur_cmd}'".format(
mothur_executable=self._mothur_executable,
mothur_cmd=mothur_cmd)
os.system(cmd)
os.chdir(old_dir)
project_folder = os.path.dirname(output_cluster_file)
find_mask_list = os.path.join(self._tmp_dir, "*.list")
list_of_files = glob.glob(find_mask_list)
if len(list_of_files) == 0:
msg = "Clustering with mothur failed #1"
self._logger.error(msg)
raise RuntimeError(msg)
elif len(list_of_files) == 1:
local_distance = os.path.join(self._tmp_dir, "ref.align.dist")
if os.path.exists(local_distance):
if self._debug:
shutil.copy2(local_distance, os.path.join(project_folder, "mothur_distances.tsv"))
shutil.copy2(list_of_files[0], output_cluster_file)
self._logger.info("Clustering success")
else:
msg = "Clustering with mothur failed #2"
self._logger.error(msg)
raise RuntimeError(msg)
else:
msg = "Clustering with odd result, several files found!"
self._logger.error(msg)
raise RuntimeError(msg)
end = time.time()
# move logfiles
find_mask_list = os.path.join(self._tmp_dir, "*.logfile")
list_of_log_files = glob.glob(find_mask_list)
for log_file in list_of_log_files:
log_file_name = os.path.basename(log_file)
shutil.copy2(log_file, os.path.join(project_folder, log_file_name))
self._logger.info("Done ({}s)".format(round(end - start), 1))
def _get_symbolic_link_path(self, original_file_path):
"""
Get path to local symbolic link since mothur might act odd else.
@param original_file_path:
@type original_file_path: str | unicode
@return: Local path
@rtype: str | unicode
"""
assert isinstance(original_file_path, basestring)
basename = os.path.basename(original_file_path)
new_path = os.path.join(self._tmp_dir, basename)
os.symlink(original_file_path, new_path)
# return new_path
return basename
def _get_mothur_cmd(self, marker_gene_fasta, cutoff, precision, method="average"):
"""
Get command line to run mothur
@param marker_gene_fasta: Fasta formatted file with marker genes
@type marker_gene_fasta: str | unicode
@param cutoff: Exclude irrelevant higher distances before clustering
@type cutoff: int | long
@param precision: Cluster are made in steps: 10: 0.1, 100: 0.01, 1000: 0.001
@type precision: int | long
@param method: Cluster algorithm 'average', 'furthest', 'nearest'
@type method: str | unicode
@return: Command line
@rtype: str | unicode
"""
assert self.validate_file(marker_gene_fasta)
assert self.validate_number(cutoff, minimum=0, maximum=1)
assert self.validate_number(precision, minimum=0)
assert method in self._cluster_method_choices
# basename = os.path.basename(marker_gene_fasta)
# filename, extension = os.path.splitext(basename)
filename, extension = os.path.splitext(marker_gene_fasta)
#
# mothur_cmd = MGCluster._mothur_cmd_cluster
mothur_cmd = MGCluster._mothur_cmd_cluster_split
return mothur_cmd.format(
wd=self._tmp_dir,
debug=self._tmp_dir,
# filename=os.path.join(self._working_dir, filename),
filename=filename,
mg_fasta=marker_gene_fasta,
ref_align=self._ref_silva_alignment,
ref_names=self._ref_silva_names,
local_dist=self._local_distance,
processors=self._max_processors,
cutoff=cutoff,
precision=precision,
method=method)
@staticmethod
def get_file_name_of_map():
return MGCluster._file_name_map
|
[
"os.path.basename",
"os.getcwd",
"shutil.copy2",
"os.path.dirname",
"os.path.exists",
"os.system",
"time.time",
"tempfile.mkdtemp",
"os.path.splitext",
"glob.glob",
"shutil.rmtree",
"os.symlink",
"os.path.join",
"os.chdir"
] |
[((3389, 3425), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'dir': 'temp_directory'}), '(dir=temp_directory)\n', (3405, 3425), False, 'import tempfile\n'), ((5297, 5308), 'time.time', 'time.time', ([], {}), '()\n', (5306, 5308), False, 'import time\n'), ((5321, 5332), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5330, 5332), False, 'import os\n'), ((5431, 5454), 'os.chdir', 'os.chdir', (['self._tmp_dir'], {}), '(self._tmp_dir)\n', (5439, 5454), False, 'import os\n'), ((5533, 5594), 'shutil.copy2', 'shutil.copy2', (['self._ref_silva_distances', 'self._local_distance'], {}), '(self._ref_silva_distances, self._local_distance)\n', (5545, 5594), False, 'import shutil\n'), ((5828, 5842), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (5837, 5842), False, 'import os\n'), ((5845, 5862), 'os.chdir', 'os.chdir', (['old_dir'], {}), '(old_dir)\n', (5853, 5862), False, 'import os\n'), ((5883, 5919), 'os.path.dirname', 'os.path.dirname', (['output_cluster_file'], {}), '(output_cluster_file)\n', (5898, 5919), False, 'import os\n'), ((5939, 5976), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""*.list"""'], {}), "(self._tmp_dir, '*.list')\n", (5951, 5976), False, 'import os\n'), ((5995, 6020), 'glob.glob', 'glob.glob', (['find_mask_list'], {}), '(find_mask_list)\n', (6004, 6020), False, 'import glob\n'), ((6733, 6744), 'time.time', 'time.time', ([], {}), '()\n', (6742, 6744), False, 'import time\n'), ((6783, 6823), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""*.logfile"""'], {}), "(self._tmp_dir, '*.logfile')\n", (6795, 6823), False, 'import os\n'), ((6846, 6871), 'glob.glob', 'glob.glob', (['find_mask_list'], {}), '(find_mask_list)\n', (6855, 6871), False, 'import glob\n'), ((7410, 7446), 'os.path.basename', 'os.path.basename', (['original_file_path'], {}), '(original_file_path)\n', (7426, 7446), False, 'import os\n'), ((7460, 7497), 'os.path.join', 'os.path.join', (['self._tmp_dir', 'basename'], {}), '(self._tmp_dir, basename)\n', (7472, 7497), False, 'import os\n'), ((7500, 7540), 'os.symlink', 'os.symlink', (['original_file_path', 'new_path'], {}), '(original_file_path, new_path)\n', (7510, 7540), False, 'import os\n'), ((8507, 8542), 'os.path.splitext', 'os.path.splitext', (['marker_gene_fasta'], {}), '(marker_gene_fasta)\n', (8523, 8542), False, 'import os\n'), ((3541, 3604), 'os.path.join', 'os.path.join', (['directory_silva_reference', '"""mothur_ref_distances"""'], {}), "(directory_silva_reference, 'mothur_ref_distances')\n", (3553, 3604), False, 'import os\n'), ((3655, 3714), 'os.path.join', 'os.path.join', (['directory_silva_reference', '"""mothur_ref_names"""'], {}), "(directory_silva_reference, 'mothur_ref_names')\n", (3667, 3714), False, 'import os\n'), ((3779, 3848), 'os.path.join', 'os.path.join', (['directory_silva_reference', '"""mothur_alignment_ref.fasta"""'], {}), "(directory_silva_reference, 'mothur_alignment_ref.fasta')\n", (3791, 3848), False, 'import os\n'), ((4234, 4262), 'shutil.rmtree', 'shutil.rmtree', (['self._tmp_dir'], {}), '(self._tmp_dir)\n', (4247, 4262), False, 'import shutil\n'), ((6928, 6954), 'os.path.basename', 'os.path.basename', (['log_file'], {}), '(log_file)\n', (6944, 6954), False, 'import os\n'), ((6201, 6246), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""ref.align.dist"""'], {}), "(self._tmp_dir, 'ref.align.dist')\n", (6213, 6246), False, 'import os\n'), ((6253, 6283), 'os.path.exists', 'os.path.exists', (['local_distance'], {}), '(local_distance)\n', (6267, 6283), False, 'import os\n'), ((6981, 7024), 'os.path.join', 'os.path.join', (['project_folder', 'log_file_name'], {}), '(project_folder, log_file_name)\n', (6993, 7024), False, 'import os\n'), ((6397, 6448), 'shutil.copy2', 'shutil.copy2', (['list_of_files[0]', 'output_cluster_file'], {}), '(list_of_files[0], output_cluster_file)\n', (6409, 6448), False, 'import shutil\n'), ((6339, 6391), 'os.path.join', 'os.path.join', (['project_folder', '"""mothur_distances.tsv"""'], {}), "(project_folder, 'mothur_distances.tsv')\n", (6351, 6391), False, 'import os\n')]
|
from datetime import timedelta
from celery import Celery
from ..conf import setting
celery = Celery(
'noweibo',
broker=setting.CELERY_BROKER,
)
celery.conf.update(
CELERY_TIMEZONE='Asia/Shanghai',
CELERY_IMPORTS=('noweibo.tasks.periodic', ),
CELERY_RESULT_BACKEND=setting.CELERY_BACKEND,
CELERY_IGNORE_RESULT=True,
CELERY_ACCEPT_CONTENT=['pickle', 'json', ],
CELERY_TASK_SERIALIZER='pickle',
CELERYD_MAX_TASKS_PER_CHILD=100,
CELERYBEAT_SCHEDULE={
'user_update': {
'task': 'noweibo.tasks.periodic.user_update',
'schedule': timedelta(minutes=setting.SCHEDULE_PERIODIC),
'args': (),
},
'weibo_update': {
'task': 'noweibo.tasks.periodic.weibo_update',
'schedule': timedelta(minutes=setting.SCHEDULE_PERIODIC),
'args': (),
},
'weibo_scan': {
'task': 'noweibo.tasks.periodic.weibo_scan',
'schedule': timedelta(minutes=setting.SCHEDULE_PERIODIC),
'args': (),
},
'weibo_delete': {
'task': 'noweibo.tasks.periodic.weibo_delete',
'schedule': timedelta(minutes=setting.SCHEDULE_PERIODIC),
'args': (),
},
},
ADMINS=(('<NAME>', '<EMAIL>'), ),
CELERY_SEND_TASK_ERROR_EMAILS=True,
SERVER_EMAIL='<EMAIL>',
EMAIL_HOST='smtp.163.com',
EMAIL_PORT=25,
EMAIL_HOST_USER='<EMAIL>',
EMAIL_HOST_PASSWORD='<PASSWORD>',
)
if __name__ == '__main__':
celery.start()
|
[
"celery.Celery",
"datetime.timedelta"
] |
[((97, 144), 'celery.Celery', 'Celery', (['"""noweibo"""'], {'broker': 'setting.CELERY_BROKER'}), "('noweibo', broker=setting.CELERY_BROKER)\n", (103, 144), False, 'from celery import Celery\n'), ((599, 643), 'datetime.timedelta', 'timedelta', ([], {'minutes': 'setting.SCHEDULE_PERIODIC'}), '(minutes=setting.SCHEDULE_PERIODIC)\n', (608, 643), False, 'from datetime import timedelta\n'), ((789, 833), 'datetime.timedelta', 'timedelta', ([], {'minutes': 'setting.SCHEDULE_PERIODIC'}), '(minutes=setting.SCHEDULE_PERIODIC)\n', (798, 833), False, 'from datetime import timedelta\n'), ((975, 1019), 'datetime.timedelta', 'timedelta', ([], {'minutes': 'setting.SCHEDULE_PERIODIC'}), '(minutes=setting.SCHEDULE_PERIODIC)\n', (984, 1019), False, 'from datetime import timedelta\n'), ((1165, 1209), 'datetime.timedelta', 'timedelta', ([], {'minutes': 'setting.SCHEDULE_PERIODIC'}), '(minutes=setting.SCHEDULE_PERIODIC)\n', (1174, 1209), False, 'from datetime import timedelta\n')]
|
"""Next-gen alignments with BWA (http://bio-bwa.sourceforge.net/)
"""
import contextlib
import gzip
import os
from Bio.SeqIO.QualityIO import FastqGeneralIterator
from bcbio.pipeline import config_utils
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.ngsalign import novoalign
from bcbio.provenance import do
galaxy_location_file = "bwa_index.loc"
def align_bam(in_bam, ref_file, names, align_dir, config):
"""Perform direct alignment of an input BAM file with BWA using pipes.
This avoids disk IO by piping between processes:
- samtools sort of input BAM to queryname
- bedtools conversion to interleaved FASTQ
- bwa-mem alignment
- samtools conversion to BAM
- samtools sort to coordinate
"""
out_file = os.path.join(align_dir, "{0}-sort.bam".format(names["lane"]))
samtools = config_utils.get_program("samtools", config)
bedtools = config_utils.get_program("bedtools", config)
bwa = config_utils.get_program("bwa", config)
resources = config_utils.get_resources("samtools", config)
num_cores = config["algorithm"].get("num_cores", 1)
# adjust memory for samtools since used for input and output
max_mem = config_utils.adjust_memory(resources.get("memory", "1G"),
3, "decrease")
rg_info = novoalign.get_rg_info(names)
if not utils.file_exists(out_file):
with utils.curdir_tmpdir() as work_dir:
with file_transaction(out_file) as tx_out_file:
tx_out_prefix = os.path.splitext(tx_out_file)[0]
prefix1 = "%s-in1" % tx_out_prefix
cmd = ("{samtools} sort -n -o -l 0 -@ {num_cores} -m {max_mem} {in_bam} {prefix1} "
"| {bedtools} bamtofastq -i /dev/stdin -fq /dev/stdout -fq2 /dev/stdout "
"| {bwa} mem -p -M -t {num_cores} -R '{rg_info}' -v 1 {ref_file} - "
"| {samtools} view -b -S -u - "
"| {samtools} sort -@ {num_cores} -m {max_mem} - {tx_out_prefix}")
cmd = cmd.format(**locals())
do.run(cmd, "bwa mem alignment from BAM: %s" % names["sample"], None,
[do.file_nonempty(tx_out_file)])
return out_file
def can_pipe(fastq_file):
"""bwa-mem handle longer (> 75bp) reads with improved piping.
Default to no piping if more than half the first 500 reads are small.
"""
min_size = 75
thresh = 0.5
tocheck = 500
shorter = 0
if fastq_file.endswith(".gz"):
handle = gzip.open(fastq_file, "rb")
else:
handle = open(fastq_file)
with contextlib.closing(handle) as in_handle:
fqit = FastqGeneralIterator(in_handle)
for i, (_, seq, _) in enumerate(fqit):
if len(seq) < min_size:
shorter += 1
if i > tocheck:
break
return (float(shorter) / float(tocheck)) <= thresh
def align_pipe(fastq_file, pair_file, ref_file, names, align_dir, config):
"""Perform piped alignment of fastq input files, generating sorted output BAM.
"""
pair_file = pair_file if pair_file else ""
out_file = os.path.join(align_dir, "{0}-sort.bam".format(names["lane"]))
samtools = config_utils.get_program("samtools", config)
bwa = config_utils.get_program("bwa", config)
resources = config_utils.get_resources("samtools", config)
num_cores = config["algorithm"].get("num_cores", 1)
# adjust memory for samtools since used alongside alignment
max_mem = config_utils.adjust_memory(resources.get("memory", "2G"),
3, "decrease")
rg_info = novoalign.get_rg_info(names)
if not utils.file_exists(out_file):
with utils.curdir_tmpdir() as work_dir:
with file_transaction(out_file) as tx_out_file:
tx_out_prefix = os.path.splitext(tx_out_file)[0]
cmd = ("{bwa} mem -M -t {num_cores} -R '{rg_info}' -v 1 {ref_file} "
"{fastq_file} {pair_file} "
"| {samtools} view -b -S -u - "
"| {samtools} sort -@ {num_cores} -m {max_mem} - {tx_out_prefix}")
cmd = cmd.format(**locals())
do.run(cmd, "bwa mem alignment from fastq: %s" % names["sample"], None,
[do.file_nonempty(tx_out_file)])
return out_file
def align(fastq_file, pair_file, ref_file, out_base, align_dir, config,
names=None):
"""Perform a BWA alignment, generating a SAM file.
"""
sai1_file = os.path.join(align_dir, "%s_1.sai" % out_base)
sai2_file = (os.path.join(align_dir, "%s_2.sai" % out_base)
if pair_file else None)
sam_file = os.path.join(align_dir, "%s.sam" % out_base)
if not utils.file_exists(sam_file):
if not utils.file_exists(sai1_file):
with file_transaction(sai1_file) as tx_sai1_file:
_run_bwa_align(fastq_file, ref_file, tx_sai1_file, config)
if sai2_file and not utils.file_exists(sai2_file):
with file_transaction(sai2_file) as tx_sai2_file:
_run_bwa_align(pair_file, ref_file, tx_sai2_file, config)
align_type = "sampe" if sai2_file else "samse"
sam_cl = [config_utils.get_program("bwa", config), align_type, ref_file, sai1_file]
if sai2_file:
sam_cl.append(sai2_file)
sam_cl.append(fastq_file)
if sai2_file:
sam_cl.append(pair_file)
with file_transaction(sam_file) as tx_sam_file:
cmd = "{cl} > {out_file}".format(cl=" ".join(sam_cl), out_file=tx_sam_file)
do.run(cmd, "bwa {align_type}".format(**locals()), None)
return sam_file
def _bwa_args_from_config(config):
num_cores = config["algorithm"].get("num_cores", 1)
core_flags = ["-t", str(num_cores)] if num_cores > 1 else []
qual_format = config["algorithm"].get("quality_format", "").lower()
qual_flags = ["-I"] if qual_format == "illumina" else []
return core_flags + qual_flags
def _run_bwa_align(fastq_file, ref_file, out_file, config):
aln_cl = [config_utils.get_program("bwa", config), "aln",
"-n %s" % config["algorithm"]["max_errors"],
"-k %s" % config["algorithm"]["max_errors"]]
aln_cl += _bwa_args_from_config(config)
aln_cl += [ref_file, fastq_file]
cmd = "{cl} > {out_file}".format(cl=" ".join(aln_cl), out_file=out_file)
do.run(cmd, "bwa aln: {f}".format(f=os.path.basename(fastq_file)), None)
|
[
"bcbio.utils.file_exists",
"bcbio.ngsalign.novoalign.get_rg_info",
"gzip.open",
"bcbio.pipeline.config_utils.get_resources",
"bcbio.provenance.do.file_nonempty",
"os.path.basename",
"Bio.SeqIO.QualityIO.FastqGeneralIterator",
"bcbio.utils.curdir_tmpdir",
"bcbio.pipeline.config_utils.get_program",
"os.path.splitext",
"contextlib.closing",
"bcbio.distributed.transaction.file_transaction",
"os.path.join"
] |
[((875, 919), 'bcbio.pipeline.config_utils.get_program', 'config_utils.get_program', (['"""samtools"""', 'config'], {}), "('samtools', config)\n", (899, 919), False, 'from bcbio.pipeline import config_utils\n'), ((935, 979), 'bcbio.pipeline.config_utils.get_program', 'config_utils.get_program', (['"""bedtools"""', 'config'], {}), "('bedtools', config)\n", (959, 979), False, 'from bcbio.pipeline import config_utils\n'), ((990, 1029), 'bcbio.pipeline.config_utils.get_program', 'config_utils.get_program', (['"""bwa"""', 'config'], {}), "('bwa', config)\n", (1014, 1029), False, 'from bcbio.pipeline import config_utils\n'), ((1046, 1092), 'bcbio.pipeline.config_utils.get_resources', 'config_utils.get_resources', (['"""samtools"""', 'config'], {}), "('samtools', config)\n", (1072, 1092), False, 'from bcbio.pipeline import config_utils\n'), ((1356, 1384), 'bcbio.ngsalign.novoalign.get_rg_info', 'novoalign.get_rg_info', (['names'], {}), '(names)\n', (1377, 1384), False, 'from bcbio.ngsalign import novoalign\n'), ((3278, 3322), 'bcbio.pipeline.config_utils.get_program', 'config_utils.get_program', (['"""samtools"""', 'config'], {}), "('samtools', config)\n", (3302, 3322), False, 'from bcbio.pipeline import config_utils\n'), ((3333, 3372), 'bcbio.pipeline.config_utils.get_program', 'config_utils.get_program', (['"""bwa"""', 'config'], {}), "('bwa', config)\n", (3357, 3372), False, 'from bcbio.pipeline import config_utils\n'), ((3389, 3435), 'bcbio.pipeline.config_utils.get_resources', 'config_utils.get_resources', (['"""samtools"""', 'config'], {}), "('samtools', config)\n", (3415, 3435), False, 'from bcbio.pipeline import config_utils\n'), ((3698, 3726), 'bcbio.ngsalign.novoalign.get_rg_info', 'novoalign.get_rg_info', (['names'], {}), '(names)\n', (3719, 3726), False, 'from bcbio.ngsalign import novoalign\n'), ((4605, 4651), 'os.path.join', 'os.path.join', (['align_dir', "('%s_1.sai' % out_base)"], {}), "(align_dir, '%s_1.sai' % out_base)\n", (4617, 4651), False, 'import os\n'), ((4772, 4816), 'os.path.join', 'os.path.join', (['align_dir', "('%s.sam' % out_base)"], {}), "(align_dir, '%s.sam' % out_base)\n", (4784, 4816), False, 'import os\n'), ((1396, 1423), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (1413, 1423), False, 'from bcbio import utils\n'), ((2586, 2613), 'gzip.open', 'gzip.open', (['fastq_file', '"""rb"""'], {}), "(fastq_file, 'rb')\n", (2595, 2613), False, 'import gzip\n'), ((2667, 2693), 'contextlib.closing', 'contextlib.closing', (['handle'], {}), '(handle)\n', (2685, 2693), False, 'import contextlib\n'), ((2723, 2754), 'Bio.SeqIO.QualityIO.FastqGeneralIterator', 'FastqGeneralIterator', (['in_handle'], {}), '(in_handle)\n', (2743, 2754), False, 'from Bio.SeqIO.QualityIO import FastqGeneralIterator\n'), ((3738, 3765), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (3755, 3765), False, 'from bcbio import utils\n'), ((4669, 4715), 'os.path.join', 'os.path.join', (['align_dir', "('%s_2.sai' % out_base)"], {}), "(align_dir, '%s_2.sai' % out_base)\n", (4681, 4715), False, 'import os\n'), ((4828, 4855), 'bcbio.utils.file_exists', 'utils.file_exists', (['sam_file'], {}), '(sam_file)\n', (4845, 4855), False, 'from bcbio import utils\n'), ((6166, 6205), 'bcbio.pipeline.config_utils.get_program', 'config_utils.get_program', (['"""bwa"""', 'config'], {}), "('bwa', config)\n", (6190, 6205), False, 'from bcbio.pipeline import config_utils\n'), ((1438, 1459), 'bcbio.utils.curdir_tmpdir', 'utils.curdir_tmpdir', ([], {}), '()\n', (1457, 1459), False, 'from bcbio import utils\n'), ((3780, 3801), 'bcbio.utils.curdir_tmpdir', 'utils.curdir_tmpdir', ([], {}), '()\n', (3799, 3801), False, 'from bcbio import utils\n'), ((4872, 4900), 'bcbio.utils.file_exists', 'utils.file_exists', (['sai1_file'], {}), '(sai1_file)\n', (4889, 4900), False, 'from bcbio import utils\n'), ((5307, 5346), 'bcbio.pipeline.config_utils.get_program', 'config_utils.get_program', (['"""bwa"""', 'config'], {}), "('bwa', config)\n", (5331, 5346), False, 'from bcbio.pipeline import config_utils\n'), ((5546, 5572), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['sam_file'], {}), '(sam_file)\n', (5562, 5572), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((1490, 1516), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['out_file'], {}), '(out_file)\n', (1506, 1516), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((3832, 3858), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['out_file'], {}), '(out_file)\n', (3848, 3858), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((4919, 4946), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['sai1_file'], {}), '(sai1_file)\n', (4935, 4946), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((5068, 5096), 'bcbio.utils.file_exists', 'utils.file_exists', (['sai2_file'], {}), '(sai2_file)\n', (5085, 5096), False, 'from bcbio import utils\n'), ((5115, 5142), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['sai2_file'], {}), '(sai2_file)\n', (5131, 5142), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((6530, 6558), 'os.path.basename', 'os.path.basename', (['fastq_file'], {}), '(fastq_file)\n', (6546, 6558), False, 'import os\n'), ((1565, 1594), 'os.path.splitext', 'os.path.splitext', (['tx_out_file'], {}), '(tx_out_file)\n', (1581, 1594), False, 'import os\n'), ((3907, 3936), 'os.path.splitext', 'os.path.splitext', (['tx_out_file'], {}), '(tx_out_file)\n', (3923, 3936), False, 'import os\n'), ((2238, 2267), 'bcbio.provenance.do.file_nonempty', 'do.file_nonempty', (['tx_out_file'], {}), '(tx_out_file)\n', (2254, 2267), False, 'from bcbio.provenance import do\n'), ((4378, 4407), 'bcbio.provenance.do.file_nonempty', 'do.file_nonempty', (['tx_out_file'], {}), '(tx_out_file)\n', (4394, 4407), False, 'from bcbio.provenance import do\n')]
|
from flask_login import current_user
from portality.core import app
from portality import models
# query sanitisers
##################
def public_query_validator(q):
# no deep paging
if q.from_result() > 10000:
return False
if q.size() > 200:
return False
# if the query has facets, that's fine
# otherwise, if it has no facets, only allow "count" style
# queries with zero results returned
if q.has_facets():
return True
else:
return q.size() == 0
# query filters
###############
def only_in_doaj(q):
q.clear_match_all()
q.add_must({"term" : {"admin.in_doaj" : True}})
return q
def owner(q):
q.clear_match_all()
q.add_must({"term" : {"admin.owner.exact" : current_user.id}})
return q
def update_request(q):
q.clear_match_all()
q.add_must({"range" : {"created_date" : {"gte" : app.config.get("UPDATE_REQUEST_SHOW_OLDEST")}}})
return q
def associate(q):
q.clear_match_all()
q.add_must({"term" : {"admin.editor.exact" : current_user.id}})
return q
def editor(q):
gnames = []
groups = models.EditorGroup.groups_by_editor(current_user.id)
for g in groups:
gnames.append(g.name)
q.clear_match_all()
q.add_must({"terms" : {"admin.editor_group.exact" : gnames}})
return q
def private_source(q):
q.add_include(["admin.application_status", "suggestion", "admin.ticked",
"admin.seal", "last_updated", "created_date", "id", "bibjson"])
return q
def public_source(q):
q.add_include(["admin.ticked", "admin.seal", "last_updated",
"created_date", "id", "bibjson"])
return q
# results filters
#################
def public_result_filter(results, unpacked=False):
# Dealing with single unpacked result
if unpacked:
if "admin" in results:
for k in results["admin"].keys():
if k not in ["ticked", "seal"]:
del results["admin"][k]
return results
# Dealing with a list of es results
if "hits" not in results:
return results
if "hits" not in results["hits"]:
return results
for hit in results["hits"]["hits"]:
if "_source" in hit:
if "admin" in hit["_source"]:
for k in hit["_source"]["admin"].keys():
if k not in ["ticked", "seal"]:
del hit["_source"]["admin"][k]
return results
def prune_author_emails(results, unpacked=False):
# Dealing with single unpacked ES result
if unpacked:
if "bibjson" in results:
if "author" in results["bibjson"]:
for a in results["bibjson"]["author"]:
if "email" in a:
del a["email"]
return results
# Dealing with a list of ES results
if "hits" not in results:
return results
if "hits" not in results["hits"]:
return results
for hit in results["hits"]["hits"]:
if "_source" in hit:
if "bibjson" in hit["_source"]:
if "author" in hit["_source"]["bibjson"]:
for a in hit["_source"]["bibjson"]["author"]:
if "email" in a:
del a["email"]
return results
def publisher_result_filter(results, unpacked=False):
# Dealing with single unpacked ES result
if unpacked:
if "admin" in results:
for k in results["admin"].keys():
if k not in ["ticked", "seal", "in_doaj", "related_applications", "current_application", "current_journal", "application_status"]:
del results["admin"][k]
return results
# Dealing with a list of ES results
if "hits" not in results:
return results
if "hits" not in results["hits"]:
return results
for hit in results["hits"]["hits"]:
if "_source" in hit:
if "admin" in hit["_source"]:
for k in hit["_source"]["admin"].keys():
if k not in ["ticked", "seal", "in_doaj", "related_applications", "current_application", "current_journal", "application_status"]:
del hit["_source"]["admin"][k]
return results
|
[
"portality.core.app.config.get",
"portality.models.EditorGroup.groups_by_editor"
] |
[((1118, 1170), 'portality.models.EditorGroup.groups_by_editor', 'models.EditorGroup.groups_by_editor', (['current_user.id'], {}), '(current_user.id)\n', (1153, 1170), False, 'from portality import models\n'), ((885, 929), 'portality.core.app.config.get', 'app.config.get', (['"""UPDATE_REQUEST_SHOW_OLDEST"""'], {}), "('UPDATE_REQUEST_SHOW_OLDEST')\n", (899, 929), False, 'from portality.core import app\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pyGDM2 import (structures, materials, core,
linear, fields, propagators,
tools)
def get_spectrum(geometry, step, wavelengths):
'''Obtains a uv-vis spectra for a specified geometry'''
material = materials.gold()
struct = structures.struct(step, geometry, material, verbose=False)
struct = structures.center_struct(struct)
field_generator = fields.plane_wave
kwargs = dict(theta=0, inc_angle=180)
efield = fields.efield(field_generator,
wavelengths=wavelengths, kwargs=kwargs)
dyads = propagators.DyadsQuasistatic123(n1 = 1.33, n2 = 1.33, n3 = 1.33)
sim = core.simulation(struct, efield, dyads)
sim.scatter(verbose=False)
field_kwargs = tools.get_possible_field_params_spectra(sim)
config_idx = 0
wl, spectrum = tools.calculate_spectrum(sim,
field_kwargs[config_idx], linear.extinct)
abs_ = spectrum.T[2]/np.max(spectrum.T[2])
return abs_, geometry
def obtain_spectra(step, radius_mean, radius_std, wavelength):
'''Calculates the absorption spectra of polydisperse gold spheres that have a normally distributed
radius.
Inputs:
- step: The step size used for the calculation.
- radius_mean: The mean of the normal distribution used to calculate the radius of the sphere
- radius_std: The std of the normal distribution used to calculate the radius of the sphere
- wavelength: A 1-d array of the wavelength values to calculate the absorption spectra
Outputs:
- array: A 2d array of the wavelengths and Intensity values.
'''
n_spheres = 7
radius_list = []
for i in range(n_spheres):
# Normal distribution parameters for Sphere Radius
radius_mean = 6
radius_std = 3
r = (np.random.randn(1)[0]*radius_std + radius_mean)/step
radius_list.append(r)
geometry = structures.sphere(step, R=r, mesh='cube')
loc_array = np.array([[0,0,0],[0,0,1],[0,0,-1],[1,0,0],[-1,0,0],[0,1,0],[0,-1,0]])
sphere = np.hstack((geometry[:,0].reshape(-1,1) + 30*loc_array[i,0]*radius_mean, geometry[:,1].reshape(-1,1) + 30*loc_array[i,1]*radius_mean, geometry[:,2].reshape(-1,1)+ 30*loc_array[i,2]*radius_mean))
if i == 0:
sample = sphere
else:
sample = np.vstack((sample, sphere))
I, g = get_spectrum(geometry, step, wavelength)
array = np.hstack((wavelength.reshape(-1,1), I.reshape(-1,1)))
return array
|
[
"numpy.random.randn",
"pyGDM2.materials.gold",
"pyGDM2.tools.calculate_spectrum",
"pyGDM2.structures.center_struct",
"pyGDM2.structures.struct",
"pyGDM2.core.simulation",
"numpy.max",
"pyGDM2.structures.sphere",
"numpy.array",
"pyGDM2.propagators.DyadsQuasistatic123",
"pyGDM2.fields.efield",
"pyGDM2.tools.get_possible_field_params_spectra",
"numpy.vstack"
] |
[((322, 338), 'pyGDM2.materials.gold', 'materials.gold', ([], {}), '()\n', (336, 338), False, 'from pyGDM2 import structures, materials, core, linear, fields, propagators, tools\n'), ((352, 410), 'pyGDM2.structures.struct', 'structures.struct', (['step', 'geometry', 'material'], {'verbose': '(False)'}), '(step, geometry, material, verbose=False)\n', (369, 410), False, 'from pyGDM2 import structures, materials, core, linear, fields, propagators, tools\n'), ((424, 456), 'pyGDM2.structures.center_struct', 'structures.center_struct', (['struct'], {}), '(struct)\n', (448, 456), False, 'from pyGDM2 import structures, materials, core, linear, fields, propagators, tools\n'), ((553, 623), 'pyGDM2.fields.efield', 'fields.efield', (['field_generator'], {'wavelengths': 'wavelengths', 'kwargs': 'kwargs'}), '(field_generator, wavelengths=wavelengths, kwargs=kwargs)\n', (566, 623), False, 'from pyGDM2 import structures, materials, core, linear, fields, propagators, tools\n'), ((660, 718), 'pyGDM2.propagators.DyadsQuasistatic123', 'propagators.DyadsQuasistatic123', ([], {'n1': '(1.33)', 'n2': '(1.33)', 'n3': '(1.33)'}), '(n1=1.33, n2=1.33, n3=1.33)\n', (691, 718), False, 'from pyGDM2 import structures, materials, core, linear, fields, propagators, tools\n'), ((736, 774), 'pyGDM2.core.simulation', 'core.simulation', (['struct', 'efield', 'dyads'], {}), '(struct, efield, dyads)\n', (751, 774), False, 'from pyGDM2 import structures, materials, core, linear, fields, propagators, tools\n'), ((825, 869), 'pyGDM2.tools.get_possible_field_params_spectra', 'tools.get_possible_field_params_spectra', (['sim'], {}), '(sim)\n', (864, 869), False, 'from pyGDM2 import structures, materials, core, linear, fields, propagators, tools\n'), ((909, 980), 'pyGDM2.tools.calculate_spectrum', 'tools.calculate_spectrum', (['sim', 'field_kwargs[config_idx]', 'linear.extinct'], {}), '(sim, field_kwargs[config_idx], linear.extinct)\n', (933, 980), False, 'from pyGDM2 import structures, materials, core, linear, fields, propagators, tools\n'), ((1035, 1056), 'numpy.max', 'np.max', (['spectrum.T[2]'], {}), '(spectrum.T[2])\n', (1041, 1056), True, 'import numpy as np\n'), ((2015, 2056), 'pyGDM2.structures.sphere', 'structures.sphere', (['step'], {'R': 'r', 'mesh': '"""cube"""'}), "(step, R=r, mesh='cube')\n", (2032, 2056), False, 'from pyGDM2 import structures, materials, core, linear, fields, propagators, tools\n'), ((2077, 2172), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 1], [0, 0, -1], [1, 0, 0], [-1, 0, 0], [0, 1, 0], [0, -1, 0]\n ]'], {}), '([[0, 0, 0], [0, 0, 1], [0, 0, -1], [1, 0, 0], [-1, 0, 0], [0, 1, 0\n ], [0, -1, 0]])\n', (2085, 2172), True, 'import numpy as np\n'), ((2441, 2468), 'numpy.vstack', 'np.vstack', (['(sample, sphere)'], {}), '((sample, sphere))\n', (2450, 2468), True, 'import numpy as np\n'), ((1913, 1931), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (1928, 1931), True, 'import numpy as np\n')]
|
import logging
log = logging.getLogger(__name__)
class Codec(object):
"""Codec."""
def __init__(self):
pass
def compress(self, stream):
"""Compress.
:param io.BytesIO stream: Uncompressed input stream.
:rtype: io.BytesIO
"""
return stream
def decompress(self, stream):
"""Decompress.
:param io.BytesIO stream: Compressed input stream.
:rtype: io.BytesIO
"""
return stream
|
[
"logging.getLogger"
] |
[((22, 49), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (39, 49), False, 'import logging\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 3 16:14:46 2020
@author: Dripta
"""
from covidindia import *
import os
import pickle
file_path = os.path.dirname(os.path.abspath(__file__))
data_path=os.path.join(file_path,'static','server_data')
with open(os.path.join(file_path,'updater.txt'),'a') as f:
f.write('Update Started\nGathering Data..\n')
init = initializer(silent=True)
with open(os.path.join(data_path, 'init.pkl'), 'wb') as init_file:
pickle.dump(init, init_file)
filter_data = Data(init)
with open(os.path.join(data_path, 'filter_data.pkl'), 'wb') as filter_file:
pickle.dump(filter_data, filter_file)
f.write('Gathering Demographic Data...\n')
demo = Demographic_overview(init, silent=True)
with open(os.path.join(data_path, 'demo.pkl'), 'wb') as demo_file:
pickle.dump(demo, demo_file)
f.write('Gathering tested data...\n')
tested_df = filter_data.tested_subject_data()
tested_df.to_csv(os.path.join(data_path, 'tested_data.csv'), index=False)
f.write('Update Done.')
f.close()
|
[
"os.path.abspath",
"os.path.join",
"pickle.dump"
] |
[((202, 250), 'os.path.join', 'os.path.join', (['file_path', '"""static"""', '"""server_data"""'], {}), "(file_path, 'static', 'server_data')\n", (214, 250), False, 'import os\n'), ((164, 189), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (179, 189), False, 'import os\n'), ((261, 299), 'os.path.join', 'os.path.join', (['file_path', '"""updater.txt"""'], {}), "(file_path, 'updater.txt')\n", (273, 299), False, 'import os\n'), ((475, 503), 'pickle.dump', 'pickle.dump', (['init', 'init_file'], {}), '(init, init_file)\n', (486, 503), False, 'import pickle\n'), ((621, 658), 'pickle.dump', 'pickle.dump', (['filter_data', 'filter_file'], {}), '(filter_data, filter_file)\n', (632, 658), False, 'import pickle\n'), ((836, 864), 'pickle.dump', 'pickle.dump', (['demo', 'demo_file'], {}), '(demo, demo_file)\n', (847, 864), False, 'import pickle\n'), ((978, 1020), 'os.path.join', 'os.path.join', (['data_path', '"""tested_data.csv"""'], {}), "(data_path, 'tested_data.csv')\n", (990, 1020), False, 'import os\n'), ((410, 445), 'os.path.join', 'os.path.join', (['data_path', '"""init.pkl"""'], {}), "(data_path, 'init.pkl')\n", (422, 445), False, 'import os\n'), ((547, 589), 'os.path.join', 'os.path.join', (['data_path', '"""filter_data.pkl"""'], {}), "(data_path, 'filter_data.pkl')\n", (559, 589), False, 'import os\n'), ((771, 806), 'os.path.join', 'os.path.join', (['data_path', '"""demo.pkl"""'], {}), "(data_path, 'demo.pkl')\n", (783, 806), False, 'import os\n')]
|
from typing import List
from aiohttp import web
from openapi.db.path import ApiPath, SqlApiPath
from openapi.exc import JsonHttpException
from openapi.spec import op
from .models import (
MultiKey,
MultiKeyUnique,
SourcePrice,
Task,
TaskAdd,
TaskOrderableQuery,
TaskPathSchema,
TaskPathSchema2,
TaskQuery,
TaskUpdate,
)
additional_routes = web.RouteTableDef()
invalid_path_routes = web.RouteTableDef()
invalid_method_description_routes = web.RouteTableDef()
invalid_method_summary_routes = web.RouteTableDef()
invalid_method_description_routes = web.RouteTableDef()
invalid_tag_missing_description_routes = web.RouteTableDef()
@additional_routes.view("/bulk/tasks")
class TaskBulkPath(SqlApiPath):
"""
---
summary: Bulk manage tasks
tags:
- Task
"""
table = "tasks"
@op(body_schema=List[TaskAdd], response_schema=List[Task])
async def post(self):
"""
---
summary: Create Tasks
description: Create a group of Tasks
responses:
201:
description: Created tasks
"""
data = await self.create_list()
return self.json_response(data, status=201)
@additional_routes.view("/transaction/tasks")
class TaskTransactionsPath(SqlApiPath):
"""
---
summary: Manage tasks with transactions
tags:
- Task
- name: Transaction
description: Endpoints that creates a new transaction
"""
table = "tasks"
@op(body_schema=TaskAdd, response_schema=Task)
async def post(self):
"""
---
summary: Create Task
description: Create a Task using transatcion
responses:
201:
description: Created Task
500:
description: Forced raised error
"""
data = await self.json_data()
async with self.db.transaction() as conn:
should_raise = data.pop("should_raise", False)
task = await self.create_one(data=data, conn=conn)
if should_raise:
raise JsonHttpException(status=500)
return self.json_response(data=task, status=201)
@op(query_schema=TaskOrderableQuery, response_schema=List[Task])
async def get(self):
"""
---
summary: Retrieve Tasks
description: Retrieve a list of Tasks using transaction
responses:
200:
description: Authenticated tasks
"""
paginated = await self.get_list()
return paginated.json_response()
@additional_routes.view("/transaction/tasks/{id}")
class TaskTransactionPath(SqlApiPath):
"""
---
summary: Manage Tasks with transactions
tags:
- Task
- Transaction
"""
table = "tasks"
path_schema = TaskPathSchema
@op(response_schema=Task)
async def get(self):
"""
---
summary: Retrieve Task
description: Retrieve an existing Task by ID using transaction
responses:
200:
description: the task
"""
async with self.db.transaction() as conn:
data = await self.get_one(conn=conn)
return self.json_response(data)
@op(body_schema=TaskUpdate, response_schema=Task)
async def patch(self):
"""
---
summary: Update Task
description: Update an existing Task by ID using transaction
responses:
200:
description: the updated task
"""
data = await self.json_data()
async with self.db.transaction() as conn:
should_raise = data.pop("should_raise", False)
task = await self.update_one(data=data, conn=conn)
if should_raise:
raise JsonHttpException(status=500)
return self.json_response(data=task, status=200)
@op()
async def delete(self):
"""
---
summary: Delete Task
description: Delete an existing task using transaction
responses:
204:
description: Task successfully deleted
"""
data = await self.json_data()
async with self.db.transaction() as conn:
should_raise = data.pop("should_raise", False)
await self.delete_one(conn=conn)
if should_raise:
raise JsonHttpException(status=500)
return self.json_response(data={}, status=204)
@additional_routes.view("/transaction/bulk/tasks")
class TaskBulkTransactionPath(SqlApiPath):
"""
---
summary: Bulk manage tasks with transactions
tags:
- Task
- Transaction
"""
table = "tasks"
@op(query_schema=TaskQuery)
async def delete(self):
"""
---
summary: Delete Tasks
description: Bulk delete a group of Tasks using transaction
responses:
204:
description: Tasks successfully deleted
"""
async with self.db.transaction() as conn:
await self.delete_list(query=dict(self.request.query), conn=conn)
return web.Response(status=204)
@op(body_schema=List[TaskAdd], response_schema=List[Task])
async def post(self):
"""
---
summary: Create Tasks
description: Bulk create Tasks using transaction
responses:
201:
description: created tasks
"""
async with self.db.transaction() as conn:
data = await self.create_list(conn=conn)
return self.json_response(data, status=201)
@additional_routes.view("/tasks2/{task_id}")
class TaskPath2(SqlApiPath):
"""
---
tags:
- Task
"""
table = "tasks"
path_schema = TaskPathSchema2
def get_filters(self):
filters = super().get_filters()
return {"id": filters["task_id"]}
@op(response_schema=Task)
async def get(self):
"""
---
summary: Retrieve a Task
description: Retrieve an existing Task by ID
responses:
200:
description: the task
"""
data = await self.get_one(filters=self.get_filters())
return self.json_response(data)
@op(response_schema=Task, body_schema=TaskUpdate)
async def patch(self):
"""
---
summary: Update a Task
description: Update an existing Task by ID
responses:
200:
description: the updated task
"""
data = await self.update_one(filters=self.get_filters())
return self.json_response(data)
@op()
async def delete(self):
"""
---
summary: Delete a Task
description: Delete an existing Task
responses:
204:
description: Task successfully deleted
"""
await self.delete_one(filters=self.get_filters())
return web.Response(status=204)
@additional_routes.view("/simple-list")
class SipleList(ApiPath):
"""
---
tags:
- Task
"""
@op(response_schema=List[int])
async def get(self):
"""
---
summary: Retrieve a list of integer
description: list of simple integers
responses:
200:
description: list
"""
return self.json_response([2, 4, 5])
@additional_routes.view("/multikey")
class MultiKeyPath(SqlApiPath):
"""
---
summary: Create rows in multikey constraint table
tags:
- name: Multikey
description: several keys
"""
table = "multi_key"
@op(response_schema=MultiKey, body_schema=MultiKey)
async def post(self):
"""
---
summary: Create row in multi-column constrained table
description: Create row in multi-column constrained table
responses:
201:
description: New row
"""
data = await self.create_one()
return self.json_response(data, status=201)
@op(response_schema=List[MultiKey])
async def get(self):
"""
---
summary: List multi-column constrained items
description: List multi-column constrained items
responses:
200:
description: List of items
"""
paginated = await self.get_list()
return paginated.json_response()
@additional_routes.view("/multikey-unique")
class MultiKeyUniquePath(SqlApiPath):
"""
---
summary: Create rows in multikey constraint table
tags:
- Multikey
"""
table = "multi_key_unique"
@op(response_schema=MultiKeyUnique, body_schema=MultiKeyUnique)
async def post(self):
"""
---
summary: Create row in multi-column constrained table
description: Create row in multi-column constrained table
responses:
201:
description: New row
"""
data = await self.create_one()
return self.json_response(data, status=201)
@op(response_schema=List[MultiKeyUnique])
async def get(self):
"""
---
summary: List multi-column constrained items
description: List multi-column constrained items
responses:
200:
description: List of items
"""
paginated = await self.get_list()
return paginated.json_response()
@additional_routes.view("/sources")
class SourcePath(ApiPath):
"""
---
summary: Sources
tags:
- name: Sources
description: Sources
"""
@op(response_schema=List[SourcePrice])
async def get(self):
"""
---
summary: List sources
description: List sources
responses:
200:
description: List of sources
"""
return self.json_response([])
@invalid_path_routes.view("/tasks")
class NoTagsTaskPath(SqlApiPath):
"""
---
"""
pass
@invalid_method_summary_routes.view("/tasks")
class NoSummaryMethodPath(SqlApiPath):
"""
---
tags:
- Tag
"""
@op(response_schema=List[Task])
def get(self):
"""
---
description: Valid method description
responses:
200:
description: Valid response description
"""
pass
@invalid_method_description_routes.view("/tasks")
class NoDescriptionMethodPath(SqlApiPath):
"""
---
tags:
- Tag
"""
@op(response_schema=List[Task])
def get(self):
"""
---
summary: Valid method summary
responses:
200:
description: Valid response description
"""
pass
@invalid_tag_missing_description_routes.view("/tasks")
class NoTagDescriptionPath(SqlApiPath):
""" "
---
tags:
- name: Task
description: Simple description
- Random
"""
pass
|
[
"aiohttp.web.RouteTableDef",
"openapi.exc.JsonHttpException",
"aiohttp.web.Response",
"openapi.spec.op"
] |
[((384, 403), 'aiohttp.web.RouteTableDef', 'web.RouteTableDef', ([], {}), '()\n', (401, 403), False, 'from aiohttp import web\n'), ((426, 445), 'aiohttp.web.RouteTableDef', 'web.RouteTableDef', ([], {}), '()\n', (443, 445), False, 'from aiohttp import web\n'), ((482, 501), 'aiohttp.web.RouteTableDef', 'web.RouteTableDef', ([], {}), '()\n', (499, 501), False, 'from aiohttp import web\n'), ((534, 553), 'aiohttp.web.RouteTableDef', 'web.RouteTableDef', ([], {}), '()\n', (551, 553), False, 'from aiohttp import web\n'), ((590, 609), 'aiohttp.web.RouteTableDef', 'web.RouteTableDef', ([], {}), '()\n', (607, 609), False, 'from aiohttp import web\n'), ((651, 670), 'aiohttp.web.RouteTableDef', 'web.RouteTableDef', ([], {}), '()\n', (668, 670), False, 'from aiohttp import web\n'), ((851, 908), 'openapi.spec.op', 'op', ([], {'body_schema': 'List[TaskAdd]', 'response_schema': 'List[Task]'}), '(body_schema=List[TaskAdd], response_schema=List[Task])\n', (853, 908), False, 'from openapi.spec import op\n'), ((1517, 1562), 'openapi.spec.op', 'op', ([], {'body_schema': 'TaskAdd', 'response_schema': 'Task'}), '(body_schema=TaskAdd, response_schema=Task)\n', (1519, 1562), False, 'from openapi.spec import op\n'), ((2212, 2275), 'openapi.spec.op', 'op', ([], {'query_schema': 'TaskOrderableQuery', 'response_schema': 'List[Task]'}), '(query_schema=TaskOrderableQuery, response_schema=List[Task])\n', (2214, 2275), False, 'from openapi.spec import op\n'), ((2868, 2892), 'openapi.spec.op', 'op', ([], {'response_schema': 'Task'}), '(response_schema=Task)\n', (2870, 2892), False, 'from openapi.spec import op\n'), ((3279, 3327), 'openapi.spec.op', 'op', ([], {'body_schema': 'TaskUpdate', 'response_schema': 'Task'}), '(body_schema=TaskUpdate, response_schema=Task)\n', (3281, 3327), False, 'from openapi.spec import op\n'), ((3932, 3936), 'openapi.spec.op', 'op', ([], {}), '()\n', (3934, 3936), False, 'from openapi.spec import op\n'), ((4762, 4788), 'openapi.spec.op', 'op', ([], {'query_schema': 'TaskQuery'}), '(query_schema=TaskQuery)\n', (4764, 4788), False, 'from openapi.spec import op\n'), ((5221, 5278), 'openapi.spec.op', 'op', ([], {'body_schema': 'List[TaskAdd]', 'response_schema': 'List[Task]'}), '(body_schema=List[TaskAdd], response_schema=List[Task])\n', (5223, 5278), False, 'from openapi.spec import op\n'), ((5962, 5986), 'openapi.spec.op', 'op', ([], {'response_schema': 'Task'}), '(response_schema=Task)\n', (5964, 5986), False, 'from openapi.spec import op\n'), ((6316, 6364), 'openapi.spec.op', 'op', ([], {'response_schema': 'Task', 'body_schema': 'TaskUpdate'}), '(response_schema=Task, body_schema=TaskUpdate)\n', (6318, 6364), False, 'from openapi.spec import op\n'), ((6703, 6707), 'openapi.spec.op', 'op', ([], {}), '()\n', (6705, 6707), False, 'from openapi.spec import op\n'), ((7160, 7189), 'openapi.spec.op', 'op', ([], {'response_schema': 'List[int]'}), '(response_schema=List[int])\n', (7162, 7189), False, 'from openapi.spec import op\n'), ((7706, 7756), 'openapi.spec.op', 'op', ([], {'response_schema': 'MultiKey', 'body_schema': 'MultiKey'}), '(response_schema=MultiKey, body_schema=MultiKey)\n', (7708, 7756), False, 'from openapi.spec import op\n'), ((8117, 8151), 'openapi.spec.op', 'op', ([], {'response_schema': 'List[MultiKey]'}), '(response_schema=List[MultiKey])\n', (8119, 8151), False, 'from openapi.spec import op\n'), ((8714, 8776), 'openapi.spec.op', 'op', ([], {'response_schema': 'MultiKeyUnique', 'body_schema': 'MultiKeyUnique'}), '(response_schema=MultiKeyUnique, body_schema=MultiKeyUnique)\n', (8716, 8776), False, 'from openapi.spec import op\n'), ((9137, 9177), 'openapi.spec.op', 'op', ([], {'response_schema': 'List[MultiKeyUnique]'}), '(response_schema=List[MultiKeyUnique])\n', (9139, 9177), False, 'from openapi.spec import op\n'), ((9692, 9729), 'openapi.spec.op', 'op', ([], {'response_schema': 'List[SourcePrice]'}), '(response_schema=List[SourcePrice])\n', (9694, 9729), False, 'from openapi.spec import op\n'), ((10221, 10251), 'openapi.spec.op', 'op', ([], {'response_schema': 'List[Task]'}), '(response_schema=List[Task])\n', (10223, 10251), False, 'from openapi.spec import op\n'), ((10607, 10637), 'openapi.spec.op', 'op', ([], {'response_schema': 'List[Task]'}), '(response_schema=List[Task])\n', (10609, 10637), False, 'from openapi.spec import op\n'), ((7012, 7036), 'aiohttp.web.Response', 'web.Response', ([], {'status': '(204)'}), '(status=204)\n', (7024, 7036), False, 'from aiohttp import web\n'), ((5190, 5214), 'aiohttp.web.Response', 'web.Response', ([], {'status': '(204)'}), '(status=204)\n', (5202, 5214), False, 'from aiohttp import web\n'), ((2114, 2143), 'openapi.exc.JsonHttpException', 'JsonHttpException', ([], {'status': '(500)'}), '(status=500)\n', (2131, 2143), False, 'from openapi.exc import JsonHttpException\n'), ((3834, 3863), 'openapi.exc.JsonHttpException', 'JsonHttpException', ([], {'status': '(500)'}), '(status=500)\n', (3851, 3863), False, 'from openapi.exc import JsonHttpException\n'), ((4429, 4458), 'openapi.exc.JsonHttpException', 'JsonHttpException', ([], {'status': '(500)'}), '(status=500)\n', (4446, 4458), False, 'from openapi.exc import JsonHttpException\n')]
|
import os
count = 0
with open('listHis', 'w') as f:
for root, dirs, files in os.walk('.'):
tzt = [file for file in files if 'His.csv.gz' in file]
for tztfile in tzt:
p = os.path.join(root, tztfile)
f.write(p) ; f.write('\n')
count += 1
if count % 1000 == 0:
print(p)
|
[
"os.walk",
"os.path.join"
] |
[((81, 93), 'os.walk', 'os.walk', (['"""."""'], {}), "('.')\n", (88, 93), False, 'import os\n'), ((202, 229), 'os.path.join', 'os.path.join', (['root', 'tztfile'], {}), '(root, tztfile)\n', (214, 229), False, 'import os\n')]
|
from django.contrib import admin
from dfirtrack_artifacts.models import (
Artifact,
Artifactpriority,
Artifactstatus,
Artifacttype,
)
# Register your models here.
admin.site.register(Artifact)
admin.site.register(Artifactpriority)
admin.site.register(Artifactstatus)
admin.site.register(Artifacttype)
|
[
"django.contrib.admin.site.register"
] |
[((181, 210), 'django.contrib.admin.site.register', 'admin.site.register', (['Artifact'], {}), '(Artifact)\n', (200, 210), False, 'from django.contrib import admin\n'), ((211, 248), 'django.contrib.admin.site.register', 'admin.site.register', (['Artifactpriority'], {}), '(Artifactpriority)\n', (230, 248), False, 'from django.contrib import admin\n'), ((249, 284), 'django.contrib.admin.site.register', 'admin.site.register', (['Artifactstatus'], {}), '(Artifactstatus)\n', (268, 284), False, 'from django.contrib import admin\n'), ((285, 318), 'django.contrib.admin.site.register', 'admin.site.register', (['Artifacttype'], {}), '(Artifacttype)\n', (304, 318), False, 'from django.contrib import admin\n')]
|
import urllib.request
url = 'http://www.ifce.edu.br'
# Obter o conteúdo da página
pagina = urllib.request.urlopen(url)
texto1 = pagina.read().decode('utf-8')
# Outra forma de fazer a mesma coisa ..
import requests
page = requests.get(url)
texto2 = page.content.decode('utf-8')
# Verificamos que todas as linhas são iguais
print(texto1.split('\n') == texto2.split('\n'))
|
[
"requests.get"
] |
[((224, 241), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (236, 241), False, 'import requests\n')]
|
from dataclasses import dataclass, field
__NAMESPACE__ = "http://www.opengis.net/ows"
@dataclass
class Title1:
"""
Title of this resource, normally used for display to a human.
"""
class Meta:
name = "Title"
namespace = "http://www.opengis.net/ows"
value: str = field(
default="",
metadata={
"required": True,
},
)
|
[
"dataclasses.field"
] |
[((303, 349), 'dataclasses.field', 'field', ([], {'default': '""""""', 'metadata': "{'required': True}"}), "(default='', metadata={'required': True})\n", (308, 349), False, 'from dataclasses import dataclass, field\n')]
|
# -*- coding: utf-8 -*-
"""
Inputs
#################
*Module* ``project.user.inputs``
This module defines routes to manage input new inputs of projects from users.
"""
import re, boto3, string
from flask import render_template, redirect, url_for, Response, current_app
from flask_login import login_required, current_user
from . import user_app
from .forms import ProjectInformationForm
from .helpers import upload_file, generate_keyword, convert_into_seconds
from .. import db
from ..models import Project, Annotator, Batch, Tuple, Item
# User - Upload project
@user_app.route('/upload-project', methods=['GET','POST'])
@login_required
def upload_project():
"""
Provide information of a new project from user at ``/user/upload-project``.
Returns:
user profile page at ``/user/<some_name>`` if new valid project is submitted.
Note:
Upload project on Mechanical Turk Platform or use local annotator system.
Error:
Error message emerges if there are invalid fields or there is no logged in user.
"""
# information about project
project_form = ProjectInformationForm()
if project_form.validate_on_submit():
# get data from uploaded file
data = upload_file(project_form.upload.data)
# check if user uploaded empty validated file(s)
if not data:
project_form.upload.errors.append(u'You uploaded only empty file(s)!')
return render_template('user/upload-project.html', form=project_form,
name=current_user.username)
# check if user uploaded too few items
elif data == 1:
project_form.upload.errors.append(u'There are fewer than 5 items!')
return render_template('user/upload-project.html', form=project_form,
name=current_user.username)
# get project name
# if no name was given, then this project has the name 'project - <project-id>'
if not project_form.name.data:
name = 'project-%d'%(len(Project.query.all()))
else:
name = project_form.name.data.strip()
# add link to project page for user to view project information
p_name = ("%s"%(re.sub('[^\w]+', '-', name))).strip('-').strip('_')
# if this name exists already (more than one user have the same project name)
if Project.query.filter_by(name=name).first():
# create link for this new project by adding its id
p_name = '%s-%d'%(p_name,len(Project.query.all()))
# rename the project of this user uses the same name for more than
# one of his project (which rarely happens)
while Project.query.filter_by(name=name, user=current_user).first():
name = '%s (%d)'%(name,len([project for project in current_user.projects \
if project.name==name]))
# if name of the project is unique but the link exists (p_name),
# hope this will happen only once, but to make sure
while Project.query.filter_by(p_name=p_name).first():
p_name = '%s-%d'%(p_name,len(Project.query.all()))
# add new project
current_project = Project(name=name, description=project_form.description.data, \
anno_number=project_form.anno_number.data, \
best_def=project_form.best_def.data, worst_def=project_form.worst_def.data, \
n_items=len(data.items), user=current_user, p_name=p_name, \
mturk=project_form.mturk.data)
# user wants to upload this project on Mechanical Turk Market
if project_form.mturk.data:
# use the aws_access_key_id and aws_secret_access_key given from user
# if this is not found in configuration
aws_access_key_id = current_app.config['AWS_ACCESS_KEY_ID'] \
if current_app.config['AWS_ACCESS_KEY_ID'] \
else project_form.aws_access_key_id.data
aws_secret_access_key = current_app.config['AWS_SECRET_ACCESS_KEY'] \
if current_app.config['AWS_SECRET_ACCESS_KEY'] \
else project_form.aws_secret_access_key.data
# check if the user uses default values which never exists
check = []
if aws_access_key_id == project_form.aws_access_key_id.default:
project_form.aws_access_key_id.errors.append("""You must specify your own
aws_access_key_id, default does not exist!""")
check.append(True)
if aws_secret_access_key == project_form.aws_secret_access_key.default:
project_form.aws_secret_access_key.errors.append("""You must specify your own
aws_secret_access_key, default does not exist!""")
check.append(True)
if any(check):
return render_template('user/upload-project.html', form=project_form,
name=current_user.username)
mturk = boto3.client(service_name='mturk',
aws_access_key_id = aws_access_key_id,
aws_secret_access_key = aws_secret_access_key,
region_name='us-east-1',
endpoint_url = current_app.config['MTURK_URL'])
# define endpoint to a HIT using generated hit_id
hit_ids = set()
hit_code = generate_keyword(chars=string.ascii_letters, k_length=3)
# user wants to choose annotators themselves (they want to use our local system)
else:
# add keywords for annotators in local system
for num_anno in range(project_form.anno_number.data):
new_keyword = generate_keyword()
# make sure the new created keyword is never used for any annotator of any project
while Annotator.query.filter_by(keyword=new_keyword).first():
new_keyword = generate_keyword()
# add new key word
Annotator(keyword=new_keyword, project=current_project)
# add batches, tuples and items
for i, tuples_ in data.batches.items():
# create keyword for each batch to upload this project on Mechanical Turk Market
if project_form.mturk.data:
new_keyword = generate_keyword()
# make sure the new created keyword is never used for any batch of any project
while Batch.query.filter_by(keyword=new_keyword).first():
new_keyword = generate_keyword()
# create this HIT on MTurk
# create HIT_ID for the batch in local system (has nothing to do with HITID on MTurk)
new_hit_id = hit_code+generate_keyword(chars=string.digits)
while new_hit_id in hit_ids:
new_hit_id = hit_code+generate_keyword(chars=string.digits)
hit_ids.add(new_hit_id)
# get url for the hit to save on corresponding one on MTurk
url = url_for('mturk.hit', p_name=p_name, hit_id=new_hit_id, _external=True)
# define the questions.xml template with the type box for keyword
response = Response(render_template('questions.xml', title=project_form.name.data,
description=project_form.description.data, url=url),
mimetype='text/plain')
response.implicit_sequence_conversion = False
question = response.get_data(as_text=True)
# get information from user for creating hit on MTurk
p_keyword = project_form.keywords.data
p_reward = project_form.reward.data
lifetime = convert_into_seconds(duration=project_form.lifetime.data, \
unit=project_form.lifetimeunit.data)
hit_duration = convert_into_seconds(duration=project_form.hit_duration.data, \
unit=project_form.duration_unit.data)
# create new hit on MTurk
new_hit = mturk.create_hit(
Title = project_form.name.data,
Description = project_form.description.data,
Keywords = p_keyword,
Reward = p_reward,
MaxAssignments = project_form.anno_number.data,
LifetimeInSeconds = lifetime,
AssignmentDurationInSeconds = hit_duration,
Question = question,
AssignmentReviewPolicy = {
'PolicyName':'ScoreMyKnownAnswers/2011-09-01',
'Parameters': [
{'Key':'AnswerKey',
'MapEntries':[{ 'Key':'keyword',
'Values':[new_keyword]
}]
},
{'Key':'ApproveIfKnownAnswerScoreIsAtLeast',
'Values':['1']
},
{'Key':'RejectIfKnownAnswerScoreIsLessThan',
'Values':['1']
},
{'Key':'RejectReason',
'Values':['''Sorry, we could not approve your submission
as you did not type in the right keyword.''']
}
]
}
)
# no need to create keyword and hit_id for batch as this is for the local process
else:
new_keyword = new_hit_id = None
# add new batch
current_batch = Batch(project=current_project, size=len(tuples_),
keyword=new_keyword, hit_id=new_hit_id)
for tuple_ in tuples_:
# add new tuple
current_tuple = Tuple(batch=current_batch)
for item in tuple_:
# check if this item is already saved in the database
if Item.query.filter_by(item=item).first():
current_tuple.items.append(Item.query.filter_by(item=item).first())
else:
new_item = Item(item=item)
current_tuple.items.append(new_item)
db.session.commit()
return redirect(url_for('user.profile', some_name=current_user.username))
return render_template('user/upload-project.html', form=project_form, name=current_user.username)
|
[
"flask.url_for",
"re.sub",
"flask.render_template",
"boto3.client"
] |
[((8916, 9011), 'flask.render_template', 'render_template', (['"""user/upload-project.html"""'], {'form': 'project_form', 'name': 'current_user.username'}), "('user/upload-project.html', form=project_form, name=\n current_user.username)\n", (8931, 9011), False, 'from flask import render_template, redirect, url_for, Response, current_app\n'), ((1367, 1462), 'flask.render_template', 'render_template', (['"""user/upload-project.html"""'], {'form': 'project_form', 'name': 'current_user.username'}), "('user/upload-project.html', form=project_form, name=\n current_user.username)\n", (1382, 1462), False, 'from flask import render_template, redirect, url_for, Response, current_app\n'), ((4485, 4680), 'boto3.client', 'boto3.client', ([], {'service_name': '"""mturk"""', 'aws_access_key_id': 'aws_access_key_id', 'aws_secret_access_key': 'aws_secret_access_key', 'region_name': '"""us-east-1"""', 'endpoint_url': "current_app.config['MTURK_URL']"}), "(service_name='mturk', aws_access_key_id=aws_access_key_id,\n aws_secret_access_key=aws_secret_access_key, region_name='us-east-1',\n endpoint_url=current_app.config['MTURK_URL'])\n", (4497, 4680), False, 'import re, boto3, string\n'), ((8847, 8903), 'flask.url_for', 'url_for', (['"""user.profile"""'], {'some_name': 'current_user.username'}), "('user.profile', some_name=current_user.username)\n", (8854, 8903), False, 'from flask import render_template, redirect, url_for, Response, current_app\n'), ((1609, 1704), 'flask.render_template', 'render_template', (['"""user/upload-project.html"""'], {'form': 'project_form', 'name': 'current_user.username'}), "('user/upload-project.html', form=project_form, name=\n current_user.username)\n", (1624, 1704), False, 'from flask import render_template, redirect, url_for, Response, current_app\n'), ((4369, 4464), 'flask.render_template', 'render_template', (['"""user/upload-project.html"""'], {'form': 'project_form', 'name': 'current_user.username'}), "('user/upload-project.html', form=project_form, name=\n current_user.username)\n", (4384, 4464), False, 'from flask import render_template, redirect, url_for, Response, current_app\n'), ((6165, 6235), 'flask.url_for', 'url_for', (['"""mturk.hit"""'], {'p_name': 'p_name', 'hit_id': 'new_hit_id', '_external': '(True)'}), "('mturk.hit', p_name=p_name, hit_id=new_hit_id, _external=True)\n", (6172, 6235), False, 'from flask import render_template, redirect, url_for, Response, current_app\n'), ((6331, 6450), 'flask.render_template', 'render_template', (['"""questions.xml"""'], {'title': 'project_form.name.data', 'description': 'project_form.description.data', 'url': 'url'}), "('questions.xml', title=project_form.name.data, description=\n project_form.description.data, url=url)\n", (6346, 6450), False, 'from flask import render_template, redirect, url_for, Response, current_app\n'), ((2031, 2059), 're.sub', 're.sub', (['"""[^\\\\w]+"""', '"""-"""', 'name'], {}), "('[^\\\\w]+', '-', name)\n", (2037, 2059), False, 'import re, boto3, string\n')]
|
#!/usr/bin/env python3
import subprocess
from ruamel.yaml import YAML
import glob
import re
import argparse
class Semver:
def __init__(self, major: int, minor: int, patch: int):
self.major = major
self.minor = minor
self.patch = patch
def incr_major(self):
self.major = self.major + 1
self.patch = 0
self.minor = 0
def incr_minor(self):
self.minor = self.minor + 1
self.patch = 0
def incr_patch(self):
self.patch = self.patch + 1
def to_string(self) -> str:
return F'{self.major}.{self.minor}.{self.patch}'
@classmethod
def parse(self, input_str: str):
# Parse and validate, return new instance of Semver
if re.fullmatch(r'^([0-9]+)\.([0-9]+)\.([0-9]+)$', input_str):
split_list = input_str.split('.')
split_list = [int(i) for i in split_list]
return self(*split_list)
raise Exception(F'Invalid input version value: {input_str}')
def update_charts(update_func):
yaml = YAML()
yaml.indent(mapping = 2, sequence=4, offset=2)
main_dir = subprocess.run(["git", "rev-parse", "--show-toplevel"], check=True, stdout=subprocess.PIPE).stdout.strip().decode('utf-8')
search_path = F'{main_dir}/charts/**/Chart.yaml'
for path in glob.glob(search_path, recursive=True):
if re.match('^.*cass-operator.*', path):
continue
with open(path) as f:
chart = yaml.load(f)
semver = Semver.parse(chart['version'])
update_func(semver)
chart['version'] = semver.to_string()
with open(path, 'w') as f:
yaml.dump(chart, f)
print(F'Updated {path} to {semver.to_string()}')
def main():
parser = argparse.ArgumentParser(description='Update Helm chart versions in k8ssandra project')
parser.add_argument('--incr', choices=['major', 'minor', 'patch'], help='increase part of semver by one')
args = parser.parse_args()
if args.incr:
if args.incr == 'major':
update_charts(Semver.incr_major)
elif args.incr == 'minor':
update_charts(Semver.incr_minor)
elif args.incr == 'patch':
update_charts(Semver.incr_patch)
if __name__ == "__main__":
main()
|
[
"subprocess.run",
"re.fullmatch",
"argparse.ArgumentParser",
"re.match",
"ruamel.yaml.YAML",
"glob.glob"
] |
[((1050, 1056), 'ruamel.yaml.YAML', 'YAML', ([], {}), '()\n', (1054, 1056), False, 'from ruamel.yaml import YAML\n'), ((1315, 1353), 'glob.glob', 'glob.glob', (['search_path'], {'recursive': '(True)'}), '(search_path, recursive=True)\n', (1324, 1353), False, 'import glob\n'), ((1773, 1864), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Update Helm chart versions in k8ssandra project"""'}), "(description=\n 'Update Helm chart versions in k8ssandra project')\n", (1796, 1864), False, 'import argparse\n'), ((738, 797), 're.fullmatch', 're.fullmatch', (['"""^([0-9]+)\\\\.([0-9]+)\\\\.([0-9]+)$"""', 'input_str'], {}), "('^([0-9]+)\\\\.([0-9]+)\\\\.([0-9]+)$', input_str)\n", (750, 797), False, 'import re\n'), ((1367, 1403), 're.match', 're.match', (['"""^.*cass-operator.*"""', 'path'], {}), "('^.*cass-operator.*', path)\n", (1375, 1403), False, 'import re\n'), ((1123, 1219), 'subprocess.run', 'subprocess.run', (["['git', 'rev-parse', '--show-toplevel']"], {'check': '(True)', 'stdout': 'subprocess.PIPE'}), "(['git', 'rev-parse', '--show-toplevel'], check=True, stdout=\n subprocess.PIPE)\n", (1137, 1219), False, 'import subprocess\n')]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import requests
import logging
import time
import re
logger = logging.getLogger('data')
USER_AGENT = "<NAME> (for academic research) <<EMAIL>>"
default_requests_session = requests.Session()
default_requests_session.headers['User-Agent'] = USER_AGENT
def make_request(method, *args, **kwargs):
# We read the max_attempts and retry_delay arguments from the kwargs dictionary
# instead of named kwargs because we want to preserve the order of the
# "request" method's positional arguments for clients of this method.
max_attempts = kwargs.get('max_attempts', 2)
retry_delay = kwargs.get('retry_delay', 10)
try_again = True
attempts = 0
res = None
def log_error(err_msg):
logger.warning(
"Error (%s) For API call %s, Args: %s, Kwargs: %s",
str(err_msg), str(method), str(args), str(kwargs)
)
while try_again and attempts < max_attempts:
try:
res = method(*args, **kwargs)
if hasattr(res, 'status_code') and res.status_code not in [200]:
log_error(str(res.status_code))
res = None
try_again = False
except requests.exceptions.ConnectionError:
log_error("ConnectionError")
except requests.exceptions.ReadTimeout:
log_error("ReadTimeout")
if try_again:
logger.warning("Waiting %d seconds for before retrying.", int(retry_delay))
time.sleep(retry_delay)
attempts += 1
return res
def _get_mendeley_item_count(response):
'''
Returns and integer if a count was found, otherwise returns None.
See https://dev.mendeley.com/reference/topics/pagination.html for more documentation.
'''
if 'mendeley-count' not in response.headers:
return None
count = response.headers['mendeley-count']
count_int = None
try:
count_int = int(count)
except ValueError as e:
logger.warning("Unexpected item count %s: %s", count, e)
return count_int
def _get_next_page_url(response):
# If there is no "Link" header, then there is no next page
header = None
if 'Link' in response.headers:
header = response.headers['Link']
elif 'link' in response.headers:
header = response.headers['link']
if header is None:
return None
# Extract the next URL from the Link header.
next_url = None
next_url_match = re.search("<([^>]*)>; rel=\"next\"", header)
if next_url_match is not None:
next_url = next_url_match.group(1)
return next_url
|
[
"requests.Session",
"re.search",
"logging.getLogger",
"time.sleep"
] |
[((152, 177), 'logging.getLogger', 'logging.getLogger', (['"""data"""'], {}), "('data')\n", (169, 177), False, 'import logging\n'), ((263, 281), 'requests.Session', 'requests.Session', ([], {}), '()\n', (279, 281), False, 'import requests\n'), ((2537, 2579), 're.search', 're.search', (['"""<([^>]*)>; rel="next\\""""', 'header'], {}), '(\'<([^>]*)>; rel="next"\', header)\n', (2546, 2579), False, 'import re\n'), ((1550, 1573), 'time.sleep', 'time.sleep', (['retry_delay'], {}), '(retry_delay)\n', (1560, 1573), False, 'import time\n')]
|
import numpy as np
with open("data.txt") as f:
draws = np.array([int(d) for d in f.readline().split(",")])
boards = np.array([[[int(n) for n in r.split()] for r in b.split("\n")] for b in f.read()[1:].split("\n\n")])
def bingo(data: np.ndarray, fill: int):
"""
Returns horizontal (rows) and vertical (columns) bingo. TRUE if bingo. FALSE if not.
"""
transposed_data = np.transpose(data)
return any(np.equal(data, [fill for _ in range(5)]).all(1)) or \
any(np.equal(transposed_data, [fill for _ in range(5)]).all(1))
def one(d_data: np.ndarray, b_data: np.ndarray) -> int:
"""
To guarantee victory against the giant squid, figure out which board will win first.
What will your final score be if you choose that board?
"""
# If number is drawn, replace with {fill}
fill = -1
for draw in d_data:
# Replace drawn number by -1
b_data = np.where(b_data == draw, fill, b_data)
for board in b_data:
if bingo(board, fill):
return np.sum(np.where(board == fill, 0, board)) * draw
return -1
def two(d_data: np.ndarray, b_data: np.ndarray) -> int:
"""
Figure out which board will win last. Once it wins, what would its final score be?
"""
# If number is drawn, replace with {fill}
fill = -1
# List of completed bingo boards
completed_idx = []
for draw in d_data:
# Replace drawn number by -1
b_data = np.where(b_data == draw, fill, b_data)
for board, i in zip(b_data, range(len(b_data))):
if bingo(board, fill) and i not in completed_idx:
completed_idx.append(i)
if len(completed_idx) == len(b_data):
return np.sum(np.where(board == fill, 0, board)) * draw
return -1
print(f"1. {one(draws, boards)}")
print(f"2. {two(draws, boards)}")
|
[
"numpy.where",
"numpy.transpose"
] |
[((396, 414), 'numpy.transpose', 'np.transpose', (['data'], {}), '(data)\n', (408, 414), True, 'import numpy as np\n'), ((924, 962), 'numpy.where', 'np.where', (['(b_data == draw)', 'fill', 'b_data'], {}), '(b_data == draw, fill, b_data)\n', (932, 962), True, 'import numpy as np\n'), ((1478, 1516), 'numpy.where', 'np.where', (['(b_data == draw)', 'fill', 'b_data'], {}), '(b_data == draw, fill, b_data)\n', (1486, 1516), True, 'import numpy as np\n'), ((1058, 1091), 'numpy.where', 'np.where', (['(board == fill)', '(0)', 'board'], {}), '(board == fill, 0, board)\n', (1066, 1091), True, 'import numpy as np\n'), ((1766, 1799), 'numpy.where', 'np.where', (['(board == fill)', '(0)', 'board'], {}), '(board == fill, 0, board)\n', (1774, 1799), True, 'import numpy as np\n')]
|
from module import main
main()
|
[
"module.main"
] |
[((24, 30), 'module.main', 'main', ([], {}), '()\n', (28, 30), False, 'from module import main\n')]
|
##############################################################
# #
# Automatic design of an #
# Asymmetrical Inverted Schmitt-Trigger with Single Supply #
# for E24 resistors scale #
# with tolerance analysis #
# #
##############################################################
# Author: <NAME> #
# Date: 2019.08.30 #
# License: MIT Open Source License #
# Description: This is a simple program to make the #
# automatic design of an Asymmetrical Inverted #
# Schmitt-Trigger with Single Supply, with #
# resistors from E24 scale. Typically used for #
# 1%, but in this case used for 5% or 0.1% . #
# The input is V_supply, V_low_threshold, #
# V_high_threshold and Resistor_tolerance_perc. #
# It works by making the full search of all #
# combinations of values from E24 to identify #
# the best ones. #
# In this way it speeds up immensely the manual #
# experimentation. It also makes resistor #
# tolerance analysis. Please see the schematic #
# diagram on the GitHub page. #
##############################################################
#######
# Please fill the following 4 program variables to your
# specification, see schematic diagram.
# VCC voltage in volts.
VCC = 5.0
# Input Voltage low threshold in volts.
V_low_threshold_target = 0.555
# Input Voltage high threshold in volts.
V_high_threshold_target = 0.575
# Resistor tolerance percentage 5.0%, 1.0%, 0.1%, one of this values [5.0, 1.0, 0.1].
Resistor_tolerance_perc = 1.0
#######
# Start of program.
import math
# E24 Standard resistor series.
E24_values = [1.0, 1.1, 1.2, 1.3, 1.5, 1.6, 1.8, 2.0, 2.2,
2.4, 2.7, 3.0, 3.3, 3.6, 3.9, 4.3, 4.7, 5.1,
5.6, 6.2, 6.8, 7.5, 8.2, 9.1]
# The scales of the resistor values so that an OpAmp circuit is stable,
# normally are between 1K and 100K, but I use a extended version from
# 100 Ohms to 1MOhms.
scales = [100, 1000, 10000, 100000]
def consistency_testing(VCC, V_low_threshold_target, V_high_threshold_target):
passed_tests = True
if not ( 0 < VCC):
print("Error in specification VCC, it has to be: 0 < VCC")
passed_tests = False
if not (V_low_threshold_target < V_high_threshold_target):
print("Error in specification, it has to be: V_low_threshold_target < V_high_threshold_target")
passed_tests = False
if not (0 <= V_low_threshold_target <= VCC):
print("Error in specification, it has to be: 0 <= V_low_threshold_target <= VCC")
passed_tests = False
if not (0 <= V_high_threshold_target <= VCC):
print("Error in specification, it has to be: 0 <= V_high_threshold_target <= VCC")
passed_tests = False
if Resistor_tolerance_perc not in [5.0, 1.0, 0.1]:
print("Error in specification Resistor_tolerance_perc, it has to be: 5.0, 1.0 or 0.1")
passed_tests = False
return passed_tests
def expansion_of_E24_values_for_range(E24_values, scales):
values_list = []
for scale in scales:
for val in E24_values:
value = val * scale
values_list.append(value)
return values_list
def calc_voltage_thresholds_for_circuit(VCC, R1, R2, R3):
V_low_threshold = 0.0
V_high_threshold = 0.0
# Calc V_low_threshold.
R_total_low = (R2 * R3) / float((R2 + R3))
V_low_threshold = VCC * R_total_low / float((R1 + R_total_low))
# Calc V_high_threshold.
R_total_high = (R1 * R3) / float((R1 + R3))
V_high_threshold = VCC * R2 / float((R2 + R_total_high))
return (V_low_threshold, V_high_threshold)
def calc_square_distance_error(V_low_threshold_target, V_high_threshold_target,
V_low_threshold_obtained, V_high_threshold_obtained):
res = math.sqrt( math.pow(V_low_threshold_target - V_low_threshold_obtained, 2) +
math.pow(V_high_threshold_target - V_high_threshold_obtained, 2) )
return res
def full_search_of_resister_values(values_list, VCC, V_low_threshold_target, V_high_threshold_target):
best_error = 1000000000.0
best_V_low_threshold = -1000.0
best_V_high_threshold = -1000.0
best_R1 = -1000.0
best_R2 = -1000.0
best_R3 = -1000.0
for R1 in values_list:
for R2 in values_list:
for R3 in values_list:
res = calc_voltage_thresholds_for_circuit(VCC, R1, R2, R3)
V_low_threshold_obtained, V_high_threshold_obtained = res
error = calc_square_distance_error(V_low_threshold_target, V_high_threshold_target,
V_low_threshold_obtained, V_high_threshold_obtained)
if error < best_error:
best_error = error
best_V_low_threshold = V_low_threshold_obtained
best_V_high_threshold = V_high_threshold_obtained
best_R1 = R1
best_R2 = R2
best_R3 = R3
return (best_error, best_V_low_threshold, best_V_high_threshold, best_R1, best_R2, best_R3)
def expand_resistor_vals_tolerance(R_val, Resistor_tolerance_perc):
resistor_vals = []
delta = R_val * Resistor_tolerance_perc * 0.01
resistor_vals.append(R_val - delta)
resistor_vals.append(R_val)
resistor_vals.append(R_val + delta)
return resistor_vals
def calc_absolute_distance_error(V_low_threshold_target, V_high_threshold_target,
V_low_threshold_obtained, V_high_threshold_obtained):
res = (math.fabs(V_low_threshold_target - V_low_threshold_obtained)
+ math.fabs(V_high_threshold_target - V_high_threshold_obtained))
return res
def worst_tolerance_resistor_analysis(VCC, V_low_threshold_target, V_high_threshold_target,
R1_nominal, R2_nominal, R3_nominal, Resistor_tolerance_perc):
worst_error = 0.0
worst_V_low_threshold = 0.00000001
worst_V_high_threshold = 0.00000001
R1_values = expand_resistor_vals_tolerance(R1_nominal, Resistor_tolerance_perc)
R2_values = expand_resistor_vals_tolerance(R2_nominal, Resistor_tolerance_perc)
R3_values = expand_resistor_vals_tolerance(R3_nominal, Resistor_tolerance_perc)
for R1 in R1_values:
for R2 in R2_values:
for R3 in R3_values:
res = calc_voltage_thresholds_for_circuit(VCC, R1, R2, R3)
V_low_threshold_obtained, V_high_threshold_obtained = res
error = calc_absolute_distance_error(V_low_threshold_target, V_high_threshold_target,
V_low_threshold_obtained, V_high_threshold_obtained)
if error > worst_error:
worst_error = error
worst_V_low_threshold = V_low_threshold_obtained
worst_V_high_threshold = V_high_threshold_obtained
return (worst_error, worst_V_low_threshold, worst_V_high_threshold)
def main():
print("##############################################################")
print("# #")
print("# Automatic design of an #")
print("# Asymmetrical Inverted Schmitt-Trigger with Single Supply #")
print("# for E24 resistors scale #")
print("# with tolerance analysis #")
print("# #")
print("##############################################################")
print("")
print("### Specification:")
print("VCC: ", VCC, " Volts")
print("V_low_threshold_target: ", V_low_threshold_target, " Volts")
print("V_high_threshold_target: ", V_high_threshold_target, " Volts")
print("Resistor_tolerance_perc: ", Resistor_tolerance_perc, " %")
print("")
passed_tests = consistency_testing(VCC, V_low_threshold_target, V_high_threshold_target)
if passed_tests == False:
return
values_list = expansion_of_E24_values_for_range(E24_values, scales)
res = full_search_of_resister_values(values_list, VCC, V_low_threshold_target, V_high_threshold_target)
best_error, V_low_threshold_obtained, V_high_threshold_obtained, best_R1, best_R2, best_R3 = res
print("### Solution")
print("Best_error: ", best_error)
print("V_low_threshold_obtained: ", V_low_threshold_obtained, " Volts, delta: ",
math.fabs(V_low_threshold_target - V_low_threshold_obtained), " Volts" )
print("V_high_threshold_obtained: ", V_high_threshold_obtained, " Volts, delta: ",
math.fabs(V_high_threshold_target - V_high_threshold_obtained), " Volts" )
print("Best_R1: ", best_R1, " Ohms 1%")
print("Best_R2: ", best_R2, " Ohms 1%")
print("Best_R3: ", best_R3, " Ohms 1%")
print("")
res = worst_tolerance_resistor_analysis(VCC, V_low_threshold_target, V_high_threshold_target,
best_R1, best_R2, best_R3, Resistor_tolerance_perc)
worst_error, worst_V_low_threshold_obtained, worst_V_high_threshold_obtained = res
print("### Resistor tolerance analysis")
print("Worst_error: ", worst_error)
print("Worst V_low_threshold_obtained: ", worst_V_low_threshold_obtained, " Volts, delta: ",
math.fabs(V_low_threshold_target - worst_V_low_threshold_obtained), " Volts" )
print("Worst V_high_threshold_obtained: ", worst_V_high_threshold_obtained, " Volts, delta: ",
math.fabs(V_high_threshold_target - worst_V_high_threshold_obtained), " Volts" )
if __name__ == "__main__":
main()
|
[
"math.pow",
"math.fabs"
] |
[((6085, 6145), 'math.fabs', 'math.fabs', (['(V_low_threshold_target - V_low_threshold_obtained)'], {}), '(V_low_threshold_target - V_low_threshold_obtained)\n', (6094, 6145), False, 'import math\n'), ((6161, 6223), 'math.fabs', 'math.fabs', (['(V_high_threshold_target - V_high_threshold_obtained)'], {}), '(V_high_threshold_target - V_high_threshold_obtained)\n', (6170, 6223), False, 'import math\n'), ((9054, 9114), 'math.fabs', 'math.fabs', (['(V_low_threshold_target - V_low_threshold_obtained)'], {}), '(V_low_threshold_target - V_low_threshold_obtained)\n', (9063, 9114), False, 'import math\n'), ((9227, 9289), 'math.fabs', 'math.fabs', (['(V_high_threshold_target - V_high_threshold_obtained)'], {}), '(V_high_threshold_target - V_high_threshold_obtained)\n', (9236, 9289), False, 'import math\n'), ((9911, 9977), 'math.fabs', 'math.fabs', (['(V_low_threshold_target - worst_V_low_threshold_obtained)'], {}), '(V_low_threshold_target - worst_V_low_threshold_obtained)\n', (9920, 9977), False, 'import math\n'), ((10102, 10170), 'math.fabs', 'math.fabs', (['(V_high_threshold_target - worst_V_high_threshold_obtained)'], {}), '(V_high_threshold_target - worst_V_high_threshold_obtained)\n', (10111, 10170), False, 'import math\n'), ((4304, 4366), 'math.pow', 'math.pow', (['(V_low_threshold_target - V_low_threshold_obtained)', '(2)'], {}), '(V_low_threshold_target - V_low_threshold_obtained, 2)\n', (4312, 4366), False, 'import math\n'), ((4391, 4455), 'math.pow', 'math.pow', (['(V_high_threshold_target - V_high_threshold_obtained)', '(2)'], {}), '(V_high_threshold_target - V_high_threshold_obtained, 2)\n', (4399, 4455), False, 'import math\n')]
|
import json
from typing import Any, Dict, List
from contrib.descriptions import VulnDescriptionProvider
from contrib.internal_types import ScanResult
from contrib.report_builders import ReportBuilder
class JsonReportBuilder(ReportBuilder):
def __init__(self, description_provider: VulnDescriptionProvider):
self.description_provider = description_provider
self._buffer = {'ips': [], 'vulnerable': {}, 'not_vulnerable': {}}
def init_report(self, start_date: str, nmap_command: str):
self._buffer['start_date'] = start_date
self._buffer['nmap_command'] = nmap_command
def build(self) -> Any:
return json.dumps(self._buffer)
def add_vulnerable_services(self, scan_results: Dict[str, ScanResult]):
for app_name, result in scan_results.items():
self._buffer['vulnerable'][app_name] = {
'vulnerabilities': [],
'locations': self._serialize_locations(result.locations)
}
for v in result.vulns:
data = v.to_dict()
description = self.description_provider.get_description(v.name, v.vuln_type)
data['description'], data['url'] = description.text, description.url
self._buffer['vulnerable'][app_name]['vulnerabilities'].append(data)
def add_non_vulnerable_services(self, scan_results: Dict[str, ScanResult]):
for app_name, result in scan_results.items():
self._buffer['not_vulnerable'][app_name] = {
'locations': self._serialize_locations(result.locations)
}
def add_ip_address(self, ip: str):
self._buffer['ips'].append(ip)
@staticmethod
def _serialize_locations(locations: Dict[str, List[str]]):
return {loc: [int(port) for port in ports] for loc, ports in locations.items()}
|
[
"json.dumps"
] |
[((654, 678), 'json.dumps', 'json.dumps', (['self._buffer'], {}), '(self._buffer)\n', (664, 678), False, 'import json\n')]
|
import pytest
from src.users.role import Role
from .presets.role import role_metrics
@pytest.mark.parametrize(
("name, can_edit_settings, can_create_users," "can_edit_roles, can_manage_roles"),
role_metrics,
)
def test_create_role_object(
name,
can_edit_settings,
can_create_users,
can_edit_roles,
can_manage_roles,
database,
):
role = Role(
name=name,
can_edit_settings=can_edit_settings,
can_create_users=can_create_users,
can_edit_roles=can_edit_roles,
can_manage_roles=can_manage_roles,
)
database.session.add(role)
database.session.commit()
|
[
"pytest.mark.parametrize",
"src.users.role.Role"
] |
[((89, 214), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""name, can_edit_settings, can_create_users,can_edit_roles, can_manage_roles"""', 'role_metrics'], {}), "(\n 'name, can_edit_settings, can_create_users,can_edit_roles, can_manage_roles'\n , role_metrics)\n", (112, 214), False, 'import pytest\n'), ((375, 538), 'src.users.role.Role', 'Role', ([], {'name': 'name', 'can_edit_settings': 'can_edit_settings', 'can_create_users': 'can_create_users', 'can_edit_roles': 'can_edit_roles', 'can_manage_roles': 'can_manage_roles'}), '(name=name, can_edit_settings=can_edit_settings, can_create_users=\n can_create_users, can_edit_roles=can_edit_roles, can_manage_roles=\n can_manage_roles)\n', (379, 538), False, 'from src.users.role import Role\n')]
|
# -*- coding: utf-8 -*-
import time, os, io, picamera, threading
from flask import Flask, request, session, url_for, redirect, render_template, g, Response, send_file
# configuration
DATABASE = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'minitwit.db')
DEBUG = False
SECRET_KEY = 'This is a very secrey key.'
# PiCam, don't initialize the camera unless required.
class PiCam:
def __init__(self):
self.cam = None
self.time = 0
self.lock = threading.Lock()
self.s = io.BytesIO()
pass
def init_cam(self):
self.cam = picamera.PiCamera()
self.cam.start_preview()
self.cam.vflip = True
self.cam.hflip = True
time.sleep(2)
pass
def dup_stream(self):
_s = io.BytesIO()
self.lock.acquire()
### THREAD LOCK BEGIN
self.s.seek(0)
_s.write(self.s.read())
### THREAD LOCK END
self.lock.release()
_s.seek(0)
return _s
def capture(self):
if (self.cam is None):
self.init_cam()
_t = time.time()
# 30fps: 0.0333...
if (_t - self.time > 0.02):
self.time = _t
self.lock.acquire()
### THREAD LOCK BEGIN
self.s.seek(0)
self.cam.capture(self.s, 'png')
### THREAD LOCK END
self.lock.release()
return self.dup_stream()
# create our little application :)
app = Flask(__name__)
app.config.from_object(__name__)
app.config.from_envvar('PICAM_SETTINGS', silent=True)
@app.route('/')
def the_camera():
return render_template('index.html')
@app.route('/login')
def login():
return 'TODO: Login';
my_cam = PiCam()
@app.route('/capture')
def capture():
return send_file(my_cam.capture(), mimetype='image/png')
if __name__ == "__main__":
app.run(host='0.0.0.0')
while True:
pass
|
[
"io.BytesIO",
"os.path.realpath",
"flask.Flask",
"time.sleep",
"threading.Lock",
"time.time",
"flask.render_template",
"picamera.PiCamera"
] |
[((1335, 1350), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1340, 1350), False, 'from flask import Flask, request, session, url_for, redirect, render_template, g, Response, send_file\n'), ((1482, 1511), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (1497, 1511), False, 'from flask import Flask, request, session, url_for, redirect, render_template, g, Response, send_file\n'), ((225, 251), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (241, 251), False, 'import time, os, io, picamera, threading\n'), ((469, 485), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (483, 485), False, 'import time, os, io, picamera, threading\n'), ((499, 511), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (509, 511), False, 'import time, os, io, picamera, threading\n'), ((561, 580), 'picamera.PiCamera', 'picamera.PiCamera', ([], {}), '()\n', (578, 580), False, 'import time, os, io, picamera, threading\n'), ((666, 679), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (676, 679), False, 'import time, os, io, picamera, threading\n'), ((725, 737), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (735, 737), False, 'import time, os, io, picamera, threading\n'), ((999, 1010), 'time.time', 'time.time', ([], {}), '()\n', (1008, 1010), False, 'import time, os, io, picamera, threading\n')]
|
import pickle
from pathlib import Path
from datetime import datetime as dt
def storage_timestamp(t=dt.now(), storage_dir=Path.home() / ".pyx_store", ext="p"):
"""
Return a timestamp now suitable to create a file path as:
yy/mm/dd/hh-mm-ss
By default under `~/.pyx_store/` (`storage_dir`) with file extension `.p` (`ext`):
~/.pyx_store/yy/mm/dd/hh-mm-ss.p
Assuming you will never pickle a workspace representation more than once per second,
this can be used as a path into `~/.pyx_store/yy/mm/dd/hh_mm_ss`.
"""
assert type(t) is dt, TypeError("Time isn't a datetime.datetime instance")
datestamp, timestamp = t.isoformat().split("T")
datestamp = datestamp.replace("-", "/")
timestamp = timestamp[:timestamp.find(".")].replace(":", "-")
subpath = storage_dir / Path(datestamp)
storage_path = storage_dir / subpath
storage_path.mkdir(parents=True, exist_ok=True)
file_name = f"{timestamp}.{ext}"
return storage_path / file_name
def pickle_vars(local_names=vars()):
checklist = ["ff_session", "x_session", "wm_territory", "tmux_server"]
pickle_filepath = storage_timestamp() # Create a dir for today's date under ~/.pyx_store/
storables = []
for var_name in checklist:
if var_name in local_names:
storables.append(local_names[var_name])
with open(pickle_filepath, "wb") as f:
pickle.dump(storables, file=f, protocol=-1)
return
|
[
"pathlib.Path",
"datetime.datetime.now",
"pathlib.Path.home",
"pickle.dump"
] |
[((100, 108), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (106, 108), True, 'from datetime import datetime as dt\n'), ((122, 133), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (131, 133), False, 'from pathlib import Path\n'), ((832, 847), 'pathlib.Path', 'Path', (['datestamp'], {}), '(datestamp)\n', (836, 847), False, 'from pathlib import Path\n'), ((1410, 1453), 'pickle.dump', 'pickle.dump', (['storables'], {'file': 'f', 'protocol': '(-1)'}), '(storables, file=f, protocol=-1)\n', (1421, 1453), False, 'import pickle\n')]
|
# Copyright (c) 2013--2014 King's College London
# Created by the Software Development Team <http://soft-dev.org/>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import sys
try:
# checkout https://github.com/Brittix1023/mipy into eco/lib/
sys.path.append("../mipy")
from mipy import kernel, request_listener
has_mipy = True
except ImportError:
has_mipy = False
from incparser.astree import TextNode, EOS #BOS, ImageNode, FinishSymbol
from grammar_parser.gparser import IndentationTerminal # BOS, MagicTerminal, Nonterminal
from PyQt5 import QtCore
from PyQt5.QtGui import QPen, QColor, QImage
from PyQt5.QtWidgets import QApplication
import math, os
class Renderer(object):
def __init__(self, fontwt, fontht, fontd):
self.fontwt = fontwt
self.fontht = fontht
self.fontd = fontd
def paint_node(self, paint, node, x, y, highlighter):
raise NotImplementedError
def nextNode(self, node):
node = node.next_term
if isinstance(node, EOS):
return None
def update_image(self, node):
pass
def setStyle(self, paint, style):
f = paint.font()
if style == "italic":
f.setItalic(True)
f.setBold(False)
elif style == "bold":
f.setItalic(False)
f.setBold(True)
else:
f.setItalic(False)
f.setBold(False)
paint.setFont(f)
class NormalRenderer(Renderer):
def paint_node(self, paint, node, x, y, highlighter):
dx, dy = (0, 0)
if node.symbol.name == "\r" or isinstance(node, EOS):
return dx, dy
if isinstance(node.symbol, IndentationTerminal):
paint.setPen(QPen(QColor("#aa3333")))
self.setStyle(paint, highlighter.get_style(node))
if QApplication.instance().showindent is True:
if node.symbol.name == "INDENT":
text = ">"
elif node.symbol.name == "DEDENT":
text = "<"
else:
return dx, dy
paint.drawText(QtCore.QPointF(x, 3 + self.fontht + y*self.fontht - self.fontd), text)
return 1*self.fontwt, dy
else:
return dx, dy
if isinstance(node, TextNode):
paint.setPen(QPen(QColor(highlighter.get_color(node))))
self.setStyle(paint, highlighter.get_style(node))
text = node.symbol.name
if not (node.lookup == "<ws>" and node.symbol.name.startswith(" ")): # speedhack: don't draw invisible nodes
paint.drawText(QtCore.QPointF(x, 3 + self.fontht + y*self.fontht - self.fontd), text)
dx = len(text) * self.fontwt
dy = 0
return dx, dy
def doubleClick(self):
pass # select/unselect
class ImageRenderer(NormalRenderer):
def paint_node(self, paint, node, x, y, highlighter):
self.update_image(node)
dx, dy = (0, 0)
if node.image is not None and not node.plain_mode:
paint.drawImage(QtCore.QPoint(x, 3 + y * self.fontht), node.image)
dx = int(math.ceil(node.image.width() * 1.0 / self.fontwt) * self.fontwt)
dy = int(math.ceil(node.image.height() * 1.0 / self.fontht))
else:
dx, dy = NormalRenderer.paint_node(self, paint, node, x, y, highlighter)
return dx, dy
def get_filename(self, node):
return node.symbol.name
def update_image(self, node):
filename = self.get_filename(node)
if node.image_src == filename:
return
if os.path.isfile(filename):
node.image = QImage(filename)
node.image_src = filename
else:
node.image = None
node.image_src = None
def doubleClick(self):
pass # switch between display modes
class ChemicalRenderer(ImageRenderer):
def get_filename(self, node):
return "chemicals/" + node.symbol.name + ".png"
if not has_mipy:
class IPythonRenderer(NormalRenderer):
pass
else:
class IPythonRenderer(NormalRenderer):
proc = kernel.IPythonKernelProcess()
def paint_node(self, paint, node, x, y, highlighter):
lbox = node.get_root().get_magicterminal()
if lbox.plain_mode:
return NormalRenderer.paint_node(self, paint, node, x, y, highlighter)
else:
dx, dy = NormalRenderer.paint_node(self, paint, node, x, y, highlighter)
if isinstance(node.next_term, EOS):
content = self.get_content(lbox)
try:
krn = IPythonRenderer.proc.connection
if krn is not None:
listener = IPythonExecuteListener()
krn.execute_request(content, listener=listener)
while not listener.finished:
krn.poll(-1)
text = str(listener.result)
except Exception as e:
text = e.message
paint.drawText(QtCore.QPointF(x+100, self.fontht + y*self.fontht), " | "+text)
return dx, dy
def get_content(self, lbox):
node = lbox.symbol.ast.children[0].next_term
l = []
while not isinstance(node, EOS):
if not isinstance(node.symbol, IndentationTerminal):
l.append(node.symbol.name)
node = node.next_term
return "".join(l)
class IPythonExecuteListener(request_listener.ExecuteRequestListener):
def __init__(self):
self.result = None
self.finished = False
def on_execute_result(self, execution_count, data, metadata):
self.result = data['text/plain']
def on_execute_finished(self):
self.finished = True
def on_error(self, ename, value, traceback):
raise Exception(ename)
def get_renderer(parent, fontwt, fontht, fontd):
if parent == "Chemicals":
return ChemicalRenderer(fontwt, fontht, fontd)
if parent == "Image":
return ImageRenderer(fontwt, fontht, fontd)
if parent == "IPython":
return IPythonRenderer(fontwt, fontht, fontd)
return NormalRenderer(fontwt, fontht, fontd)
|
[
"sys.path.append",
"PyQt5.QtGui.QColor",
"mipy.kernel.IPythonKernelProcess",
"os.path.isfile",
"PyQt5.QtGui.QImage",
"PyQt5.QtCore.QPoint",
"PyQt5.QtWidgets.QApplication.instance",
"PyQt5.QtCore.QPointF"
] |
[((1258, 1284), 'sys.path.append', 'sys.path.append', (['"""../mipy"""'], {}), "('../mipy')\n", (1273, 1284), False, 'import sys\n'), ((4638, 4662), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (4652, 4662), False, 'import math, os\n'), ((5162, 5191), 'mipy.kernel.IPythonKernelProcess', 'kernel.IPythonKernelProcess', ([], {}), '()\n', (5189, 5191), False, 'from mipy import kernel, request_listener\n'), ((4689, 4705), 'PyQt5.QtGui.QImage', 'QImage', (['filename'], {}), '(filename)\n', (4695, 4705), False, 'from PyQt5.QtGui import QPen, QColor, QImage\n'), ((4093, 4130), 'PyQt5.QtCore.QPoint', 'QtCore.QPoint', (['x', '(3 + y * self.fontht)'], {}), '(x, 3 + y * self.fontht)\n', (4106, 4130), False, 'from PyQt5 import QtCore\n'), ((2734, 2751), 'PyQt5.QtGui.QColor', 'QColor', (['"""#aa3333"""'], {}), "('#aa3333')\n", (2740, 2751), False, 'from PyQt5.QtGui import QPen, QColor, QImage\n'), ((2831, 2854), 'PyQt5.QtWidgets.QApplication.instance', 'QApplication.instance', ([], {}), '()\n', (2852, 2854), False, 'from PyQt5.QtWidgets import QApplication\n'), ((3124, 3189), 'PyQt5.QtCore.QPointF', 'QtCore.QPointF', (['x', '(3 + self.fontht + y * self.fontht - self.fontd)'], {}), '(x, 3 + self.fontht + y * self.fontht - self.fontd)\n', (3138, 3189), False, 'from PyQt5 import QtCore\n'), ((3641, 3706), 'PyQt5.QtCore.QPointF', 'QtCore.QPointF', (['x', '(3 + self.fontht + y * self.fontht - self.fontd)'], {}), '(x, 3 + self.fontht + y * self.fontht - self.fontd)\n', (3655, 3706), False, 'from PyQt5 import QtCore\n'), ((6190, 6244), 'PyQt5.QtCore.QPointF', 'QtCore.QPointF', (['(x + 100)', '(self.fontht + y * self.fontht)'], {}), '(x + 100, self.fontht + y * self.fontht)\n', (6204, 6244), False, 'from PyQt5 import QtCore\n')]
|
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.mixins import ListModelMixin, RetrieveModelMixin, CreateModelMixin
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet, ModelViewSet
from .serializers import (
GradeSerializer,
SubmissionSerializer,
AttachmentSerializer,
TeacherSubmissionSerializer,
)
from teleband.courses.models import Course
from teleband.submissions.models import Grade, Submission, SubmissionAttachment
from teleband.assignments.models import Assignment
class SubmissionViewSet(
ListModelMixin, RetrieveModelMixin, CreateModelMixin, GenericViewSet
):
serializer_class = SubmissionSerializer
queryset = Submission.objects.all()
def get_queryset(self):
return self.queryset.filter(assignment_id=self.kwargs["assignment_id"])
def perform_create(self, serializer):
serializer.save(
assignment=Assignment.objects.get(pk=self.kwargs["assignment_id"])
)
# @action(detail=False)
# def get_
class AttachmentViewSet(
ListModelMixin, RetrieveModelMixin, CreateModelMixin, GenericViewSet
):
serializer_class = AttachmentSerializer
queryset = SubmissionAttachment.objects.all()
def get_queryset(self):
return self.queryset.filter(submission_id=self.kwargs["submission_pk"])
def perform_create(self, serializer):
serializer.save(
submission=Submission.objects.get(pk=self.kwargs["submission_pk"])
)
class TeacherSubmissionViewSet(ListModelMixin, RetrieveModelMixin, GenericViewSet):
serializer_class = TeacherSubmissionSerializer
queryset = Submission.objects.all()
# def get_queryset(self,):
# pass
@action(detail=False)
def recent(self, request, **kwargs):
if "piece_slug" not in request.GET or "activity_name" not in request.GET:
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={
"error": "Missing piece_slug or activity_name (figure it out!) in get data"
},
)
course_id = self.kwargs["course_slug_slug"]
piece_slug = request.GET["piece_slug"]
activity_name = request.GET["activity_name"]
queryset = (
Submission.objects.filter(
assignment__enrollment__course__slug=course_id,
assignment__activity__activity_type__name=activity_name,
assignment__part__piece__slug=piece_slug,
)
.order_by("assignment__enrollment", "-submitted")
.distinct("assignment__enrollment")
)
serializer = self.serializer_class(
queryset, many=True, context={"request": request}
)
return Response(status=status.HTTP_200_OK, data=serializer.data)
class GradeViewSet(ModelViewSet):
queryset = Grade.objects.all()
serializer_class = GradeSerializer
|
[
"teleband.submissions.models.Submission.objects.all",
"teleband.submissions.models.SubmissionAttachment.objects.all",
"teleband.submissions.models.Submission.objects.filter",
"rest_framework.response.Response",
"rest_framework.decorators.action",
"teleband.submissions.models.Submission.objects.get",
"teleband.assignments.models.Assignment.objects.get",
"teleband.submissions.models.Grade.objects.all"
] |
[((797, 821), 'teleband.submissions.models.Submission.objects.all', 'Submission.objects.all', ([], {}), '()\n', (819, 821), False, 'from teleband.submissions.models import Grade, Submission, SubmissionAttachment\n'), ((1294, 1328), 'teleband.submissions.models.SubmissionAttachment.objects.all', 'SubmissionAttachment.objects.all', ([], {}), '()\n', (1326, 1328), False, 'from teleband.submissions.models import Grade, Submission, SubmissionAttachment\n'), ((1747, 1771), 'teleband.submissions.models.Submission.objects.all', 'Submission.objects.all', ([], {}), '()\n', (1769, 1771), False, 'from teleband.submissions.models import Grade, Submission, SubmissionAttachment\n'), ((1825, 1845), 'rest_framework.decorators.action', 'action', ([], {'detail': '(False)'}), '(detail=False)\n', (1831, 1845), False, 'from rest_framework.decorators import action\n'), ((3002, 3021), 'teleband.submissions.models.Grade.objects.all', 'Grade.objects.all', ([], {}), '()\n', (3019, 3021), False, 'from teleband.submissions.models import Grade, Submission, SubmissionAttachment\n'), ((2894, 2951), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_200_OK', 'data': 'serializer.data'}), '(status=status.HTTP_200_OK, data=serializer.data)\n', (2902, 2951), False, 'from rest_framework.response import Response\n'), ((1988, 2120), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_400_BAD_REQUEST', 'data': "{'error': 'Missing piece_slug or activity_name (figure it out!) in get data'}"}), "(status=status.HTTP_400_BAD_REQUEST, data={'error':\n 'Missing piece_slug or activity_name (figure it out!) in get data'})\n", (1996, 2120), False, 'from rest_framework.response import Response\n'), ((1022, 1077), 'teleband.assignments.models.Assignment.objects.get', 'Assignment.objects.get', ([], {'pk': "self.kwargs['assignment_id']"}), "(pk=self.kwargs['assignment_id'])\n", (1044, 1077), False, 'from teleband.assignments.models import Assignment\n'), ((1529, 1584), 'teleband.submissions.models.Submission.objects.get', 'Submission.objects.get', ([], {'pk': "self.kwargs['submission_pk']"}), "(pk=self.kwargs['submission_pk'])\n", (1551, 1584), False, 'from teleband.submissions.models import Grade, Submission, SubmissionAttachment\n'), ((2406, 2586), 'teleband.submissions.models.Submission.objects.filter', 'Submission.objects.filter', ([], {'assignment__enrollment__course__slug': 'course_id', 'assignment__activity__activity_type__name': 'activity_name', 'assignment__part__piece__slug': 'piece_slug'}), '(assignment__enrollment__course__slug=course_id,\n assignment__activity__activity_type__name=activity_name,\n assignment__part__piece__slug=piece_slug)\n', (2431, 2586), False, 'from teleband.submissions.models import Grade, Submission, SubmissionAttachment\n')]
|
import json
import base64
import requests
from flask import Flask, abort, jsonify, request
from app.config import Config
import os
import schedule
import threading
import time
from pathlib import Path
app = Flask(__name__)
# set environment variable
app.config["ENV"] = Config.DEPLOY_ENV
times = []
image_counter = 0
image_count = 20
image_path = Path.cwd() / 'images'
def create_database():
image = get_image()
save_image(image, f'takapuna{image_counter}.png')
for i in range(0,image_count):
save_image(image, f'takapuna{i}.png')
create_database()
def schedule_check():
while True:
schedule.run_pending()
time.sleep(5)
def image_run():
try:
update_images()
except Exception as e:
print('hit exepction!')
print(e)
pass
def get_image():
r = requests.get('http://www.windsurf.co.nz/webcams/takapuna.jpg')
if r.status_code is 200:
return r.content
else:
print(r.status_code)
print(r.text)
def save_image(image, filename):
f = open(image_path / filename, 'wb') # first argument is the filename
f.write(image)
f.close()
def update_images():
global image_counter
image = get_image()
save_image(image, f'takapuna{image_counter}.png')
image_counter += 1
schedule.every(5).minutes.do(image_run)
x = threading.Thread(target=schedule_check, daemon=True)
x.start()
@app.errorhandler(404)
def resource_not_found(e):
return jsonify(error=str(e)), 404
@app.route("/health")
def health():
return json.dumps({"status": "healthy"})
@app.route("/")
def cam():
return create_page_from_images( get_latest_images() )
def get_latest_images():
image_list = []
for i in range(0,image_count):
data_uri = base64.b64encode(open( image_path / f'takapuna{i}.png', 'rb').read()).decode('utf-8')
img_tag = '<img src="data:image/png;base64,{0}">'.format(data_uri)
image_list.append(img_tag)
i += 1
return image_list
def create_page_from_images(image_list):
page = ''
for im in image_list:
page += im
page += '\n'
return page
|
[
"schedule.run_pending",
"threading.Thread",
"flask.Flask",
"json.dumps",
"time.sleep",
"requests.get",
"schedule.every",
"pathlib.Path.cwd"
] |
[((213, 228), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (218, 228), False, 'from flask import Flask, abort, jsonify, request\n'), ((1351, 1403), 'threading.Thread', 'threading.Thread', ([], {'target': 'schedule_check', 'daemon': '(True)'}), '(target=schedule_check, daemon=True)\n', (1367, 1403), False, 'import threading\n'), ((354, 364), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (362, 364), False, 'from pathlib import Path\n'), ((836, 898), 'requests.get', 'requests.get', (['"""http://www.windsurf.co.nz/webcams/takapuna.jpg"""'], {}), "('http://www.windsurf.co.nz/webcams/takapuna.jpg')\n", (848, 898), False, 'import requests\n'), ((1552, 1585), 'json.dumps', 'json.dumps', (["{'status': 'healthy'}"], {}), "({'status': 'healthy'})\n", (1562, 1585), False, 'import json\n'), ((625, 647), 'schedule.run_pending', 'schedule.run_pending', ([], {}), '()\n', (645, 647), False, 'import schedule\n'), ((656, 669), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (666, 669), False, 'import time\n'), ((1307, 1324), 'schedule.every', 'schedule.every', (['(5)'], {}), '(5)\n', (1321, 1324), False, 'import schedule\n')]
|
from django.test import TestCase, Client
from plan.models import Announcement
class AnnouncementTestCase(TestCase):
def setUp(self):
self.client = Client()
Announcement.objects.create(title='test1', content='test content1')
Announcement.objects.create(title='test2', content='test content2')
Announcement.objects.create(title='test3', content='test content3')
def tearDown(self):
Announcement.objects.all().delete()
def test_get_one_announcement(self):
"""测试获取单个公告"""
for anno in Announcement.objects.all():
response = self.client.get(f'/plan/announcement/{anno.id}/')
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content, {
'id': anno.id,
'title': anno.title,
'created_date': str(anno.created_date),
'changed_date': str(anno.changed_date),
'content': anno.content,
})
def test_get_all_announcements(self):
"""测试获取所有公告"""
response = self.client.get('/plan/announcement/')
self.assertEqual(response.status_code, 200)
expected_data = list()
for anno in Announcement.objects.all():
expected_data.append({
'id': anno.id,
'title': anno.title,
'created_date': str(anno.created_date),
'changed_date': str(anno.changed_date),
})
self.assertJSONEqual(response.content, expected_data)
|
[
"plan.models.Announcement.objects.create",
"plan.models.Announcement.objects.all",
"django.test.Client"
] |
[((162, 170), 'django.test.Client', 'Client', ([], {}), '()\n', (168, 170), False, 'from django.test import TestCase, Client\n'), ((179, 246), 'plan.models.Announcement.objects.create', 'Announcement.objects.create', ([], {'title': '"""test1"""', 'content': '"""test content1"""'}), "(title='test1', content='test content1')\n", (206, 246), False, 'from plan.models import Announcement\n'), ((255, 322), 'plan.models.Announcement.objects.create', 'Announcement.objects.create', ([], {'title': '"""test2"""', 'content': '"""test content2"""'}), "(title='test2', content='test content2')\n", (282, 322), False, 'from plan.models import Announcement\n'), ((331, 398), 'plan.models.Announcement.objects.create', 'Announcement.objects.create', ([], {'title': '"""test3"""', 'content': '"""test content3"""'}), "(title='test3', content='test content3')\n", (358, 398), False, 'from plan.models import Announcement\n'), ((565, 591), 'plan.models.Announcement.objects.all', 'Announcement.objects.all', ([], {}), '()\n', (589, 591), False, 'from plan.models import Announcement\n'), ((1242, 1268), 'plan.models.Announcement.objects.all', 'Announcement.objects.all', ([], {}), '()\n', (1266, 1268), False, 'from plan.models import Announcement\n'), ((436, 462), 'plan.models.Announcement.objects.all', 'Announcement.objects.all', ([], {}), '()\n', (460, 462), False, 'from plan.models import Announcement\n')]
|
#!/usr/bin/env python
#
# Copyright (c) 2021 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
from ros_compatibility.core import get_ros_version
ROS_VERSION = get_ros_version()
if ROS_VERSION == 1:
import rospy
class ROSException(rospy.ROSException):
pass
class ROSInterruptException(rospy.ROSInterruptException):
pass
class ServiceException(rospy.ServiceException):
pass
elif ROS_VERSION == 2:
import rclpy.exceptions
class ROSException(Exception):
pass
class ROSInterruptException(rclpy.exceptions.ROSInterruptException):
pass
class ServiceException(Exception):
pass
|
[
"ros_compatibility.core.get_ros_version"
] |
[((252, 269), 'ros_compatibility.core.get_ros_version', 'get_ros_version', ([], {}), '()\n', (267, 269), False, 'from ros_compatibility.core import get_ros_version\n')]
|
import igraph as ig
import numpy as np
from scipy.special import betaln
g = ig.Graph.Read_GML('karate.txt')
X = np.array(g.get_adjacency().data)
def irm(X, T, a, b, A, random_seed = 42):
N = len(X)
z = np.ones([N,1])
Z = []
np.random.seed(random_seed)
for t in range(T): # for T iterations
for n in range(N): # for each node n
#nn = index mask without currently sampled node n
nn = [_ for _ in range(N)]
nn.remove(n)
X_ = X[np.ix_(nn,nn)] #adjacency matrix without currently sampled node
# K = n. of components
K = len(z[0])
# Delete empty component if present
if K > 1:
idx = np.argwhere(np.sum(z[nn], 0) == 0)
z = np.delete(z, idx, axis=1)
K -= len(idx)
# m = n. of nodes in each component
m = np.sum(z[nn], 0)[np.newaxis]
M = np.tile(m, (K, 1))
# M1 = n. of links between components without current node
M1 = z[nn].T @ X_ @ z[nn] - np.diag(np.sum(X_@z[nn]*z[nn], 0) / 2)
# M0 = n. of non-links between components without current node
M0 = m.T@m - np.diag((m*(m+1) / 2).flatten()) - M1
# r = n. of links from current node to components
r = z[nn].T @ X[nn, n]
R = np.tile(r, (K, 1))
# lik matrix of current node sampled to each component
likelihood = betaln(M1+R+a, M0+M-R+b) - betaln(M1+a, M0+b)
# lik of current node to new component
likelihood_n = betaln(r+a, m-r+b) - betaln(a,b)
logLik = np.sum(np.concatenate([likelihood, likelihood_n]), 1)
logPrior = np.log(np.append(m, A))
logPost = logPrior + logLik
# Convert from log probabilities, normalized to max
P = np.exp(logPost-max(logPost))
# Assignment through random draw fron unif(0,1), taking first value from prob. vector
draw = np.random.rand()
i = np.argwhere(draw<np.cumsum(P)/sum(P))[0]
# Assignment of current node to component i
z[n,:] = 0
if i == K: # If new component: add new column to partition matrix
z = np.hstack((z, np.zeros((N,1))))
z[n,i] = 1
# Delete empty component if present
idx = np.argwhere(np.all(z[..., :] == 0, axis=0))
z = np.delete(z, idx, axis=1)
Z.append(z)
print(z)
print(m)
return Z
T = 500
a = 1
b = 1
A = 10
Z = irm(X, T, a, b, A)
for i in range(1, 11):
print(np.sum(Z[-i], 0))
|
[
"numpy.sum",
"numpy.random.seed",
"numpy.concatenate",
"igraph.Graph.Read_GML",
"numpy.ix_",
"numpy.zeros",
"numpy.ones",
"scipy.special.betaln",
"numpy.append",
"numpy.cumsum",
"numpy.tile",
"numpy.random.rand",
"numpy.delete",
"numpy.all"
] |
[((77, 108), 'igraph.Graph.Read_GML', 'ig.Graph.Read_GML', (['"""karate.txt"""'], {}), "('karate.txt')\n", (94, 108), True, 'import igraph as ig\n'), ((212, 227), 'numpy.ones', 'np.ones', (['[N, 1]'], {}), '([N, 1])\n', (219, 227), True, 'import numpy as np\n'), ((243, 270), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (257, 270), True, 'import numpy as np\n'), ((2486, 2511), 'numpy.delete', 'np.delete', (['z', 'idx'], {'axis': '(1)'}), '(z, idx, axis=1)\n', (2495, 2511), True, 'import numpy as np\n'), ((2663, 2679), 'numpy.sum', 'np.sum', (['Z[-i]', '(0)'], {}), '(Z[-i], 0)\n', (2669, 2679), True, 'import numpy as np\n'), ((951, 969), 'numpy.tile', 'np.tile', (['m', '(K, 1)'], {}), '(m, (K, 1))\n', (958, 969), True, 'import numpy as np\n'), ((1401, 1419), 'numpy.tile', 'np.tile', (['r', '(K, 1)'], {}), '(r, (K, 1))\n', (1408, 1419), True, 'import numpy as np\n'), ((2063, 2079), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2077, 2079), True, 'import numpy as np\n'), ((2442, 2472), 'numpy.all', 'np.all', (['(z[..., :] == 0)'], {'axis': '(0)'}), '(z[..., :] == 0, axis=0)\n', (2448, 2472), True, 'import numpy as np\n'), ((509, 523), 'numpy.ix_', 'np.ix_', (['nn', 'nn'], {}), '(nn, nn)\n', (515, 523), True, 'import numpy as np\n'), ((784, 809), 'numpy.delete', 'np.delete', (['z', 'idx'], {'axis': '(1)'}), '(z, idx, axis=1)\n', (793, 809), True, 'import numpy as np\n'), ((906, 922), 'numpy.sum', 'np.sum', (['z[nn]', '(0)'], {}), '(z[nn], 0)\n', (912, 922), True, 'import numpy as np\n'), ((1513, 1547), 'scipy.special.betaln', 'betaln', (['(M1 + R + a)', '(M0 + M - R + b)'], {}), '(M1 + R + a, M0 + M - R + b)\n', (1519, 1547), False, 'from scipy.special import betaln\n'), ((1540, 1562), 'scipy.special.betaln', 'betaln', (['(M1 + a)', '(M0 + b)'], {}), '(M1 + a, M0 + b)\n', (1546, 1562), False, 'from scipy.special import betaln\n'), ((1637, 1661), 'scipy.special.betaln', 'betaln', (['(r + a)', '(m - r + b)'], {}), '(r + a, m - r + b)\n', (1643, 1661), False, 'from scipy.special import betaln\n'), ((1658, 1670), 'scipy.special.betaln', 'betaln', (['a', 'b'], {}), '(a, b)\n', (1664, 1670), False, 'from scipy.special import betaln\n'), ((1699, 1741), 'numpy.concatenate', 'np.concatenate', (['[likelihood, likelihood_n]'], {}), '([likelihood, likelihood_n])\n', (1713, 1741), True, 'import numpy as np\n'), ((1776, 1791), 'numpy.append', 'np.append', (['m', 'A'], {}), '(m, A)\n', (1785, 1791), True, 'import numpy as np\n'), ((741, 757), 'numpy.sum', 'np.sum', (['z[nn]', '(0)'], {}), '(z[nn], 0)\n', (747, 757), True, 'import numpy as np\n'), ((1103, 1132), 'numpy.sum', 'np.sum', (['(X_ @ z[nn] * z[nn])', '(0)'], {}), '(X_ @ z[nn] * z[nn], 0)\n', (1109, 1132), True, 'import numpy as np\n'), ((2329, 2345), 'numpy.zeros', 'np.zeros', (['(N, 1)'], {}), '((N, 1))\n', (2337, 2345), True, 'import numpy as np\n'), ((2113, 2125), 'numpy.cumsum', 'np.cumsum', (['P'], {}), '(P)\n', (2122, 2125), True, 'import numpy as np\n')]
|
from __future__ import annotations
import logging
import typing as t
from fastapi import APIRouter, FastAPI
from fastapi.param_functions import Depends
from starlette.responses import (
JSONResponse,
Response,
)
from api.config import get_config
from api.postgres import Connection, Postgres, connect_and_migrate
logger = logging.getLogger(__name__)
router = APIRouter()
@router.get("/")
async def get_home(conn: Connection = Depends(Postgres.connection)) -> Response:
users = await conn.fetch("SELECT username FROM users;")
return JSONResponse(content={"users": [user[1] for user in users]})
def create_app() -> FastAPI:
config = get_config()
async def on_startup() -> None:
await connect_and_migrate(config.postgres)
async def on_shutdown() -> None:
await Postgres.disconnect()
app = FastAPI(
openapi_url=None,
on_startup=[on_startup],
on_shutdown=[on_shutdown],
# CUSTOMIZE
# exception_handlers={},
)
# CUSTOMIZE
# app.mount(
# "/static", app=StaticFiles(directory=config.static_dir, html=True)
# )
app.include_router(router)
return app
|
[
"api.config.get_config",
"api.postgres.connect_and_migrate",
"fastapi.param_functions.Depends",
"starlette.responses.JSONResponse",
"fastapi.FastAPI",
"api.postgres.Postgres.disconnect",
"logging.getLogger",
"fastapi.APIRouter"
] |
[((334, 361), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (351, 361), False, 'import logging\n'), ((371, 382), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (380, 382), False, 'from fastapi import APIRouter, FastAPI\n'), ((440, 468), 'fastapi.param_functions.Depends', 'Depends', (['Postgres.connection'], {}), '(Postgres.connection)\n', (447, 468), False, 'from fastapi.param_functions import Depends\n'), ((554, 614), 'starlette.responses.JSONResponse', 'JSONResponse', ([], {'content': "{'users': [user[1] for user in users]}"}), "(content={'users': [user[1] for user in users]})\n", (566, 614), False, 'from starlette.responses import JSONResponse, Response\n'), ((659, 671), 'api.config.get_config', 'get_config', ([], {}), '()\n', (669, 671), False, 'from api.config import get_config\n'), ((845, 922), 'fastapi.FastAPI', 'FastAPI', ([], {'openapi_url': 'None', 'on_startup': '[on_startup]', 'on_shutdown': '[on_shutdown]'}), '(openapi_url=None, on_startup=[on_startup], on_shutdown=[on_shutdown])\n', (852, 922), False, 'from fastapi import APIRouter, FastAPI\n'), ((723, 759), 'api.postgres.connect_and_migrate', 'connect_and_migrate', (['config.postgres'], {}), '(config.postgres)\n', (742, 759), False, 'from api.postgres import Connection, Postgres, connect_and_migrate\n'), ((812, 833), 'api.postgres.Postgres.disconnect', 'Postgres.disconnect', ([], {}), '()\n', (831, 833), False, 'from api.postgres import Connection, Postgres, connect_and_migrate\n')]
|
import networkx as nx
import matplotlib.pyplot as plt
G=nx.cubical_graph() #Return the 3-regular Platonic Cubical graph.
plt.subplot(121) #subplot(nrows, ncols, index)
nx.draw(G) # default spring_layout
plt.subplot(122)
nx.draw(G, pos=nx.circular_layout(G), nodecolor='r', edge_color='b')
|
[
"networkx.cubical_graph",
"matplotlib.pyplot.subplot",
"networkx.draw",
"networkx.circular_layout"
] |
[((57, 75), 'networkx.cubical_graph', 'nx.cubical_graph', ([], {}), '()\n', (73, 75), True, 'import networkx as nx\n'), ((122, 138), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (133, 138), True, 'import matplotlib.pyplot as plt\n'), ((169, 179), 'networkx.draw', 'nx.draw', (['G'], {}), '(G)\n', (176, 179), True, 'import networkx as nx\n'), ((205, 221), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (216, 221), True, 'import matplotlib.pyplot as plt\n'), ((237, 258), 'networkx.circular_layout', 'nx.circular_layout', (['G'], {}), '(G)\n', (255, 258), True, 'import networkx as nx\n')]
|
from flask import Flask
from flask import render_template
app = Flask(__name__)
# main routes
@app.route('/')
def index():
return render_template('index.html')
@app.route('/home')
def home():
return render_template('home.html')
|
[
"flask.Flask",
"flask.render_template"
] |
[((65, 80), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (70, 80), False, 'from flask import Flask\n'), ((136, 165), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (151, 165), False, 'from flask import render_template\n'), ((210, 238), 'flask.render_template', 'render_template', (['"""home.html"""'], {}), "('home.html')\n", (225, 238), False, 'from flask import render_template\n')]
|
import datetime
END_DATE = datetime.date(2019, 4, 25)
STATE2DESCRIPTOR = {0: 'REST', 1: 'SLEEP', 2: 'ACTIVE', 3: 'RUN', 4: 'EAT&DRINK', 5: 'EAT'}
DAY_NIGHTS = ['day', 'night']
STATES = list(STATE2DESCRIPTOR.values())
STATE_AGG_BASE_FEATURES = ['VO2', 'VCO2', 'VH2O', 'KCal_hr', 'RQ', 'Food', 'PedMeters', 'AllMeters']
STATE_AGGREGATE_FNS = ['nanmean', 'pp99']
CIRCADIAN_AGG_BASE_FEATURES = ["VO2", "RQ", "KCal_hr", "Food", "Water", "BodyMass", "WheelSpeed",
"WheelMeters", 'PedMeters', "AllMeters", "XBreak", "YBreak", "ZBreak"]
CIRCADIAN_AGGREGATE_FNS = ['mean', 'pp99']
INDEX_VARS = ['mouse_id', 'trace_id', 'date', 'age_in_months', 'day_of_week']
AGE_ORDER = ['00-03 months', '03-06 months', '06-09 months', '09-12 months',
'12-15 months', '15-18 months', '18-21 months', '21-24 months',
'24-27 months', '27-30 months', '30-33 months',
'33 months or older']
|
[
"datetime.date"
] |
[((29, 55), 'datetime.date', 'datetime.date', (['(2019)', '(4)', '(25)'], {}), '(2019, 4, 25)\n', (42, 55), False, 'import datetime\n')]
|
import os
import re
import sys
import platform
import subprocess
import versioneer
from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext
from distutils.version import LooseVersion
cpus = os.cpu_count()
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
with open(os.path.join(here, 'requirements.txt')) as f:
requirements = f.read().split()
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake must be installed to build the following"
"extensions: , ".join(
e.name for e in self.extensions))
if platform.system() == "Windows":
cmake_version = LooseVersion(
re.search(r'version\s*([\d.]+)', out.decode()).group(1))
if cmake_version < '3.1.0':
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(
os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable]
# cfg = 'Debug' if self.debug else 'Release'
cfg = 'RelWithDebInfo'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'
.format(cfg.upper(), extdir)]
if sys.maxsize > 2**32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--target', '_pyantplus']
build_args += ['--', '-j{}'.format(cpus)]
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DPY_VERSION_INFO=\\"{}\\"' .format(
env.get('CXXFLAGS', ''),
self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args,
cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args,
cwd=self.build_temp)
cmdclass = versioneer.get_cmdclass()
cmdclass['build_ext'] = CMakeBuild
setup(
name='pyantplus',
author='<NAME>',
author_email='<EMAIL>',
description='ANT+ Utilities',
long_description=long_description,
long_description_content_type='text/markdown',
license='MIT',
url='https://github.com/stuwilkins/ant-recorder',
packages=find_packages(),
install_requires=requirements,
setup_requires=["pytest-runner"],
tests_require=["pytest"],
ext_modules=[CMakeExtension('_pyantplus')],
cmdclass=cmdclass,
zip_safe=False,
version=versioneer.get_version(),
)
|
[
"versioneer.get_version",
"os.path.abspath",
"setuptools.find_packages",
"os.path.join",
"os.makedirs",
"os.path.dirname",
"os.environ.copy",
"subprocess.check_output",
"os.path.exists",
"versioneer.get_cmdclass",
"os.cpu_count",
"platform.system",
"setuptools.Extension.__init__",
"subprocess.check_call"
] |
[((241, 255), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (253, 255), False, 'import os\n'), ((2774, 2799), 'versioneer.get_cmdclass', 'versioneer.get_cmdclass', ([], {}), '()\n', (2797, 2799), False, 'import versioneer\n'), ((279, 304), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (294, 304), False, 'import os\n'), ((317, 348), 'os.path.join', 'os.path.join', (['here', '"""README.md"""'], {}), "(here, 'README.md')\n", (329, 348), False, 'import os\n'), ((417, 455), 'os.path.join', 'os.path.join', (['here', '"""requirements.txt"""'], {}), "(here, 'requirements.txt')\n", (429, 455), False, 'import os\n'), ((586, 628), 'setuptools.Extension.__init__', 'Extension.__init__', (['self', 'name'], {'sources': '[]'}), '(self, name, sources=[])\n', (604, 628), False, 'from setuptools import setup, Extension, find_packages\n'), ((654, 680), 'os.path.abspath', 'os.path.abspath', (['sourcedir'], {}), '(sourcedir)\n', (669, 680), False, 'import os\n'), ((2255, 2272), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (2270, 2272), False, 'import os\n'), ((2519, 2614), 'subprocess.check_call', 'subprocess.check_call', (["(['cmake', ext.sourcedir] + cmake_args)"], {'cwd': 'self.build_temp', 'env': 'env'}), "(['cmake', ext.sourcedir] + cmake_args, cwd=self.\n build_temp, env=env)\n", (2540, 2614), False, 'import subprocess\n'), ((2648, 2735), 'subprocess.check_call', 'subprocess.check_call', (["(['cmake', '--build', '.'] + build_args)"], {'cwd': 'self.build_temp'}), "(['cmake', '--build', '.'] + build_args, cwd=self.\n build_temp)\n", (2669, 2735), False, 'import subprocess\n'), ((3124, 3139), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (3137, 3139), False, 'from setuptools import setup, Extension, find_packages\n'), ((3347, 3371), 'versioneer.get_version', 'versioneer.get_version', ([], {}), '()\n', (3369, 3371), False, 'import versioneer\n'), ((762, 809), 'subprocess.check_output', 'subprocess.check_output', (["['cmake', '--version']"], {}), "(['cmake', '--version'])\n", (785, 809), False, 'import subprocess\n'), ((1049, 1066), 'platform.system', 'platform.system', ([], {}), '()\n', (1064, 1066), False, 'import platform\n'), ((1787, 1804), 'platform.system', 'platform.system', ([], {}), '()\n', (1802, 1804), False, 'import platform\n'), ((2437, 2468), 'os.path.exists', 'os.path.exists', (['self.build_temp'], {}), '(self.build_temp)\n', (2451, 2468), False, 'import os\n'), ((2482, 2510), 'os.makedirs', 'os.makedirs', (['self.build_temp'], {}), '(self.build_temp)\n', (2493, 2510), False, 'import os\n')]
|
# Twitcaspy
# Copyright 2021 Alma-field
# See LICENSE for details.
from twitcaspy import API, AppAuthHandler
# The client id and/or secret can be found on your application's Details page
# located at select app in https://twitcasting.tv/developer.php
# (in "details" tab)
CLIENT_ID = ''
CLIENT_SECRET = ''
auth = AppAuthHandler(CLIENT_ID, CLIENT_SECRET)
api = API(auth)
# Target User ID and screen ID
user_id = '182224938'
screen_id = 'twitcasting_jp'
# If the authentication was successful, you should
# see the name of the account print out
print(api.get_user_info(id=user_id).user.name)
result = api.get_webhook_list()
print(result.all_count)
for webhook in result.webhooks:
print(f'{webhook.user_id}: {event}')
|
[
"twitcaspy.AppAuthHandler",
"twitcaspy.API"
] |
[((316, 356), 'twitcaspy.AppAuthHandler', 'AppAuthHandler', (['CLIENT_ID', 'CLIENT_SECRET'], {}), '(CLIENT_ID, CLIENT_SECRET)\n', (330, 356), False, 'from twitcaspy import API, AppAuthHandler\n'), ((363, 372), 'twitcaspy.API', 'API', (['auth'], {}), '(auth)\n', (366, 372), False, 'from twitcaspy import API, AppAuthHandler\n')]
|
from django.conf import settings
from django.http import JsonResponse
from common import errors
def render_json(code=errors.OK, data=None):
"""
自定义 json 输出
:param code:
:param data:
:return:
"""
result = {
'code': code
}
if data:
result['data'] = data
if settings.DEBUG:
json_dumps_params = {'indent': 4, 'ensure_ascii': False}
else:
json_dumps_params = {'separators': (',', ':')}
return JsonResponse(result, safe=False, json_dumps_params=json_dumps_params)
|
[
"django.http.JsonResponse"
] |
[((475, 544), 'django.http.JsonResponse', 'JsonResponse', (['result'], {'safe': '(False)', 'json_dumps_params': 'json_dumps_params'}), '(result, safe=False, json_dumps_params=json_dumps_params)\n', (487, 544), False, 'from django.http import JsonResponse\n')]
|
import sys
sys.path.append('./model')
import argparse
import torch
import numpy as np
from model.model import NCNet
import torchvision.transforms as transforms
from dataloader import TrainLoader, ValLoader
from loss import WeakLoss
import torch.optim as optim
import json
import os
## Parameters
parser = argparse.ArgumentParser(description='Nc-Net Training')
## Input / Output
parser.add_argument('--outDir', type=str, help='output model directory')
parser.add_argument('--resumePth', type=str, help='resume model path')
parser.add_argument('--featExtractorPth', type=str, default = 'model/FeatureExtractor/resnet18.pth', help='feature extractor path')
parser.add_argument('--imgDir', type=str, default = 'data/pf-pascal/JPEGImages/', help='image Directory')
parser.add_argument('--trainCSV', type=str, default = 'data/pf-pascal/train.csv', help='train csv')
parser.add_argument('--valCSV', type=str, default = 'data/pf-pascal/val.csv', help='val csv')
parser.add_argument('--imgSize', type=int, default = 400, help='train image size')
## learning parameter
parser.add_argument('--lr', type=float, default=5e-4, help='learning rate')
parser.add_argument('--batchSize', type=int, default=16, help='batch size')
parser.add_argument('--nbEpoch', type=int, default=5, help='number of training epochs')
parser.add_argument('--neighConsKernel', nargs='+', type=int, default=[5,5,5], help='kernels sizes in neigh. cons.')
parser.add_argument('--neighConsChannel', nargs='+', type=int, default=[16,16,1], help='channels in neigh. cons')
parser.add_argument('--finetuneFeatExtractor', action='store_true', help='whether fine-tuning feature extractor')
parser.add_argument('--featExtractor', type=str, default='ResNet18Conv4', choices=['ResNet18Conv4', 'ResNet18Conv5'], help='feature extractor')
parser.add_argument('--cuda', action='store_true', help='GPU setting')
parser.add_argument('--softmaxMM', action='store_true', help='whether use softmax Mutual Matching')
args = parser.parse_args()
print(args)
## Set seed
torch.manual_seed(1)
if args.cuda:
torch.cuda.manual_seed(1)
else :
raise RuntimeError('CPU Version is not supported yet.')
np.random.seed(1)
## Initial Model
model = NCNet(kernel_sizes=args.neighConsKernel,
channels=args.neighConsChannel,
featExtractor = args.featExtractor,
featExtractorPth = args.featExtractorPth,
finetuneFeatExtractor = args.finetuneFeatExtractor,
softmaxMutualMatching = args.softmaxMM)
if not args.finetuneFeatExtractor:
msg = '\nIgnore the gradient for the parameters in the feature extractor'
print (msg)
for p in model.featExtractor.parameters():
p.requires_grad=False
if args.resumePth :
msg = '\nResume from {}'.format(args.resumePth)
model.load_state_dict(torch.load(args.resumePth))
if args.cuda :
model.cuda()
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr)
## Train Val DataLoader
normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # ImageNet normalization
trainTransform = transforms.Compose([transforms.RandomResizedCrop(args.imgSize),
transforms.ToTensor(),
normalize,])
valTransform = transforms.Compose([transforms.Resize(args.imgSize),
transforms.CenterCrop(args.imgSize),
transforms.ToTensor(),
normalize,])
trainLoader = TrainLoader(batchSize=args.batchSize,
pairCSV=args.trainCSV,
imgDir = args.imgDir,
trainTransform = trainTransform)
valLoader = ValLoader(batchSize=args.batchSize,
pairCSV=args.valCSV,
imgDir = args.imgDir,
valTransform = valTransform)
if not os.path.exists(args.outDir) :
os.mkdir(args.outDir)
# Train
bestValLoss = np.inf
history = {'TrainLoss' : [], 'ValLoss' : []}
outHistory = os.path.join(args.outDir, 'history.json')
outModel = os.path.join(args.outDir, 'netBest.pth')
for epoch in range(1, args.nbEpoch + 1) :
trainLoss = 0.
valLoss = 0.
for i, batch in enumerate(trainLoader) :
optimizer.zero_grad()
if args.cuda :
batch['source_image'] = batch['source_image'].cuda()
batch['target_image'] = batch['target_image'].cuda()
loss = WeakLoss(model, batch, args.softmaxMM)
loss.backward()
optimizer.step()
trainLoss += loss.item()
if i % 30 == 29 :
msg = '\nEpoch {:d}, Batch {:d}, Train Loss : {:.4f}'.format(epoch, i + 1, trainLoss / (i + 1))
print (msg)
## Validation
trainLoss = trainLoss / len(trainLoader)
with torch.no_grad() :
for i, batch in enumerate(valLoader) :
if args.cuda :
batch['source_image'] = batch['source_image'].cuda()
batch['target_image'] = batch['target_image'].cuda()
loss = WeakLoss(model, batch, args.softmaxMM)
valLoss += loss.item()
valLoss = valLoss / len(valLoader)
msg = 'Epoch {:d}, Train Loss : {:.4f}, Val Loss : {:.4f}'.format(epoch, trainLoss , valLoss)
with open(outHistory, 'w') as f :
json.dump(history, f)
print (msg)
if valLoss < bestValLoss :
msg = 'Validation Loss Improved from {:.4f} to {:.4f}'.format(bestValLoss, valLoss)
print (msg)
bestValLoss = valLoss
torch.save(model.state_dict(), outModel)
finalOut = os.path.join(args.outDir, 'netBest{:.3f}.pth'.format(bestValLoss))
cmd = 'mv {} {}'.format(outModel, finalOut)
os.system(cmd)
|
[
"os.mkdir",
"numpy.random.seed",
"argparse.ArgumentParser",
"model.model.NCNet",
"loss.WeakLoss",
"torchvision.transforms.Normalize",
"torch.no_grad",
"os.path.join",
"sys.path.append",
"torch.load",
"os.path.exists",
"dataloader.ValLoader",
"torchvision.transforms.CenterCrop",
"json.dump",
"torch.manual_seed",
"dataloader.TrainLoader",
"torch.cuda.manual_seed",
"os.system",
"torchvision.transforms.Resize",
"torchvision.transforms.RandomResizedCrop",
"torchvision.transforms.ToTensor"
] |
[((12, 38), 'sys.path.append', 'sys.path.append', (['"""./model"""'], {}), "('./model')\n", (27, 38), False, 'import sys\n'), ((311, 365), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Nc-Net Training"""'}), "(description='Nc-Net Training')\n", (334, 365), False, 'import argparse\n'), ((2024, 2044), 'torch.manual_seed', 'torch.manual_seed', (['(1)'], {}), '(1)\n', (2041, 2044), False, 'import torch\n'), ((2157, 2174), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (2171, 2174), True, 'import numpy as np\n'), ((2201, 2448), 'model.model.NCNet', 'NCNet', ([], {'kernel_sizes': 'args.neighConsKernel', 'channels': 'args.neighConsChannel', 'featExtractor': 'args.featExtractor', 'featExtractorPth': 'args.featExtractorPth', 'finetuneFeatExtractor': 'args.finetuneFeatExtractor', 'softmaxMutualMatching': 'args.softmaxMM'}), '(kernel_sizes=args.neighConsKernel, channels=args.neighConsChannel,\n featExtractor=args.featExtractor, featExtractorPth=args.\n featExtractorPth, finetuneFeatExtractor=args.finetuneFeatExtractor,\n softmaxMutualMatching=args.softmaxMM)\n', (2206, 2448), False, 'from model.model import NCNet\n'), ((3034, 3100), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (3054, 3100), True, 'import torchvision.transforms as transforms\n'), ((3589, 3705), 'dataloader.TrainLoader', 'TrainLoader', ([], {'batchSize': 'args.batchSize', 'pairCSV': 'args.trainCSV', 'imgDir': 'args.imgDir', 'trainTransform': 'trainTransform'}), '(batchSize=args.batchSize, pairCSV=args.trainCSV, imgDir=args.\n imgDir, trainTransform=trainTransform)\n', (3600, 3705), False, 'from dataloader import TrainLoader, ValLoader\n'), ((3825, 3932), 'dataloader.ValLoader', 'ValLoader', ([], {'batchSize': 'args.batchSize', 'pairCSV': 'args.valCSV', 'imgDir': 'args.imgDir', 'valTransform': 'valTransform'}), '(batchSize=args.batchSize, pairCSV=args.valCSV, imgDir=args.imgDir,\n valTransform=valTransform)\n', (3834, 3932), False, 'from dataloader import TrainLoader, ValLoader\n'), ((4177, 4218), 'os.path.join', 'os.path.join', (['args.outDir', '"""history.json"""'], {}), "(args.outDir, 'history.json')\n", (4189, 4218), False, 'import os\n'), ((4230, 4270), 'os.path.join', 'os.path.join', (['args.outDir', '"""netBest.pth"""'], {}), "(args.outDir, 'netBest.pth')\n", (4242, 4270), False, 'import os\n'), ((5914, 5928), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (5923, 5928), False, 'import os\n'), ((2063, 2088), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(1)'], {}), '(1)\n', (2085, 2088), False, 'import torch\n'), ((4023, 4050), 'os.path.exists', 'os.path.exists', (['args.outDir'], {}), '(args.outDir)\n', (4037, 4050), False, 'import os\n'), ((4058, 4079), 'os.mkdir', 'os.mkdir', (['args.outDir'], {}), '(args.outDir)\n', (4066, 4079), False, 'import os\n'), ((2836, 2862), 'torch.load', 'torch.load', (['args.resumePth'], {}), '(args.resumePth)\n', (2846, 2862), False, 'import torch\n'), ((3163, 3205), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['args.imgSize'], {}), '(args.imgSize)\n', (3191, 3205), True, 'import torchvision.transforms as transforms\n'), ((3244, 3265), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3263, 3265), True, 'import torchvision.transforms as transforms\n'), ((3354, 3385), 'torchvision.transforms.Resize', 'transforms.Resize', (['args.imgSize'], {}), '(args.imgSize)\n', (3371, 3385), True, 'import torchvision.transforms as transforms\n'), ((3425, 3460), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['args.imgSize'], {}), '(args.imgSize)\n', (3446, 3460), True, 'import torchvision.transforms as transforms\n'), ((3500, 3521), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3519, 3521), True, 'import torchvision.transforms as transforms\n'), ((4618, 4656), 'loss.WeakLoss', 'WeakLoss', (['model', 'batch', 'args.softmaxMM'], {}), '(model, batch, args.softmaxMM)\n', (4626, 4656), False, 'from loss import WeakLoss\n'), ((5007, 5022), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5020, 5022), False, 'import torch\n'), ((5530, 5551), 'json.dump', 'json.dump', (['history', 'f'], {}), '(history, f)\n', (5539, 5551), False, 'import json\n'), ((5264, 5302), 'loss.WeakLoss', 'WeakLoss', (['model', 'batch', 'args.softmaxMM'], {}), '(model, batch, args.softmaxMM)\n', (5272, 5302), False, 'from loss import WeakLoss\n')]
|
import sys
for i in range(5):
for j in range(i+1):
sys.stdout.write("*")
print()
|
[
"sys.stdout.write"
] |
[((63, 84), 'sys.stdout.write', 'sys.stdout.write', (['"""*"""'], {}), "('*')\n", (79, 84), False, 'import sys\n')]
|
import cgi
import json
from types import FunctionType
from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, cast
import attr
from aiohttp import web
from .context import COMPONENTS
from .swagger import Swagger
from .validators import MISSING, Validator, ValidatorError, schema_to_validator, security_to_validator
_SwaggerHandler = Callable[..., Awaitable[web.StreamResponse]]
class RequestValidationFailed(web.HTTPBadRequest):
"""This exception can be caught in a aiohttp middleware.
:param dict errors: This dict stores validation errors.
"""
def __init__(self, errors: Dict, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.errors = errors
def _get_fn_parameters(fn: _SwaggerHandler) -> Tuple[str, ...]:
func = cast(FunctionType, fn)
if func.__closure__ is None:
arg_count = func.__code__.co_argcount + func.__code__.co_kwonlyargcount
return func.__code__.co_varnames[:arg_count]
return _get_fn_parameters(func.__closure__[0].cell_contents)
@attr.attrs(slots=True, auto_attribs=True)
class Parameter:
name: str
validator: Validator
required: bool
class SwaggerRoute:
__slots__ = (
"_swagger",
"method",
"path",
"handler",
"qp",
"pp",
"hp",
"cp",
"bp",
"auth",
"params",
)
def __init__(self, method: str, path: str, handler: _SwaggerHandler, *, swagger: Swagger) -> None:
self.method = method
self.path = path
self.handler = handler
self.qp: List[Parameter] = []
self.pp: List[Parameter] = []
self.hp: List[Parameter] = []
self.cp: List[Parameter] = []
self.bp: Dict[str, Parameter] = {}
self.auth: Optional[Parameter] = None
self._swagger = swagger
method_section = self._swagger.spec["paths"][path][method]
parameters = method_section.get("parameters")
body = method_section.get("requestBody")
security = method_section.get("security")
components = self._swagger.spec.get("components", {})
COMPONENTS.set(components)
if security is not None:
parameter = Parameter("", security_to_validator(security), True)
self.auth = parameter
if parameters is not None:
for param in parameters:
if "$ref" in param:
if not components:
raise Exception("file with components definitions is missing")
# '#/components/parameters/Month'
*_, section, obj = param["$ref"].split("/")
param = components[section][obj]
parameter = Parameter(
param["name"],
schema_to_validator(param["schema"]),
param.get("required", False),
)
if param["in"] == "query":
self.qp.append(parameter)
elif param["in"] == "path":
self.pp.append(parameter)
elif param["in"] == "header":
parameter.name = parameter.name.lower()
self.hp.append(parameter)
elif param["in"] == "cookie":
self.cp.append(parameter)
if body is not None:
for media_type, value in body["content"].items():
# check that we have handler for media_type
self._swagger._get_media_type_handler(media_type)
value = body["content"][media_type]
self.bp[media_type] = Parameter(
"body",
schema_to_validator(value["schema"]),
body.get("required", False),
)
self.params = set(_get_fn_parameters(self.handler))
async def parse(self, request: web.Request) -> Dict:
params = {}
if "request" in self.params:
params["request"] = request
request_key = self._swagger.request_key
request[request_key] = {}
errors: Dict = {}
# check auth
if self.auth:
try:
values = self.auth.validator.validate(request, True)
except ValidatorError as e:
if isinstance(e.error, str):
errors["authorization"] = e.error
else:
errors = e.error
raise RequestValidationFailed(reason=json.dumps(errors), errors=errors)
for key, value in values.items():
request[request_key][key] = value
# query parameters
if self.qp:
for param in self.qp:
if param.required:
try:
v: Any = request.rel_url.query.getall(param.name)
except KeyError:
errors[param.name] = "is required"
continue
if len(v) == 1:
v = v[0]
else:
v = request.rel_url.query.getall(param.name, MISSING)
if v != MISSING and len(v) == 1:
v = v[0]
try:
value = param.validator.validate(v, True)
except ValidatorError as e:
errors[param.name] = e.error
continue
if value != MISSING:
request[request_key][param.name] = value
if param.name in self.params:
params[param.name] = value
# body parameters
if self.bp:
if "Content-Type" not in request.headers:
if next(iter(self.bp.values())).required:
errors["body"] = "is required"
else:
media_type, _ = cgi.parse_header(request.headers["Content-Type"])
if media_type not in self.bp:
errors["body"] = f"no handler for {media_type}"
else:
handler = self._swagger._get_media_type_handler(media_type)
param = self.bp[media_type]
try:
v, has_raw = await handler(request)
except ValidatorError as e:
errors[param.name] = e.error
else:
try:
value = param.validator.validate(v, has_raw)
except ValidatorError as e:
errors[param.name] = e.error
else:
request[request_key][param.name] = value
if param.name in self.params:
params[param.name] = value
# header parameters
if self.hp:
for param in self.hp:
if param.required:
try:
v = request.headers.getone(param.name)
except KeyError:
errors[param.name] = "is required"
continue
else:
v = request.headers.get(param.name, MISSING)
try:
value = param.validator.validate(v, True)
except ValidatorError as e:
errors[param.name] = e.error
continue
if value != MISSING:
request[request_key][param.name] = value
if param.name in self.params:
params[param.name] = value
# path parameters
if self.pp:
for param in self.pp:
v = request.match_info[param.name]
try:
value = param.validator.validate(v, True)
except ValidatorError as e:
errors[param.name] = e.error
continue
request[request_key][param.name] = value
if param.name in self.params:
params[param.name] = value
# cookie parameters
if self.cp:
for param in self.cp:
if param.required:
try:
v = request.cookies[param.name]
except KeyError:
errors[param.name] = "is required"
continue
else:
v = request.cookies.get(param.name, MISSING)
try:
value = param.validator.validate(v, True)
except ValidatorError as e:
errors[param.name] = e.error
continue
if value != MISSING:
request[request_key][param.name] = value
if param.name in self.params:
params[param.name] = value
if errors:
raise RequestValidationFailed(reason=json.dumps(errors), errors=errors)
return params
|
[
"typing.cast",
"cgi.parse_header",
"attr.attrs",
"json.dumps"
] |
[((1042, 1083), 'attr.attrs', 'attr.attrs', ([], {'slots': '(True)', 'auto_attribs': '(True)'}), '(slots=True, auto_attribs=True)\n', (1052, 1083), False, 'import attr\n'), ((785, 807), 'typing.cast', 'cast', (['FunctionType', 'fn'], {}), '(FunctionType, fn)\n', (789, 807), False, 'from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, cast\n'), ((5899, 5948), 'cgi.parse_header', 'cgi.parse_header', (["request.headers['Content-Type']"], {}), "(request.headers['Content-Type'])\n", (5915, 5948), False, 'import cgi\n'), ((9050, 9068), 'json.dumps', 'json.dumps', (['errors'], {}), '(errors)\n', (9060, 9068), False, 'import json\n'), ((4508, 4526), 'json.dumps', 'json.dumps', (['errors'], {}), '(errors)\n', (4518, 4526), False, 'import json\n')]
|
from django_redis import get_redis_connection
import json
from .BaseOpration import BaseOpration
from big_screen.utils import tools as t
from big_screen.serialization.allSerialization import serMobile
from big_screen.utils import re_format as f
class defaultOp(BaseOpration):
def __init__(self):
BaseOpration.__init__(self)
self.con = get_redis_connection("default")
class sessionOp(BaseOpration):
def __init__(self):
BaseOpration.__init__(self)
self.con = get_redis_connection("session")
class isworkingOp(BaseOpration):
def __init__(self):
BaseOpration.__init__(self)
self.con = get_redis_connection("isworking")
self.mob = serMobile()
def formatter_info(self, info):
"""
格式转换
:param info:
:return:
"""
lnglat = info.get("lnglat")
address = t.getaddress(lnglat)
if address is 0:
address = {
"status": 1,
"district": f.UNKNOW_DISTRICT,
'formatted_address': "",
"data_from": "",
"adcode": f.UNKNOW_DISTRICT
}
info["address"] = address
return (info.get("mobile"), info)
class massmarkOp(BaseOpration):
def __init__(self):
"""
3号仓库
"""
BaseOpration.__init__(self)
self.con = get_redis_connection("massmark")
def resetMassMarkData(self, content):
"""
重置海量点数据
:param content:
:return:
"""
self.flush_db()
for con in content:
self.pushMassMarkData(con)
# ----------------- 数据插入 -----------------
def pushMassMarkData(self, info):
"""
插入海量点数据
:param info:
:return:
"""
k, v = info
self.list_push(k, v)
# --------------- 旧方法 ---------------------
def formatter_data_from_ser(self, info):
"""
用序列化器查询出的数据进行组织
:param info:
:return:
"""
content = dict()
lnglat = info.get("lnglat")
content["time"] = info.get("time")
content["address"] = info.get("address")
content["category"] = info.get("category__name")
content["freq"] = info.get("freq")
return (lnglat, content)
def formmater_data(self, info):
"""
处理数据
:param content:
:return:
"""
content = dict()
lnglat = info.get("lnglat")
content["time"] = info.get("time")
content["address"] = info.get("address")
content["category"] = info.get("category").name
content["freq"] = info.get("freq")
return (lnglat, content)
def get_for_view(self):
"""
为首页websocket组织数据
:return:
"""
content = list()
keys = self.get_keys()
if len(keys) is 0:
return content
else:
for key in keys:
info = dict()
info["lnglat"] = key.split(",")
data = self.list_get(key)
data = list(map(lambda info: json.loads(info), data))
info["address"] = list(map(lambda info: info.pop("address"), data))[0]
info["id_count"] = len(data)
info["data"] = data
content.append(info)
return content
class broadcastOp(BaseOpration):
def __init__(self):
"""
4号仓库
"""
BaseOpration.__init__(self)
self.con = get_redis_connection("broadcast")
self.scrollKey = "scroll_n"
self.heatmapKey = "heatmap_n"
# -------------- 重置 ----------------
def resetScrollData(self, content):
"""
重置轮播表数据
:param content:
:return:
"""
self.del_key(self.scrollKey)
for con in content:
self.pushScrollData(con)
def resetHeatMapData(self, content):
"""
重置热力图数据
:param content:
:return:
"""
self.del_key(self.heatmapKey)
for con in content:
self.pushHeatMapData(con)
# -------------- 数据插入 ---------------
def pushScrollData(self, info):
"""
插入轮播表数据
:param info:
:return:
"""
self.list_push(self.scrollKey, info)
def pushHeatMapData(self, info):
"""
插入热力图数据
:param info:
:return:
"""
self.list_push(self.heatmapKey, info)
# ------------- 旧方法 -----------------
def formatter_scroll_info(self, info):
"""
格式化轮播表数据
:param info:
:return:
"""
content = list()
content.append(info.get("time"))
content.append(info.get("freq"))
content.append(info.get("category").name)
content.append(info.get("address"))
return content
def formatter_heatmap_info(self, info):
"""
格式化热力图数据
:param info:
:return:
"""
content = dict()
content["time"] = info.get("time")
lnglat = info.get("lnglat").split(",")
content["lng"] = lnglat[0]
content["lat"] = lnglat[1]
content["count"] = 1
return content
def formatter_scroll_info_from_ser(self, info):
"""
格式化轮播表数据,数据来源为序列化器
:param info:
:return:
"""
content = list()
content.append(info.get("time"))
content.append(info.get("freq"))
content.append(info.get("category__name"))
content.append(info.get("address"))
return content
class chartOp(BaseOpration):
def __init__(self):
BaseOpration.__init__(self)
self.con = get_redis_connection("chart")
class whdisOp(BaseOpration):
def __init__(self):
BaseOpration.__init__(self)
self.con = get_redis_connection("whdis")
class MobListOp(BaseOpration):
def __init__(self):
BaseOpration.__init__(self)
self.con = get_redis_connection("mob-list")
self.mob = serMobile()
def get_mob_list(self):
"""
获取合法手机id列表
:return:
"""
result = self.kv_get("mob-list")
if result == "no this key":
mob_list = self.mob.get_mobile_list()
result = mob_list
self.kv_set("mob-list", mob_list)
return result
def update_mob_list(self):
mob_list = self.mob.get_mobile_list()
self.kv_set("mob-list", mob_list)
class ObjectOp(BaseOpration):
def __init__(self):
BaseOpration.__init__(self)
self.con = get_redis_connection("object")
self.mob = serMobile()
def get_mob_list(self):
"""
获取合法手机id列表
:return:
"""
result = self.kv_get("mob-list")
if result == "no this key":
mob_list = self.mob.get_mobile_list()
result = mob_list
self.kv_set("mob-list", mob_list)
return result
def update_mob_list(self):
mob_list = self.mob.get_mobile_list()
self.kv_set("mob-list", mob_list)
|
[
"django_redis.get_redis_connection",
"big_screen.utils.tools.getaddress",
"big_screen.serialization.allSerialization.serMobile",
"json.loads"
] |
[((358, 389), 'django_redis.get_redis_connection', 'get_redis_connection', (['"""default"""'], {}), "('default')\n", (378, 389), False, 'from django_redis import get_redis_connection\n'), ((502, 533), 'django_redis.get_redis_connection', 'get_redis_connection', (['"""session"""'], {}), "('session')\n", (522, 533), False, 'from django_redis import get_redis_connection\n'), ((648, 681), 'django_redis.get_redis_connection', 'get_redis_connection', (['"""isworking"""'], {}), "('isworking')\n", (668, 681), False, 'from django_redis import get_redis_connection\n'), ((701, 712), 'big_screen.serialization.allSerialization.serMobile', 'serMobile', ([], {}), '()\n', (710, 712), False, 'from big_screen.serialization.allSerialization import serMobile\n'), ((879, 899), 'big_screen.utils.tools.getaddress', 't.getaddress', (['lnglat'], {}), '(lnglat)\n', (891, 899), True, 'from big_screen.utils import tools as t\n'), ((1383, 1415), 'django_redis.get_redis_connection', 'get_redis_connection', (['"""massmark"""'], {}), "('massmark')\n", (1403, 1415), False, 'from django_redis import get_redis_connection\n'), ((3527, 3560), 'django_redis.get_redis_connection', 'get_redis_connection', (['"""broadcast"""'], {}), "('broadcast')\n", (3547, 3560), False, 'from django_redis import get_redis_connection\n'), ((5719, 5748), 'django_redis.get_redis_connection', 'get_redis_connection', (['"""chart"""'], {}), "('chart')\n", (5739, 5748), False, 'from django_redis import get_redis_connection\n'), ((5859, 5888), 'django_redis.get_redis_connection', 'get_redis_connection', (['"""whdis"""'], {}), "('whdis')\n", (5879, 5888), False, 'from django_redis import get_redis_connection\n'), ((6001, 6033), 'django_redis.get_redis_connection', 'get_redis_connection', (['"""mob-list"""'], {}), "('mob-list')\n", (6021, 6033), False, 'from django_redis import get_redis_connection\n'), ((6053, 6064), 'big_screen.serialization.allSerialization.serMobile', 'serMobile', ([], {}), '()\n', (6062, 6064), False, 'from big_screen.serialization.allSerialization import serMobile\n'), ((6610, 6640), 'django_redis.get_redis_connection', 'get_redis_connection', (['"""object"""'], {}), "('object')\n", (6630, 6640), False, 'from django_redis import get_redis_connection\n'), ((6660, 6671), 'big_screen.serialization.allSerialization.serMobile', 'serMobile', ([], {}), '()\n', (6669, 6671), False, 'from big_screen.serialization.allSerialization import serMobile\n'), ((3123, 3139), 'json.loads', 'json.loads', (['info'], {}), '(info)\n', (3133, 3139), False, 'import json\n')]
|
import logging
from collections import deque
from abc import ABC, abstractmethod
import math
from lxml import etree
from pyqtree import Index as PqtreeIndex
from PIL import Image, ImageDraw, ImageColor
from sciencebeam_gym.structured_document.svg import (
SVG_NSMAP,
SVG_DOC,
SVG_RECT,
)
DEFAULT_NEARBY_TOLERANCE = 5
def get_logger():
return logging.getLogger(__name__)
class AnnotationBlock(object):
def __init__(self, tag, bounding_box):
self.tag = tag
self.bounding_box = bounding_box
def merge_with(self, other):
return AnnotationBlock(
self.tag,
self.bounding_box.include(other.bounding_box)
)
def __str__(self):
return 'AnnotationBlock({}, {})'.format(self.tag, self.bounding_box)
def __repr__(self):
return str(self)
class BlockPoint(object):
def __init__(self, block, x, y):
self.block = block
self.point = (x, y)
def __str__(self):
return 'BlockPoint({}, {})'.format(self.block, self.point)
def __repr__(self):
return str(self)
def __len__(self):
return len(self.point)
def __getitem__(self, index):
return self.point[index]
def _to_bbox(bb):
return (bb.x, bb.y, bb.x + bb.width - 1, bb.y + bb.height - 1)
class DeletableWrapper(object):
def __init__(self, data):
self.data = data
self.deleted = False
def __hash__(self):
return hash(self.data)
def __eq__(self, other):
return self.data == other.data
class BlockSearch(object):
def __init__(self, blocks):
bboxs = [block.bounding_box for block in blocks]
xmax = max([bb.x + bb.width for bb in bboxs])
ymax = max([bb.y + bb.height for bb in bboxs])
self.spindex = PqtreeIndex(bbox=(0, 0, xmax, ymax))
self.wrapper_map = {}
for block in blocks:
wrapper = DeletableWrapper(block)
self.wrapper_map[block] = wrapper
self.spindex.insert(wrapper, _to_bbox(block.bounding_box))
def find_intersection_with(self, search_bounding_box):
return [
wrapper.data
for wrapper in self.spindex.intersect(_to_bbox(search_bounding_box))
if not wrapper.deleted
]
def remove(self, block):
wrapper = self.wrapper_map.get(block)
if wrapper is not None:
wrapper.deleted = True
def merge_blocks(blocks, nearby_tolerance=0):
if len(blocks) <= 1:
return blocks
merged_blocks = deque()
logger = get_logger()
logger.debug('nearby_tolerance: %s', nearby_tolerance)
logger.debug('blocks: %s', blocks)
logger.debug('bboxs: %s', [_to_bbox(block.bounding_box) for block in blocks])
tags = sorted({b.tag for b in blocks})
logger.debug('tags: %s', tags)
remaining_blocks = deque(blocks)
search_by_tag = {
tag: BlockSearch([b for b in remaining_blocks if b.tag == tag])
for tag in tags
}
while len(remaining_blocks) >= 2:
merged_block = remaining_blocks.popleft()
search = search_by_tag[merged_block.tag]
search.remove(merged_block)
search_bounding_box = merged_block.bounding_box.with_margin(1 + nearby_tolerance, 0)
logger.debug('search_bounding_box: %s (%s)',
search_bounding_box, _to_bbox(search_bounding_box))
neighbours = search.find_intersection_with(search_bounding_box)
logger.debug('neighbours: %s', neighbours)
neighbours_blocks_count = 0
for neighbour in neighbours:
if neighbour.tag == merged_block.tag:
merged_block = merged_block.merge_with(neighbour)
search.remove(neighbour)
remaining_blocks.remove(neighbour)
neighbours_blocks_count += 1
if neighbours_blocks_count == 0 or len(remaining_blocks) == 0:
logger.debug(
'no or all remaining blocks merged, mark block as merged: %d',
neighbours_blocks_count
)
merged_blocks.append(merged_block)
else:
logger.debug(
'some but not all remaining blocks merged, continue search: %d',
neighbours_blocks_count
)
remaining_blocks.appendleft(merged_block)
result = list(merged_blocks) + list(remaining_blocks)
return result
def expand_bounding_box(bb):
return bb.with_margin(4, 2)
def expand_block(block):
return AnnotationBlock(block.tag, expand_bounding_box(block.bounding_box))
def expand_blocks(blocks):
return [expand_block(block) for block in blocks]
def annotation_document_page_to_annotation_blocks(structured_document, page):
tags_and_tokens = (
(structured_document.get_tag_value(token), token)
for line in structured_document.get_lines_of_page(page)
for token in structured_document.get_tokens_of_line(line)
)
tags_and_bounding_boxes = (
(tag, structured_document.get_bounding_box(token))
for tag, token in tags_and_tokens
if tag
)
return [
AnnotationBlock(tag, bounding_box)
for tag, bounding_box in tags_and_bounding_boxes
if bounding_box
]
def annotation_document_page_to_merged_blocks(structured_document, page, **kwargs):
return merge_blocks(
annotation_document_page_to_annotation_blocks(structured_document, page),
**kwargs
)
def extend_color_map_for_tags(color_map, tags):
updated_color_map = dict(color_map)
for tag in tags:
if tag not in updated_color_map:
updated_color_map[tag] = (
max(updated_color_map.values()) + 1 if len(updated_color_map) > 0 else 1
)
return updated_color_map
def extend_color_map_for_blocks(color_map, blocks):
return extend_color_map_for_tags(
color_map,
sorted({b.tag for b in blocks})
)
class AbstractSurface(ABC):
@abstractmethod
def rect(self, bounding_box, color, tag=None):
pass
class SvgSurface(AbstractSurface):
def __init__(self, width, height, background):
if not (width and height):
raise AttributeError('width and height are required')
self.svg_root = etree.Element(SVG_DOC, nsmap=SVG_NSMAP, attrib={
'width': str(width),
'height': str(height)
})
if background:
self.svg_root.append(etree.Element(SVG_RECT, attrib={
'width': '100%',
'height': '100%',
'fill': background,
'class': 'background'
}))
def rect(self, bounding_box, color, tag=None):
attrib = {
'class': str(tag),
'shape-rendering': 'crispEdges',
'x': str(bounding_box.x),
'y': str(bounding_box.y),
'width': str(bounding_box.width),
'height': str(bounding_box.height)
}
if color:
attrib['fill'] = str(color)
rect = etree.Element(SVG_RECT, attrib=attrib)
self.svg_root.append(rect)
return rect
def color_to_tuple(color):
if isinstance(color, tuple):
return color
return ImageColor.getrgb(color)
class ImageSurface(AbstractSurface):
def __init__(self, width, height, background):
if not (width and height):
raise AttributeError('width and height are required')
width = int(math.ceil(width))
height = int(math.ceil(height))
if background:
self.image = Image.new('RGB', (width, height), color_to_tuple(background))
else:
self.image = Image.new('RGBA', (width, height), (255, 255, 255, 0))
self._draw = ImageDraw.Draw(self.image)
def rect(self, bounding_box, color, tag=None):
if color is None:
return
self._draw.rectangle(
(
(bounding_box.x, bounding_box.y),
(bounding_box.x + bounding_box.width, bounding_box.y + bounding_box.height)
),
fill=color_to_tuple(color)
)
def annotated_blocks_to_surface(blocks, surface, color_map):
for block in blocks:
color = color_map.get(block.tag)
surface.rect(block.bounding_box, color, block.tag)
def annotated_blocks_to_svg(blocks, color_map, width=None, height=None, background=None):
surface = SvgSurface(width, height, background)
annotated_blocks_to_surface(blocks, surface, color_map)
return surface.svg_root
def annotated_blocks_to_image(
blocks, color_map, width=None, height=None, background=None,
scale_to_size=None):
surface = ImageSurface(width, height, background)
annotated_blocks_to_surface(blocks, surface, color_map)
image = surface.image
if scale_to_size:
image = image.resize(scale_to_size, Image.NEAREST)
return image
|
[
"PIL.Image.new",
"lxml.etree.Element",
"math.ceil",
"pyqtree.Index",
"PIL.ImageColor.getrgb",
"logging.getLogger",
"PIL.ImageDraw.Draw",
"collections.deque"
] |
[((363, 390), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (380, 390), False, 'import logging\n'), ((2547, 2554), 'collections.deque', 'deque', ([], {}), '()\n', (2552, 2554), False, 'from collections import deque\n'), ((2862, 2875), 'collections.deque', 'deque', (['blocks'], {}), '(blocks)\n', (2867, 2875), False, 'from collections import deque\n'), ((7254, 7278), 'PIL.ImageColor.getrgb', 'ImageColor.getrgb', (['color'], {}), '(color)\n', (7271, 7278), False, 'from PIL import Image, ImageDraw, ImageColor\n'), ((1802, 1838), 'pyqtree.Index', 'PqtreeIndex', ([], {'bbox': '(0, 0, xmax, ymax)'}), '(bbox=(0, 0, xmax, ymax))\n', (1813, 1838), True, 'from pyqtree import Index as PqtreeIndex\n'), ((7066, 7104), 'lxml.etree.Element', 'etree.Element', (['SVG_RECT'], {'attrib': 'attrib'}), '(SVG_RECT, attrib=attrib)\n', (7079, 7104), False, 'from lxml import etree\n'), ((7774, 7800), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['self.image'], {}), '(self.image)\n', (7788, 7800), False, 'from PIL import Image, ImageDraw, ImageColor\n'), ((7491, 7507), 'math.ceil', 'math.ceil', (['width'], {}), '(width)\n', (7500, 7507), False, 'import math\n'), ((7530, 7547), 'math.ceil', 'math.ceil', (['height'], {}), '(height)\n', (7539, 7547), False, 'import math\n'), ((7698, 7752), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(width, height)', '(255, 255, 255, 0)'], {}), "('RGBA', (width, height), (255, 255, 255, 0))\n", (7707, 7752), False, 'from PIL import Image, ImageDraw, ImageColor\n'), ((6477, 6591), 'lxml.etree.Element', 'etree.Element', (['SVG_RECT'], {'attrib': "{'width': '100%', 'height': '100%', 'fill': background, 'class': 'background'}"}), "(SVG_RECT, attrib={'width': '100%', 'height': '100%', 'fill':\n background, 'class': 'background'})\n", (6490, 6591), False, 'from lxml import etree\n')]
|
import datetime
def add_time(start, duration, weekday_name = ' '):
start = datetime.datetime.strptime(start,'%I:%M %p')
h_duration,m_duration = duration.split(':')
t_duration = int(h_duration)*60 + int(m_duration)
calc_time = start + datetime.timedelta(minutes=int(t_duration))
day = calc_time.day
time = calc_time.strftime('%I:%M %p')
if day == 1:
day_text = ''
elif day == 2:
day_text = '(next day)'
else:
day_text = '('+ str(day-1) + ' days later)'
list_weekdays = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
if weekday_name != ' ' and day >= 2:
weekday_name = weekday_name.lower().capitalize()
weekday_name_index = list_weekdays.index(weekday_name)
i=1
indexw = weekday_name_index
while i != day:
if indexw == 6:
indexw = 0
i+=1
else:
indexw = indexw + 1
i+=1
new_weekday_name = list_weekdays[indexw]
new_time = str(time + ', ' + new_weekday_name + ' ' + day_text)
elif weekday_name != ' ' and day == 1:
new_time = str(time + ', '+ weekday_name.lower().capitalize())
else:
new_time = str(time + ' ' + day_text)
if new_time[0] == '0':
new_time = new_time[1:]
else:
new_time = new_time
return new_time.strip()
|
[
"datetime.datetime.strptime"
] |
[((81, 126), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['start', '"""%I:%M %p"""'], {}), "(start, '%I:%M %p')\n", (107, 126), False, 'import datetime\n')]
|
import time
from pdm.constants import BUFFER
from urllib.request import (
build_opener,
urlopen,
Request
)
from http.client import HTTPResponse, HTTPMessage
from threading import Thread, Event
from pdm.utils import get_filename
from concurrent.futures import Future
from pdm.hooker import ProgressDownloadHooker
class _Retriever:
def __init__(
self,
http_response: HTTPResponse,
file: str,
part: int,
single_mode=False
):
self.http = http_response
if single_mode:
self.file = file
else:
self.file = file + '.part' + str(part)
self.speed_download = 0
def get_file(self):
return open(self.file, 'wb')
def download(self):
file = self.get_file()
chunk_size = int(BUFFER.__repr__())
while True:
# adapted from https://github.com/choldgraf/download/blob/master/download/download.py#L380
# with some modifications
t0 = time.time()
chunk = self.http.read(chunk_size)
dt = time.time() - t0
if dt < 0.005:
chunk_size *= 2
elif dt > 0.1 and chunk_size > BUFFER:
chunk_size = chunk_size // 2
if not chunk:
break
file.write(chunk)
self.speed_download = len(chunk) * 8
if chunk == b'':
break
self.speed_download = 'finished'
file.close()
return self.file
class Retriever1Connections:
def __init__(self, url: str, info_length: int, filename: str=None):
self.opener = build_opener()
self.filename = filename
self.url = url
self.length = info_length
def download(self):
res = self.opener.open(self.url)
filename = self.filename or get_filename(res)
r = _Retriever(res, filename, 0, True)
r.download()
return filename
class Retriever2Connections:
def __init__(self, url: str, length: int, filename: str=None):
self.opener = build_opener()
self.filename = filename
self.url = url
self.length = self.get_length(length)
def get_length(self, length: int):
divided = length / 2
if not divided.is_integer():
final = [0, divided - 0.5, divided + 0.5, length]
elif divided.is_integer():
final = [0, divided - 1, divided, length]
return final
def _download(self, part: int, start_from: int, end_from: int, future: Future):
req = Request(self.url)
req.headers['Range'] = 'bytes=%s-%s' % (int(start_from), int(end_from))
res = self.opener.open(req)
filename = self.filename or get_filename(res)
r = _Retriever(res, filename, part)
future.set_result(r.download())
def download(self):
fut1 = Future()
thread = Thread(target=self._download, name='worker_pdm_0', daemon=True, args=(
0,
self.length[0],
self.length[1],
fut1
))
thread.start()
fut2 = Future()
thread = Thread(target=self._download, name='worker_pdm_1', daemon=True, args=(
1,
self.length[2],
self.length[3],
fut2
))
thread.start()
return [
fut1.result(),
fut2.result()
]
class Retriever3Connections:
def __init__(self, url: str, length: int, filename: str=None):
self.opener = build_opener()
self.filename = filename
self.url = url
self.length = self.get_length(length)
def get_length(self, length: int):
final = [0, int(length / 3), int(length / 3 + length / 3), length]
return final
def _download(
self,
part: int,
start_from: int,
end_from: int,
future: Future,
progress_bar: ProgressDownloadHooker
):
req = Request(self.url)
req.headers['Range'] = 'bytes=%s-%s' % (int(start_from), int(end_from))
res = self.opener.open(req)
filename = self.filename or get_filename(res)
r = _Retriever(res, filename, part)
progress_bar.add_worker(r)
future.set_result(r.download())
def download(self):
fut1 = Future()
print('Download Using 3 Connections')
progress_bar = ProgressDownloadHooker()
thread = Thread(target=self._download, name='worker_pdm_0', daemon=True, args=(
0,
self.length[0],
self.length[1],
fut1,
progress_bar
))
thread.start()
fut2 = Future()
thread = Thread(target=self._download, name='worker_pdm_1', daemon=True, args=(
1,
self.length[1],
self.length[2],
fut2,
progress_bar
))
thread.start()
fut3 = Future()
thread = Thread(target=self._download, name='worker_pdm_2', daemon=True, args=(
2,
self.length[2],
self.length[3],
fut3,
progress_bar
))
thread.start()
progress_bar.start()
result = [
fut1.result(),
fut2.result(),
fut3.result()
]
progress_bar.stop()
return result
class Retriever:
def __init__(
self,
url: str,
filename: str,
timeout: int=None,
connections: int=2
):
# Testing Connection to URL given
tester = urlopen(url, timeout=timeout)
tester.close()
self.filename = filename
self.url = url
self._connections = connections
def _download_single_conn(self):
r = Retriever1Connections(self.url, self.filename)
return r.download()
def _download_multi_conn(self, info_length):
if self._connections < 1 or self._connections > 4:
raise ValueError('invalid connections value, maximum connections allowed is 4')
else:
if self._connections == 2:
r = Retriever2Connections(self.url, info_length, self.filename)
return r.download()
elif self._connections == 3:
r = Retriever3Connections(self.url, info_length, self.filename)
return r.download()
def get_info_length(self):
return urlopen(self.url).length
def retrieve(self):
info_length = self.get_info_length()
# for doesn't support get length file like google-drive
# multi connection require to see length of the file
if info_length is None:
# if pdm can't retrieve Content-Length info
# force download to single connection
return self._download_single_conn()
else:
if self._connections == 1:
return self._download_single_conn()
else:
return self._download_multi_conn(info_length)
# def _retrieve(self, part, filename, start_from, end_from, event, single_mode=False):
# r = Request(self.url)
# if not single_mode:
# r.headers['Range'] = 'bytes=%s-%s' % (int(start_from), int(end_from))
# print(r.headers)
# http_response = self.opener.open(r)
# print(http_response.headers['Content-Disposition'])
# print(http_response.length, part)
# if single_mode:
# _ = _Retriever(self.url, http_response, filename, part, True)
# _.download()
# event.set()
# else:
# _ = _Retriever(
# self.url,
# http_response,
# filename,
# part
# )
# _.download()
# event.set()
# def get_length(self, length: int):
# divided = length / 2
# if not divided.is_integer():
# final = [0, divided - 0.5, divided + 0.5, length]
# elif divided.is_integer():
# final = [0, divided - 1, divided, length]
# return final
# def retrieve(self):
# info_length = self.get_info_length()
# # for doesn't support get length file like google-drive
# # multi connection require to see length of the file
# if info_length is None:
# return self._download_single_conn()
# else:
# return self._download_multi_conn(info_length)
# def _download_single_conn(self):
# e = Event()
# self._retrieve(None, self.filename, None, None, e, True)
# return [self.filename]
# def _download_multi_conn(self, info_length):
# i = 0
# length = self.get_length(info_length)
# wait_event1 = Event()
# thread = Thread(target=self._retrieve, name='worker_pdm_' + str(i), daemon=True, args=(
# i,
# self.filename,
# length[0],
# length[1],
# wait_event1
# ))
# thread.start()
# i += 1
# wait_event2= Event()
# thread = Thread(target=self._retrieve, name='worker_pdm_' + str(i), daemon=True, args=(
# i,
# self.filename,
# length[2],
# length[3],
# wait_event2
# ))
# thread.start()
# wait_event1.wait()
# wait_event2.wait()
# return [
# self.filename + '.part0',
# self.filename + '.part1'
# ]
|
[
"threading.Thread",
"concurrent.futures.Future",
"urllib.request.Request",
"urllib.request.urlopen",
"urllib.request.build_opener",
"time.time",
"pdm.hooker.ProgressDownloadHooker",
"pdm.utils.get_filename",
"pdm.constants.BUFFER.__repr__"
] |
[((1645, 1659), 'urllib.request.build_opener', 'build_opener', ([], {}), '()\n', (1657, 1659), False, 'from urllib.request import build_opener, urlopen, Request\n'), ((2081, 2095), 'urllib.request.build_opener', 'build_opener', ([], {}), '()\n', (2093, 2095), False, 'from urllib.request import build_opener, urlopen, Request\n'), ((2575, 2592), 'urllib.request.Request', 'Request', (['self.url'], {}), '(self.url)\n', (2582, 2592), False, 'from urllib.request import build_opener, urlopen, Request\n'), ((2887, 2895), 'concurrent.futures.Future', 'Future', ([], {}), '()\n', (2893, 2895), False, 'from concurrent.futures import Future\n'), ((2913, 3028), 'threading.Thread', 'Thread', ([], {'target': 'self._download', 'name': '"""worker_pdm_0"""', 'daemon': '(True)', 'args': '(0, self.length[0], self.length[1], fut1)'}), "(target=self._download, name='worker_pdm_0', daemon=True, args=(0,\n self.length[0], self.length[1], fut1))\n", (2919, 3028), False, 'from threading import Thread, Event\n'), ((3121, 3129), 'concurrent.futures.Future', 'Future', ([], {}), '()\n', (3127, 3129), False, 'from concurrent.futures import Future\n'), ((3147, 3262), 'threading.Thread', 'Thread', ([], {'target': 'self._download', 'name': '"""worker_pdm_1"""', 'daemon': '(True)', 'args': '(1, self.length[2], self.length[3], fut2)'}), "(target=self._download, name='worker_pdm_1', daemon=True, args=(1,\n self.length[2], self.length[3], fut2))\n", (3153, 3262), False, 'from threading import Thread, Event\n'), ((3539, 3553), 'urllib.request.build_opener', 'build_opener', ([], {}), '()\n', (3551, 3553), False, 'from urllib.request import build_opener, urlopen, Request\n'), ((3983, 4000), 'urllib.request.Request', 'Request', (['self.url'], {}), '(self.url)\n', (3990, 4000), False, 'from urllib.request import build_opener, urlopen, Request\n'), ((4330, 4338), 'concurrent.futures.Future', 'Future', ([], {}), '()\n', (4336, 4338), False, 'from concurrent.futures import Future\n'), ((4408, 4432), 'pdm.hooker.ProgressDownloadHooker', 'ProgressDownloadHooker', ([], {}), '()\n', (4430, 4432), False, 'from pdm.hooker import ProgressDownloadHooker\n'), ((4450, 4579), 'threading.Thread', 'Thread', ([], {'target': 'self._download', 'name': '"""worker_pdm_0"""', 'daemon': '(True)', 'args': '(0, self.length[0], self.length[1], fut1, progress_bar)'}), "(target=self._download, name='worker_pdm_0', daemon=True, args=(0,\n self.length[0], self.length[1], fut1, progress_bar))\n", (4456, 4579), False, 'from threading import Thread, Event\n'), ((4684, 4692), 'concurrent.futures.Future', 'Future', ([], {}), '()\n', (4690, 4692), False, 'from concurrent.futures import Future\n'), ((4710, 4839), 'threading.Thread', 'Thread', ([], {'target': 'self._download', 'name': '"""worker_pdm_1"""', 'daemon': '(True)', 'args': '(1, self.length[1], self.length[2], fut2, progress_bar)'}), "(target=self._download, name='worker_pdm_1', daemon=True, args=(1,\n self.length[1], self.length[2], fut2, progress_bar))\n", (4716, 4839), False, 'from threading import Thread, Event\n'), ((4944, 4952), 'concurrent.futures.Future', 'Future', ([], {}), '()\n', (4950, 4952), False, 'from concurrent.futures import Future\n'), ((4970, 5099), 'threading.Thread', 'Thread', ([], {'target': 'self._download', 'name': '"""worker_pdm_2"""', 'daemon': '(True)', 'args': '(2, self.length[2], self.length[3], fut3, progress_bar)'}), "(target=self._download, name='worker_pdm_2', daemon=True, args=(2,\n self.length[2], self.length[3], fut3, progress_bar))\n", (4976, 5099), False, 'from threading import Thread, Event\n'), ((5590, 5619), 'urllib.request.urlopen', 'urlopen', (['url'], {'timeout': 'timeout'}), '(url, timeout=timeout)\n', (5597, 5619), False, 'from urllib.request import build_opener, urlopen, Request\n'), ((811, 828), 'pdm.constants.BUFFER.__repr__', 'BUFFER.__repr__', ([], {}), '()\n', (826, 828), False, 'from pdm.constants import BUFFER\n'), ((1008, 1019), 'time.time', 'time.time', ([], {}), '()\n', (1017, 1019), False, 'import time\n'), ((1852, 1869), 'pdm.utils.get_filename', 'get_filename', (['res'], {}), '(res)\n', (1864, 1869), False, 'from pdm.utils import get_filename\n'), ((2745, 2762), 'pdm.utils.get_filename', 'get_filename', (['res'], {}), '(res)\n', (2757, 2762), False, 'from pdm.utils import get_filename\n'), ((4153, 4170), 'pdm.utils.get_filename', 'get_filename', (['res'], {}), '(res)\n', (4165, 4170), False, 'from pdm.utils import get_filename\n'), ((6438, 6455), 'urllib.request.urlopen', 'urlopen', (['self.url'], {}), '(self.url)\n', (6445, 6455), False, 'from urllib.request import build_opener, urlopen, Request\n'), ((1084, 1095), 'time.time', 'time.time', ([], {}), '()\n', (1093, 1095), False, 'import time\n')]
|
from unittest import mock
from unittest.mock import patch
from cartography.intel.gsuite import api
def test_get_all_users():
client = mock.MagicMock()
raw_request_1 = mock.MagicMock()
raw_request_2 = mock.MagicMock()
user1 = {'primaryEmail': '<EMAIL>'}
user2 = {'primaryEmail': '<EMAIL>'}
user3 = {'primaryEmail': '<EMAIL>'}
client.users().list.return_value = raw_request_1
client.users().list_next.side_effect = [raw_request_2, None]
raw_request_1.execute.return_value = {'users': [user1, user2]}
raw_request_2.execute.return_value = {'users': [user3]}
result = api.get_all_users(client)
emails = [user['primaryEmail'] for response_object in result for user in response_object['users']]
expected = [
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
]
assert sorted(emails) == sorted(expected)
def test_get_all_groups():
client = mock.MagicMock()
raw_request_1 = mock.MagicMock()
raw_request_2 = mock.MagicMock()
group1 = {'email': '<EMAIL>'}
group2 = {'email': '<EMAIL>'}
group3 = {'email': '<EMAIL>'}
client.groups().list.return_value = raw_request_1
client.groups().list_next.side_effect = [raw_request_2, None]
raw_request_1.execute.return_value = {'groups': [group1, group2]}
raw_request_2.execute.return_value = {'groups': [group3]}
result = api.get_all_groups(client)
emails = [group['email'] for response_object in result for group in response_object['groups']]
expected = [
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
]
assert sorted(emails) == sorted(expected)
@patch('cartography.intel.gsuite.api.cleanup_gsuite_users')
@patch('cartography.intel.gsuite.api.load_gsuite_users')
@patch(
'cartography.intel.gsuite.api.get_all_users', return_value=[
{'users': [{'primaryEmail': '<EMAIL>'}, {'primaryEmail': '<EMAIL>'}]},
{'users': [{'primaryEmail': '<EMAIL>'}, {'primaryEmail': '<EMAIL>'}]},
],
)
def test_sync_gsuite_users(get_all_users, load_gsuite_users, cleanup_gsuite_users):
client = mock.MagicMock()
gsuite_update_tag = 1
session = mock.MagicMock()
common_job_param = {
"UPDATE_TAG": gsuite_update_tag,
}
api.sync_gsuite_users(session, client, gsuite_update_tag, common_job_param)
users = api.transform_users(get_all_users())
load_gsuite_users.assert_called_with(
session, users, gsuite_update_tag,
)
cleanup_gsuite_users.assert_called_once()
@patch('cartography.intel.gsuite.api.sync_gsuite_members')
@patch('cartography.intel.gsuite.api.cleanup_gsuite_groups')
@patch('cartography.intel.gsuite.api.load_gsuite_groups')
@patch(
'cartography.intel.gsuite.api.get_all_groups', return_value=[
{'groups': [{'email': '<EMAIL>'}, {'email': '<EMAIL>'}]},
{'groups': [{'email': '<EMAIL>'}, {'email': '<EMAIL>'}]},
],
)
def test_sync_gsuite_groups(all_groups, load_gsuite_groups, cleanup_gsuite_groups, sync_gsuite_members):
admin_client = mock.MagicMock()
session = mock.MagicMock()
gsuite_update_tag = 1
common_job_param = {
"UPDATE_TAG": gsuite_update_tag,
}
api.sync_gsuite_groups(session, admin_client, gsuite_update_tag, common_job_param)
groups = api.transform_groups(all_groups())
load_gsuite_groups.assert_called_with(session, groups, gsuite_update_tag)
cleanup_gsuite_groups.assert_called_once()
sync_gsuite_members.assert_called_with(groups, session, admin_client, gsuite_update_tag)
def test_load_gsuite_groups():
ingestion_qry = """
UNWIND {GroupData} as group
MERGE (g:GSuiteGroup{id: group.id})
ON CREATE SET
g.firstseen = {UpdateTag}
ON MATCH SET
g.group_id = group.id,
g.admin_created = group.adminCreated,
g.description = group.description,
g.direct_members_count = group.directMembersCount,
g.email = group.email,
g.etag = group.etag,
g.kind = group.kind,
g.name = group.name,
g.lastupdated = {UpdateTag}
"""
groups = []
update_tag = 1
session = mock.MagicMock()
api.load_gsuite_groups(session, groups, update_tag)
session.run.assert_called_with(
ingestion_qry,
GroupData=groups,
UpdateTag=update_tag,
)
def test_load_gsuite_users():
ingestion_qry = """
UNWIND {UserData} as user
MERGE (u:GSuiteUser{id: user.id})
ON CREATE SET
u.firstseen = {UpdateTag}
ON MATCH SET
u.user_id = user.id,
u.agreed_to_terms = user.agreedToTerms,
u.archived = user.archived,
u.change_password_at_next_login = user.changePasswordAtNextLogin,
u.creation_time = user.creationTime,
u.customer_id = user.customerId,
u.etag = user.etag,
u.include_in_global_address_list = user.includeInGlobalAddressList,
u.ip_whitelisted = user.ipWhitelisted,
u.is_admin = user.isAdmin,
u.is_delegated_admin = user.isDelegatedAdmin,
u.is_enforced_in_2_sv = user.isEnforcedIn2Sv,
u.is_enrolled_in_2_sv = user.isEnrolledIn2Sv,
u.is_mailbox_setup = user.isMailboxSetup,
u.kind = user.kind,
u.last_login_time = user.lastLoginTime,
u.name = user.name.fullName,
u.family_name = user.name.familyName,
u.given_name = user.name.givenName,
u.org_unit_path = user.orgUnitPath,
u.primary_email = user.primaryEmail,
u.email = user.primaryEmail,
u.suspended = user.suspended,
u.thumbnail_photo_etag = user.thumbnailPhotoEtag,
u.thumbnail_photo_url = user.thumbnailPhotoUrl,
u.lastupdated = {UpdateTag}
"""
users = []
update_tag = 1
session = mock.MagicMock()
api.load_gsuite_users(session, users, update_tag)
session.run.assert_called_with(
ingestion_qry,
UserData=users,
UpdateTag=update_tag,
)
def test_transform_groups():
param = [
{'groups': [{'email': '<EMAIL>'}, {'email': '<EMAIL>'}]},
{'groups': [{'email': '<EMAIL>'}, {'email': '<EMAIL>'}]},
]
expected = [
{'email': '<EMAIL>'}, {'email': 'group<EMAIL>'},
{'email': '<EMAIL>'}, {'email': '<EMAIL>'},
]
result = api.transform_groups(param)
assert result == expected
def test_transform_users():
param = [
{'users': [{'primaryEmail': '<EMAIL>'}, {'primaryEmail': '<EMAIL>'}]},
{'users': [{'primaryEmail': '<EMAIL>'}, {'primaryEmail': '<EMAIL>'}]},
]
expected = [
{'primaryEmail': '<EMAIL>'}, {'primaryEmail': '<EMAIL>'},
{'primaryEmail': '<EMAIL>'}, {'primaryEmail': '<EMAIL>'},
]
result = api.transform_users(param)
assert result == expected
|
[
"cartography.intel.gsuite.api.transform_users",
"cartography.intel.gsuite.api.get_all_users",
"unittest.mock.MagicMock",
"cartography.intel.gsuite.api.load_gsuite_groups",
"cartography.intel.gsuite.api.sync_gsuite_users",
"cartography.intel.gsuite.api.load_gsuite_users",
"unittest.mock.patch",
"cartography.intel.gsuite.api.sync_gsuite_groups",
"cartography.intel.gsuite.api.transform_groups",
"cartography.intel.gsuite.api.get_all_groups"
] |
[((1630, 1688), 'unittest.mock.patch', 'patch', (['"""cartography.intel.gsuite.api.cleanup_gsuite_users"""'], {}), "('cartography.intel.gsuite.api.cleanup_gsuite_users')\n", (1635, 1688), False, 'from unittest.mock import patch\n'), ((1690, 1745), 'unittest.mock.patch', 'patch', (['"""cartography.intel.gsuite.api.load_gsuite_users"""'], {}), "('cartography.intel.gsuite.api.load_gsuite_users')\n", (1695, 1745), False, 'from unittest.mock import patch\n'), ((1747, 1963), 'unittest.mock.patch', 'patch', (['"""cartography.intel.gsuite.api.get_all_users"""'], {'return_value': "[{'users': [{'primaryEmail': '<EMAIL>'}, {'primaryEmail': '<EMAIL>'}]}, {\n 'users': [{'primaryEmail': '<EMAIL>'}, {'primaryEmail': '<EMAIL>'}]}]"}), "('cartography.intel.gsuite.api.get_all_users', return_value=[{'users':\n [{'primaryEmail': '<EMAIL>'}, {'primaryEmail': '<EMAIL>'}]}, {'users':\n [{'primaryEmail': '<EMAIL>'}, {'primaryEmail': '<EMAIL>'}]}])\n", (1752, 1963), False, 'from unittest.mock import patch\n'), ((2498, 2555), 'unittest.mock.patch', 'patch', (['"""cartography.intel.gsuite.api.sync_gsuite_members"""'], {}), "('cartography.intel.gsuite.api.sync_gsuite_members')\n", (2503, 2555), False, 'from unittest.mock import patch\n'), ((2557, 2616), 'unittest.mock.patch', 'patch', (['"""cartography.intel.gsuite.api.cleanup_gsuite_groups"""'], {}), "('cartography.intel.gsuite.api.cleanup_gsuite_groups')\n", (2562, 2616), False, 'from unittest.mock import patch\n'), ((2618, 2674), 'unittest.mock.patch', 'patch', (['"""cartography.intel.gsuite.api.load_gsuite_groups"""'], {}), "('cartography.intel.gsuite.api.load_gsuite_groups')\n", (2623, 2674), False, 'from unittest.mock import patch\n'), ((2676, 2869), 'unittest.mock.patch', 'patch', (['"""cartography.intel.gsuite.api.get_all_groups"""'], {'return_value': "[{'groups': [{'email': '<EMAIL>'}, {'email': '<EMAIL>'}]}, {'groups': [{\n 'email': '<EMAIL>'}, {'email': '<EMAIL>'}]}]"}), "('cartography.intel.gsuite.api.get_all_groups', return_value=[{\n 'groups': [{'email': '<EMAIL>'}, {'email': '<EMAIL>'}]}, {'groups': [{\n 'email': '<EMAIL>'}, {'email': '<EMAIL>'}]}])\n", (2681, 2869), False, 'from unittest.mock import patch\n'), ((141, 157), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (155, 157), False, 'from unittest import mock\n'), ((178, 194), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (192, 194), False, 'from unittest import mock\n'), ((215, 231), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (229, 231), False, 'from unittest import mock\n'), ((614, 639), 'cartography.intel.gsuite.api.get_all_users', 'api.get_all_users', (['client'], {}), '(client)\n', (631, 639), False, 'from cartography.intel.gsuite import api\n'), ((912, 928), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (926, 928), False, 'from unittest import mock\n'), ((949, 965), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (963, 965), False, 'from unittest import mock\n'), ((986, 1002), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (1000, 1002), False, 'from unittest import mock\n'), ((1374, 1400), 'cartography.intel.gsuite.api.get_all_groups', 'api.get_all_groups', (['client'], {}), '(client)\n', (1392, 1400), False, 'from cartography.intel.gsuite import api\n'), ((2083, 2099), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (2097, 2099), False, 'from unittest import mock\n'), ((2140, 2156), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (2154, 2156), False, 'from unittest import mock\n'), ((2233, 2308), 'cartography.intel.gsuite.api.sync_gsuite_users', 'api.sync_gsuite_users', (['session', 'client', 'gsuite_update_tag', 'common_job_param'], {}), '(session, client, gsuite_update_tag, common_job_param)\n', (2254, 2308), False, 'from cartography.intel.gsuite import api\n'), ((3014, 3030), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (3028, 3030), False, 'from unittest import mock\n'), ((3045, 3061), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (3059, 3061), False, 'from unittest import mock\n'), ((3164, 3250), 'cartography.intel.gsuite.api.sync_gsuite_groups', 'api.sync_gsuite_groups', (['session', 'admin_client', 'gsuite_update_tag', 'common_job_param'], {}), '(session, admin_client, gsuite_update_tag,\n common_job_param)\n', (3186, 3250), False, 'from cartography.intel.gsuite import api\n'), ((4117, 4133), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (4131, 4133), False, 'from unittest import mock\n'), ((4138, 4189), 'cartography.intel.gsuite.api.load_gsuite_groups', 'api.load_gsuite_groups', (['session', 'groups', 'update_tag'], {}), '(session, groups, update_tag)\n', (4160, 4189), False, 'from cartography.intel.gsuite import api\n'), ((5765, 5781), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (5779, 5781), False, 'from unittest import mock\n'), ((5786, 5835), 'cartography.intel.gsuite.api.load_gsuite_users', 'api.load_gsuite_users', (['session', 'users', 'update_tag'], {}), '(session, users, update_tag)\n', (5807, 5835), False, 'from cartography.intel.gsuite import api\n'), ((6283, 6310), 'cartography.intel.gsuite.api.transform_groups', 'api.transform_groups', (['param'], {}), '(param)\n', (6303, 6310), False, 'from cartography.intel.gsuite import api\n'), ((6717, 6743), 'cartography.intel.gsuite.api.transform_users', 'api.transform_users', (['param'], {}), '(param)\n', (6736, 6743), False, 'from cartography.intel.gsuite import api\n')]
|
from django.conf import settings
from notifications import constants, email, helpers
def verification_code_not_given():
verification_code_not_given_first_reminder()
verification_code_not_given_seconds_reminder()
def verification_code_not_given_first_reminder():
days_ago = settings.VERIFICATION_CODE_NOT_GIVEN_DAYS
category = constants.VERIFICATION_CODE_NOT_GIVEN
company_users = (
helpers.get_unverified_suppliers(days_ago)
.filter(
company__is_uk_isd_company=False,
)
.exclude(
supplieremailnotification__category=category,
)
)
for company_user in company_users:
notification = email.VerificationWaitingNotification(company_user)
notification.send()
def verification_code_not_given_seconds_reminder():
days_ago = settings.VERIFICATION_CODE_NOT_GIVEN_DAYS_2ND_EMAIL
category = constants.VERIFICATION_CODE_2ND_EMAIL
company_users = (
helpers.get_unverified_suppliers(days_ago)
.filter(
company__is_uk_isd_company=False,
)
.exclude(
supplieremailnotification__category=category,
)
)
for company_user in company_users:
notification = email.VerificationStillWaitingNotification(company_user)
notification.send()
def new_companies_in_sector():
companies_grouped_by_industry = helpers.group_new_companies_by_industry()
for subscriber in helpers.get_new_companies_anonymous_subscribers():
companies = set()
for industry in subscriber['industries']:
companies.update(companies_grouped_by_industry[industry])
if companies:
notification = email.NewCompaniesInSectorNotification(subscriber=subscriber, companies=companies)
notification.send()
def company_user_unsubscribed(company_user):
notification = email.SupplierUbsubscribed(company_user)
notification.send()
def anonymous_unsubscribed(recipient_email):
recipient = {'email': recipient_email, 'name': None}
notification = email.AnonymousSubscriberUbsubscribed(recipient)
notification.send()
|
[
"notifications.helpers.group_new_companies_by_industry",
"notifications.helpers.get_new_companies_anonymous_subscribers",
"notifications.email.NewCompaniesInSectorNotification",
"notifications.email.VerificationStillWaitingNotification",
"notifications.helpers.get_unverified_suppliers",
"notifications.email.SupplierUbsubscribed",
"notifications.email.VerificationWaitingNotification",
"notifications.email.AnonymousSubscriberUbsubscribed"
] |
[((1393, 1434), 'notifications.helpers.group_new_companies_by_industry', 'helpers.group_new_companies_by_industry', ([], {}), '()\n', (1432, 1434), False, 'from notifications import constants, email, helpers\n'), ((1458, 1507), 'notifications.helpers.get_new_companies_anonymous_subscribers', 'helpers.get_new_companies_anonymous_subscribers', ([], {}), '()\n', (1505, 1507), False, 'from notifications import constants, email, helpers\n'), ((1885, 1925), 'notifications.email.SupplierUbsubscribed', 'email.SupplierUbsubscribed', (['company_user'], {}), '(company_user)\n', (1911, 1925), False, 'from notifications import constants, email, helpers\n'), ((2073, 2121), 'notifications.email.AnonymousSubscriberUbsubscribed', 'email.AnonymousSubscriberUbsubscribed', (['recipient'], {}), '(recipient)\n', (2110, 2121), False, 'from notifications import constants, email, helpers\n'), ((685, 736), 'notifications.email.VerificationWaitingNotification', 'email.VerificationWaitingNotification', (['company_user'], {}), '(company_user)\n', (722, 736), False, 'from notifications import constants, email, helpers\n'), ((1239, 1295), 'notifications.email.VerificationStillWaitingNotification', 'email.VerificationStillWaitingNotification', (['company_user'], {}), '(company_user)\n', (1281, 1295), False, 'from notifications import constants, email, helpers\n'), ((1704, 1791), 'notifications.email.NewCompaniesInSectorNotification', 'email.NewCompaniesInSectorNotification', ([], {'subscriber': 'subscriber', 'companies': 'companies'}), '(subscriber=subscriber, companies=\n companies)\n', (1742, 1791), False, 'from notifications import constants, email, helpers\n'), ((415, 457), 'notifications.helpers.get_unverified_suppliers', 'helpers.get_unverified_suppliers', (['days_ago'], {}), '(days_ago)\n', (447, 457), False, 'from notifications import constants, email, helpers\n'), ((969, 1011), 'notifications.helpers.get_unverified_suppliers', 'helpers.get_unverified_suppliers', (['days_ago'], {}), '(days_ago)\n', (1001, 1011), False, 'from notifications import constants, email, helpers\n')]
|
# encoding: utf-8
"""
@author: liaoxingyu
@contact: <EMAIL>
"""
import torchvision.transforms as T
import random
import numpy as np
import PIL
from .transforms import RandomErasing
class AddGaussianNoise(object):
def __call__(self, img):
std = random.uniform(0, 1.0)
if std > 0.5:
return img
# Convert to ndarray
img = np.asarray(img).copy()
noise = np.random.normal(size=img.shape, scale=std).astype(np.uint8)
img += noise
img = np.clip(img, 0, 255)
# Convert back to PIL image
img = PIL.Image.fromarray(img)
return img
def build_transforms(cfg, is_train=True):
normalize_transform = T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)
if is_train:
print('++++ hard train')
transform = T.Compose([
T.Resize(cfg.INPUT.SIZE_DOWN),
T.Resize(cfg.INPUT.SIZE_UP),
T.RandomHorizontalFlip(p=cfg.INPUT.PROB),
T.Pad(padding=cfg.INPUT.PADDING),
T.RandomRotation(cfg.INPUT.DEGREE),
T.ColorJitter(0.6,0.9,0.7),
T.RandomCrop(cfg.INPUT.SIZE_TRAIN),
#AddGaussianNoise(),
T.ToTensor(),
normalize_transform,
RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN)
])
else:
print('++++ init test')
transform = T.Compose([
T.Resize(cfg.INPUT.SIZE_TEST),
T.ToTensor(),
normalize_transform
])
return transform
def build_transforms2(cfg, is_train=True):
#print('++++ easy')
normalize_transform = T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)
if is_train:
print('++++ easy train')
transform = T.Compose([
T.Resize(cfg.INPUT.SIZE_TRAIN),
T.RandomHorizontalFlip(p=cfg.INPUT.PROB),
#T.Pad(cfg.INPUT.PADDING),
T.ColorJitter(0.4,0.6,0.7),
T.RandomRotation(cfg.INPUT.DEGREE),
#T.ColorJitter(0.4,0.6,0.7),
T.Pad(cfg.INPUT.PADDING),
T.RandomCrop(cfg.INPUT.SIZE_TRAIN),
T.ToTensor(),
normalize_transform,
RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN)
])
else:
print('++++ easy test')
transform = T.Compose([
T.Resize(cfg.INPUT.SIZE_TEST),
T.ToTensor(),
normalize_transform
])
return transform
def build_transforms3(cfg, is_train=True):
normalize_transform = T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)
if is_train:
print('++++ init train')
transform = T.Compose([
T.Resize(cfg.INPUT.SIZE_TRAIN),
T.RandomHorizontalFlip(p=cfg.INPUT.PROB),
T.Pad(cfg.INPUT.PADDING),
T.RandomCrop(cfg.INPUT.SIZE_TRAIN),
T.ToTensor(),
normalize_transform,
RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN)
])
else:
print('++++ init test')
transform = T.Compose([
T.Resize(cfg.INPUT.SIZE_TEST),
T.ToTensor(),
normalize_transform
])
return transform
|
[
"torchvision.transforms.ColorJitter",
"torchvision.transforms.RandomHorizontalFlip",
"random.uniform",
"torchvision.transforms.RandomRotation",
"numpy.asarray",
"numpy.clip",
"torchvision.transforms.ToTensor",
"torchvision.transforms.Pad",
"numpy.random.normal",
"PIL.Image.fromarray",
"torchvision.transforms.Normalize",
"torchvision.transforms.RandomCrop",
"torchvision.transforms.Resize"
] |
[((691, 754), 'torchvision.transforms.Normalize', 'T.Normalize', ([], {'mean': 'cfg.INPUT.PIXEL_MEAN', 'std': 'cfg.INPUT.PIXEL_STD'}), '(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)\n', (702, 754), True, 'import torchvision.transforms as T\n'), ((1655, 1718), 'torchvision.transforms.Normalize', 'T.Normalize', ([], {'mean': 'cfg.INPUT.PIXEL_MEAN', 'std': 'cfg.INPUT.PIXEL_STD'}), '(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)\n', (1666, 1718), True, 'import torchvision.transforms as T\n'), ((2593, 2656), 'torchvision.transforms.Normalize', 'T.Normalize', ([], {'mean': 'cfg.INPUT.PIXEL_MEAN', 'std': 'cfg.INPUT.PIXEL_STD'}), '(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)\n', (2604, 2656), True, 'import torchvision.transforms as T\n'), ((259, 281), 'random.uniform', 'random.uniform', (['(0)', '(1.0)'], {}), '(0, 1.0)\n', (273, 281), False, 'import random\n'), ((506, 526), 'numpy.clip', 'np.clip', (['img', '(0)', '(255)'], {}), '(img, 0, 255)\n', (513, 526), True, 'import numpy as np\n'), ((578, 602), 'PIL.Image.fromarray', 'PIL.Image.fromarray', (['img'], {}), '(img)\n', (597, 602), False, 'import PIL\n'), ((371, 386), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (381, 386), True, 'import numpy as np\n'), ((410, 453), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'img.shape', 'scale': 'std'}), '(size=img.shape, scale=std)\n', (426, 453), True, 'import numpy as np\n'), ((853, 882), 'torchvision.transforms.Resize', 'T.Resize', (['cfg.INPUT.SIZE_DOWN'], {}), '(cfg.INPUT.SIZE_DOWN)\n', (861, 882), True, 'import torchvision.transforms as T\n'), ((896, 923), 'torchvision.transforms.Resize', 'T.Resize', (['cfg.INPUT.SIZE_UP'], {}), '(cfg.INPUT.SIZE_UP)\n', (904, 923), True, 'import torchvision.transforms as T\n'), ((937, 977), 'torchvision.transforms.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {'p': 'cfg.INPUT.PROB'}), '(p=cfg.INPUT.PROB)\n', (959, 977), True, 'import torchvision.transforms as T\n'), ((991, 1023), 'torchvision.transforms.Pad', 'T.Pad', ([], {'padding': 'cfg.INPUT.PADDING'}), '(padding=cfg.INPUT.PADDING)\n', (996, 1023), True, 'import torchvision.transforms as T\n'), ((1037, 1071), 'torchvision.transforms.RandomRotation', 'T.RandomRotation', (['cfg.INPUT.DEGREE'], {}), '(cfg.INPUT.DEGREE)\n', (1053, 1071), True, 'import torchvision.transforms as T\n'), ((1085, 1113), 'torchvision.transforms.ColorJitter', 'T.ColorJitter', (['(0.6)', '(0.9)', '(0.7)'], {}), '(0.6, 0.9, 0.7)\n', (1098, 1113), True, 'import torchvision.transforms as T\n'), ((1125, 1159), 'torchvision.transforms.RandomCrop', 'T.RandomCrop', (['cfg.INPUT.SIZE_TRAIN'], {}), '(cfg.INPUT.SIZE_TRAIN)\n', (1137, 1159), True, 'import torchvision.transforms as T\n'), ((1206, 1218), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (1216, 1218), True, 'import torchvision.transforms as T\n'), ((1438, 1467), 'torchvision.transforms.Resize', 'T.Resize', (['cfg.INPUT.SIZE_TEST'], {}), '(cfg.INPUT.SIZE_TEST)\n', (1446, 1467), True, 'import torchvision.transforms as T\n'), ((1481, 1493), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (1491, 1493), True, 'import torchvision.transforms as T\n'), ((1817, 1847), 'torchvision.transforms.Resize', 'T.Resize', (['cfg.INPUT.SIZE_TRAIN'], {}), '(cfg.INPUT.SIZE_TRAIN)\n', (1825, 1847), True, 'import torchvision.transforms as T\n'), ((1861, 1901), 'torchvision.transforms.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {'p': 'cfg.INPUT.PROB'}), '(p=cfg.INPUT.PROB)\n', (1883, 1901), True, 'import torchvision.transforms as T\n'), ((1954, 1982), 'torchvision.transforms.ColorJitter', 'T.ColorJitter', (['(0.4)', '(0.6)', '(0.7)'], {}), '(0.4, 0.6, 0.7)\n', (1967, 1982), True, 'import torchvision.transforms as T\n'), ((1994, 2028), 'torchvision.transforms.RandomRotation', 'T.RandomRotation', (['cfg.INPUT.DEGREE'], {}), '(cfg.INPUT.DEGREE)\n', (2010, 2028), True, 'import torchvision.transforms as T\n'), ((2083, 2107), 'torchvision.transforms.Pad', 'T.Pad', (['cfg.INPUT.PADDING'], {}), '(cfg.INPUT.PADDING)\n', (2088, 2107), True, 'import torchvision.transforms as T\n'), ((2121, 2155), 'torchvision.transforms.RandomCrop', 'T.RandomCrop', (['cfg.INPUT.SIZE_TRAIN'], {}), '(cfg.INPUT.SIZE_TRAIN)\n', (2133, 2155), True, 'import torchvision.transforms as T\n'), ((2169, 2181), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (2179, 2181), True, 'import torchvision.transforms as T\n'), ((2401, 2430), 'torchvision.transforms.Resize', 'T.Resize', (['cfg.INPUT.SIZE_TEST'], {}), '(cfg.INPUT.SIZE_TEST)\n', (2409, 2430), True, 'import torchvision.transforms as T\n'), ((2444, 2456), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (2454, 2456), True, 'import torchvision.transforms as T\n'), ((2755, 2785), 'torchvision.transforms.Resize', 'T.Resize', (['cfg.INPUT.SIZE_TRAIN'], {}), '(cfg.INPUT.SIZE_TRAIN)\n', (2763, 2785), True, 'import torchvision.transforms as T\n'), ((2799, 2839), 'torchvision.transforms.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {'p': 'cfg.INPUT.PROB'}), '(p=cfg.INPUT.PROB)\n', (2821, 2839), True, 'import torchvision.transforms as T\n'), ((2853, 2877), 'torchvision.transforms.Pad', 'T.Pad', (['cfg.INPUT.PADDING'], {}), '(cfg.INPUT.PADDING)\n', (2858, 2877), True, 'import torchvision.transforms as T\n'), ((2891, 2925), 'torchvision.transforms.RandomCrop', 'T.RandomCrop', (['cfg.INPUT.SIZE_TRAIN'], {}), '(cfg.INPUT.SIZE_TRAIN)\n', (2903, 2925), True, 'import torchvision.transforms as T\n'), ((2939, 2951), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (2949, 2951), True, 'import torchvision.transforms as T\n'), ((3171, 3200), 'torchvision.transforms.Resize', 'T.Resize', (['cfg.INPUT.SIZE_TEST'], {}), '(cfg.INPUT.SIZE_TEST)\n', (3179, 3200), True, 'import torchvision.transforms as T\n'), ((3214, 3226), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (3224, 3226), True, 'import torchvision.transforms as T\n')]
|
from datetime import datetime
from core.modules.output_mod import output
def greet(master):
hour = datetime.now().hour
if 5 <= hour < 12:
output(f"Good morning {master}")
elif 12 <= hour < 18:
output(f"Good afternoon {master}")
else:
output(f"Good evening {master}")
|
[
"core.modules.output_mod.output",
"datetime.datetime.now"
] |
[((106, 120), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (118, 120), False, 'from datetime import datetime\n'), ((158, 190), 'core.modules.output_mod.output', 'output', (['f"""Good morning {master}"""'], {}), "(f'Good morning {master}')\n", (164, 190), False, 'from core.modules.output_mod import output\n'), ((225, 259), 'core.modules.output_mod.output', 'output', (['f"""Good afternoon {master}"""'], {}), "(f'Good afternoon {master}')\n", (231, 259), False, 'from core.modules.output_mod import output\n'), ((278, 310), 'core.modules.output_mod.output', 'output', (['f"""Good evening {master}"""'], {}), "(f'Good evening {master}')\n", (284, 310), False, 'from core.modules.output_mod import output\n')]
|
import h5py
from signal_filter.fft import LowPassFilter
from signal_filter.mpi_signal_filter import SignalFilter
h5_path = 'giv_raw.h5'
h5_f = h5py.File(h5_path, mode='r+')
h5_grp = h5_f['Measurement_000/Channel_000']
h5_main = h5_grp['Raw_Data']
samp_rate = h5_grp.attrs['IO_samp_rate_[Hz]']
num_spectral_pts = h5_main.shape[1]
frequency_filters = [LowPassFilter(num_spectral_pts, samp_rate, 10E+3)]
noise_tol = 1E-6
sig_filt = SignalFilter(h5_main, frequency_filters=frequency_filters,
noise_threshold=noise_tol, write_filtered=True,
write_condensed=False, num_pix=1, verbose=True)
h5_filt_grp = sig_filt.compute()
# VERIFICATION here:
row_ind = 20
actual_line = h5_filt_grp['Filtered_Data'][row_ind]
h5_ref_path = '/home/syz/giv/pzt_nanocap_6_just_translation_filt_resh_copy.h5'
h5_ref_file = h5py.File(h5_ref_path, mode='r')
h5_ref_grp = h5_ref_file[h5_filt_grp.name]
ref_line = h5_ref_grp['Filtered_Data'][row_ind]
import numpy as np
print('Actual line close to reference:')
print(np.max(np.abs(actual_line - ref_line)))
print(np.allclose(actual_line, ref_line))
"""
single_AO = h5_grp['Spectroscopic_Values'][0, :500]
import numpy as np
row_ind = 20
# read data for a specific scan line
raw_line_resp = h5_main[row_ind]
# break this up into pixels:
raw_line_mat = np.reshape(raw_line_resp, (-1, single_AO.size))
filt_line_resp = h5_filt_grp['Filtered_Data'][row_ind]
filt_line_mat = np.reshape(filt_line_resp, (-1, single_AO.size))
import pyUSID as usid
fig, axes = usid.plot_utils.plot_curves(single_AO, [raw_line_mat, filt_line_mat], use_rainbow_plots=False, x_label='Bias (V)',
y_label='Current (nA)', subtitle_prefix='Pixel', title=None, num_plots=9)
fig.savefig('result.png', format='png', )
savefig(os.path.join(other_figures_folder, file_name + '.png'), format='png', dpi=300)
"""
h5_f.close()
|
[
"h5py.File",
"numpy.abs",
"numpy.allclose",
"signal_filter.fft.LowPassFilter",
"signal_filter.mpi_signal_filter.SignalFilter"
] |
[((145, 174), 'h5py.File', 'h5py.File', (['h5_path'], {'mode': '"""r+"""'}), "(h5_path, mode='r+')\n", (154, 174), False, 'import h5py\n'), ((434, 597), 'signal_filter.mpi_signal_filter.SignalFilter', 'SignalFilter', (['h5_main'], {'frequency_filters': 'frequency_filters', 'noise_threshold': 'noise_tol', 'write_filtered': '(True)', 'write_condensed': '(False)', 'num_pix': '(1)', 'verbose': '(True)'}), '(h5_main, frequency_filters=frequency_filters, noise_threshold=\n noise_tol, write_filtered=True, write_condensed=False, num_pix=1,\n verbose=True)\n', (446, 597), False, 'from signal_filter.mpi_signal_filter import SignalFilter\n'), ((857, 889), 'h5py.File', 'h5py.File', (['h5_ref_path'], {'mode': '"""r"""'}), "(h5_ref_path, mode='r')\n", (866, 889), False, 'import h5py\n'), ((354, 405), 'signal_filter.fft.LowPassFilter', 'LowPassFilter', (['num_spectral_pts', 'samp_rate', '(10000.0)'], {}), '(num_spectral_pts, samp_rate, 10000.0)\n', (367, 405), False, 'from signal_filter.fft import LowPassFilter\n'), ((1094, 1128), 'numpy.allclose', 'np.allclose', (['actual_line', 'ref_line'], {}), '(actual_line, ref_line)\n', (1105, 1128), True, 'import numpy as np\n'), ((1055, 1085), 'numpy.abs', 'np.abs', (['(actual_line - ref_line)'], {}), '(actual_line - ref_line)\n', (1061, 1085), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
from scrapy_splash import SplashRequest
from work.items import ShopItem
import re
class Test2Spider(scrapy.Spider):
name = 'test2'
allowed_domains = ['www.countryattire.com']
start_urls = ['https://www.countryattire.com/']
custom_settings = {
'MYSQL_TABLE': 'test',
'ITEM_PIPELINES': {}
}
def parse(self, response):
nav = response.xpath('//ul[@id="pronav"]/li')[2:8]
nav.pop(3)
nav_level_1_list = nav
for nav_level_1 in nav_level_1_list:
cat1 = nav_level_1.xpath('./a/span/text()').get().strip()
nav_level_2_list = nav_level_1.xpath('.//div[@id="menu"]/div')[1:]
for nav_level_2 in nav_level_2_list:
c2 = nav_level_2.xpath('./div/div/span/text()').get()
if c2 is None:
c2 = nav_level_2.xpath('./div/div/span/a/text()').get()
if c2 is None:
c2 = nav_level_2.xpath('./div/div/span/span/text()').get()
cat2 = c2.strip()
nav_level_3_list = nav_level_2.xpath('./div/span')
if not nav_level_3_list:
nav_level_2_url = nav_level_2.xpath('./a/@href').get()
self.logger.info(f'{cat1}---{cat2}')
meta = {'cat1': cat1, 'cat2': cat2}
yield SplashRequest(response.urljoin(nav_level_2_url), self.parse_product_url, meta=meta)
for nav_level_3 in nav_level_3_list:
cat3 = nav_level_3.xpath('./a/text()').get().strip()
nav_level_3_url = nav_level_3.xpath('./a/@href').get()
self.logger.info(f'{cat1}---{cat2}---{cat3}')
meta = {'cat1': cat1, 'cat2': cat2, 'cat3': cat3}
yield SplashRequest(response.urljoin(nav_level_3_url), self.parse_product_url, meta=meta)
def parse_product_url(self, response):
product_list = response.xpath('//div[@class="products-grid"]/div')
for product in product_list:
product_url = product.xpath('./a/@href').get()
self.logger.info('product url is %s' % product_url)
# yield SplashRequest(response.urljoin(product_url), self.parse_product_info, meta=response.meta)
next_page = response.xpath('//a[@class="next i-next"]/@href').get()
# self.logger.info('next page is %s' % next_page)
if next_page is not None:
yield SplashRequest(response.urljoin(next_page), self.parse_product_url, meta=response.meta)
def parse_product_info(self, response):
item = ShopItem()
item['PageUrl'] = response.url
item['cat1'] = response.meta['cat1']
item['cat2'] = response.meta['cat2']
item['cat3'] = response.meta['cat3'] or ''
item['brand'] = response.xpath('').get().strip()
item['gender'] = item['cat1']
item['producttype'] = item['cat2']
item['title'] = response.xpath('').get()
item['price'] = response.xpath('').get()
item['short_content'] = ''
item['content'] = response.xpath('').get()
pictures = response.xpath('').getall()
picture = response.xpath('').getall()
item['pictures'] = pictures or picture
item['color'] = ''
item['size'] = response.xpath('').getall()
yield item
|
[
"work.items.ShopItem"
] |
[((2701, 2711), 'work.items.ShopItem', 'ShopItem', ([], {}), '()\n', (2709, 2711), False, 'from work.items import ShopItem\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-10-08 18:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=30, null=True)),
('code', models.CharField(blank=True, max_length=6, null=True)),
('days_given', models.IntegerField(blank=True, null=True)),
('ref_books', models.TextField(blank=True, max_length=500, null=True)),
('is_elective', models.BooleanField(default=False)),
('credits', models.IntegerField(blank=True, default=0, null=True)),
('classification', models.CharField(choices=[(b'Theory', b'Theory'), (b'Lab', b'LAB')], default=b'th', max_length=6, null=True)),
],
options={
'verbose_name': 'Course',
'verbose_name_plural': 'Courses',
},
),
migrations.CreateModel(
name='Semester',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, choices=[(b'I', b'Semester I'), (b'II', b'Semester II'), (b'III', b'Semester III'), (b'IV', b'Semester IV'), (b'V', b'Semester V'), (b'VI', b'Semester VI'), (b'VII', b'Semester VII'), (b'VIII', b'Semester VIII')], default=b'Semester I', max_length=15, null=True)),
('spi', models.CharField(blank=True, max_length=10, null=True)),
],
options={
'ordering': ('name',),
'verbose_name': 'Semester',
'verbose_name_plural': 'Semesters',
},
),
migrations.AddField(
model_name='course',
name='semester',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='semester.Semester'),
),
]
|
[
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.IntegerField"
] |
[((2177, 2268), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""semester.Semester"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'semester.Semester')\n", (2194, 2268), False, 'from django.db import migrations, models\n'), ((400, 493), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (416, 493), False, 'from django.db import migrations, models\n'), ((517, 571), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(30)', 'null': '(True)'}), '(blank=True, max_length=30, null=True)\n', (533, 571), False, 'from django.db import migrations, models\n'), ((599, 652), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(6)', 'null': '(True)'}), '(blank=True, max_length=6, null=True)\n', (615, 652), False, 'from django.db import migrations, models\n'), ((686, 728), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (705, 728), False, 'from django.db import migrations, models\n'), ((761, 816), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'max_length': '(500)', 'null': '(True)'}), '(blank=True, max_length=500, null=True)\n', (777, 816), False, 'from django.db import migrations, models\n'), ((851, 885), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (870, 885), False, 'from django.db import migrations, models\n'), ((916, 969), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'default': '(0)', 'null': '(True)'}), '(blank=True, default=0, null=True)\n', (935, 969), False, 'from django.db import migrations, models\n'), ((1007, 1119), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[(b'Theory', b'Theory'), (b'Lab', b'LAB')]", 'default': "b'th'", 'max_length': '(6)', 'null': '(True)'}), "(choices=[(b'Theory', b'Theory'), (b'Lab', b'LAB')],\n default=b'th', max_length=6, null=True)\n", (1023, 1119), False, 'from django.db import migrations, models\n'), ((1378, 1471), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1394, 1471), False, 'from django.db import migrations, models\n'), ((1495, 1804), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'choices': "[(b'I', b'Semester I'), (b'II', b'Semester II'), (b'III', b'Semester III'),\n (b'IV', b'Semester IV'), (b'V', b'Semester V'), (b'VI', b'Semester VI'),\n (b'VII', b'Semester VII'), (b'VIII', b'Semester VIII')]", 'default': "b'Semester I'", 'max_length': '(15)', 'null': '(True)'}), "(blank=True, choices=[(b'I', b'Semester I'), (b'II',\n b'Semester II'), (b'III', b'Semester III'), (b'IV', b'Semester IV'), (\n b'V', b'Semester V'), (b'VI', b'Semester VI'), (b'VII', b'Semester VII'\n ), (b'VIII', b'Semester VIII')], default=b'Semester I', max_length=15,\n null=True)\n", (1511, 1804), False, 'from django.db import migrations, models\n'), ((1813, 1867), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(10)', 'null': '(True)'}), '(blank=True, max_length=10, null=True)\n', (1829, 1867), False, 'from django.db import migrations, models\n')]
|
from django.db import models
import datetime
from django.contrib.auth.models import User
# Create your models here.
class Review(models.Model):
user = models.ForeignKey(User,blank=True,null=True,on_delete=models.CASCADE)
mark = models.IntegerField()
text = models.TextField()
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.user.username + str(self.mark)
|
[
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"django.db.models.TextField",
"django.db.models.DateTimeField"
] |
[((153, 225), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'blank': '(True)', 'null': '(True)', 'on_delete': 'models.CASCADE'}), '(User, blank=True, null=True, on_delete=models.CASCADE)\n', (170, 225), False, 'from django.db import models\n'), ((231, 252), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (250, 252), False, 'from django.db import models\n'), ((261, 279), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (277, 279), False, 'from django.db import models\n'), ((291, 330), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (311, 330), False, 'from django.db import models\n')]
|
"""
Run a job: i.e. run a configuration file through the DAGRunner
"""
import argparse
import logging
import warnings
######################################
######################################
# Important:
#
# If your configuration uses custom node classes, be sure to set environment variable
# PRIMROSE_EXT_NODE_PACKAGE to the location of your package before running primrose.
# Example:
# ```
# export PRIMROSE_EXT_NODE_PACKAGE=src/mypackage
# python run_primrose.py --config_loc my_config.json
# ```
#
######################################
######################################
from primrose.configuration.configuration import Configuration
from primrose.dag_runner import DagRunner
from primrose.dag.config_layer_traverser import ConfigLayerTraverser
from primrose.dag.depth_first_traverser import DepthFirstTraverser
warnings.filterwarnings("ignore")
def parse_arguments():
"""
Parse command line arguments
Returns:
argument objects with flags as attributes
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--config_loc", help="Location of the configuration file", required=True
)
parser.add_argument(
"--is_dry_run",
help="do a dry run of the DAG which will validatre config and log which nodes would be run",
default=False,
type=lambda x: (str(x).lower() == "true"),
)
known_args, pipeline_args = parser.parse_known_args()
return known_args, pipeline_args
def main():
"""
Run a job: i.e. run a configuration file through the DAGRunner
"""
args, _ = parse_arguments()
logging.basicConfig(
format="%(asctime)s %(levelname)s %(filename)s %(funcName)s: %(message)s",
level=logging.INFO,
)
configuration = Configuration(config_location=args.config_loc)
DagRunner(configuration).run(dry_run=args.is_dry_run)
if __name__ == "__main__":
main()
|
[
"argparse.ArgumentParser",
"logging.basicConfig",
"warnings.filterwarnings",
"primrose.dag_runner.DagRunner",
"primrose.configuration.configuration.Configuration"
] |
[((843, 876), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (866, 876), False, 'import warnings\n'), ((1028, 1053), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1051, 1053), False, 'import argparse\n'), ((1626, 1749), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)s %(filename)s %(funcName)s: %(message)s"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s %(levelname)s %(filename)s %(funcName)s: %(message)s',\n level=logging.INFO)\n", (1645, 1749), False, 'import logging\n'), ((1785, 1831), 'primrose.configuration.configuration.Configuration', 'Configuration', ([], {'config_location': 'args.config_loc'}), '(config_location=args.config_loc)\n', (1798, 1831), False, 'from primrose.configuration.configuration import Configuration\n'), ((1837, 1861), 'primrose.dag_runner.DagRunner', 'DagRunner', (['configuration'], {}), '(configuration)\n', (1846, 1861), False, 'from primrose.dag_runner import DagRunner\n')]
|
"""
Unit tests for the mqtt support class
Copyright 2022 <NAME>
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
import context # add rvc2mqtt package to the python path using local reference
from rvc2mqtt.mqtt import *
## can't figure out how to unit test this..probably need to mock...but given this class is tightly coupled with
## paho mqtt not sure how useful....anyway..below is hack to test it with real mqtt server
class device1(object):
def __init__(self, name: str, mqtt_support:MQTT_Support):
self.device_topic = mqtt_support.make_device_topic_root(name)
self.status_topic = mqtt_support.make_device_topic_string(name, None, True)
self.set_topic = mqtt_support.make_device_topic_string(name, None, False)
mqtt_support.client.publish(self.status_topic, "unknown", retain=True)
mqtt_support.register(self, self.set_topic, self.got_message)
self.mqtt = mqtt_support
def got_message(self, topic, payload):
print(f"hello from device1 {topic} --- {payload.decode('utf-8')}")
self.mqtt.client.publish(self.status_topic, payload, retain=True)
if __name__ == '__main__':
#unittest.main()
mqs = MqttInitalize(Test_MQTT_Support.MQTT_BRIDGE_SETTINGS)
mqs.client.loop_start()
d1 = device1("try1", mqs)
mqs.client.publish(d1.set_topic, "here", retain=False)
import time
time.sleep(5)
mqs.shutdown()
|
[
"time.sleep"
] |
[((1918, 1931), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1928, 1931), False, 'import time\n')]
|
#!/usr/bin/env python
import os, sys, subprocess
from os.path import basename,dirname
import h5py
from netCDF4 import Dataset
import numpy as np
import numpy.ma as ma
import gdal
from gdalconst import *
from osgeo import ogr, osr
from datetime import datetime, date
def createImgSCISAT(fileAbsPath):
# read info from netcdf
ncfile = Dataset(fileAbsPath, 'r')
latitude = ncfile.groups['ACE-FTS-v2.2'].latitude
longitude = ncfile.groups['ACE-FTS-v2.2'].longitude
datestart = datetime.strptime(ncfile.groups['ACE-FTS-v2.2'].start_time,'%Y-%m-%d %H:%M:%S+00')
dateend = datetime.strptime(ncfile.groups['ACE-FTS-v2.2'].end_time,'%Y-%m-%d %H:%M:%S+00')
ozone = ncfile.groups['ACE-FTS-v2.2'].groups['Data-L2_1km_grid'].variables['O3'][:]
heightLevels = ncfile.groups['ACE-FTS-v2.2'].groups['Data-L2_1km_grid'].variables['z'][:]
numBand = len(ozone)
ncfile.close()
#common vars
no_value = -9999
minValue = ma.min(ozone)
maxValue = ma.max(ozone)
ma.set_fill_value(ozone, no_value)
ozone = ozone.filled()
#ma.set_fill_value(heightLevels, no_value)
#heightLevels = heightLevels.filled()
sizeX = 1
sizeY = 1
dataType = gdal.GDT_Float32
resolution = 1.0 # in degree
driver = gdal.GetDriverByName('GTiff' )
outFile = 'ACE-FTS_L2_ozone_'+datestart.strftime('%Y%m%d.%H%M%S')+'.tif'
#create tiff
dst_ds = driver.Create(outFile, sizeX, sizeY, numBand, dataType)
for i in range(numBand):
dst_ds.GetRasterBand(i+1).WriteArray(np.expand_dims(np.expand_dims(ozone[i],axis=0),axis=0))
# The computed stat produces this warning
# Warning 1: Lost metadata writing to GeoTIFF ... too large to fit in tag.
# An additional *.aux.xml is added
#if ozone[i] != no_value:
# dst_ds.GetRasterBand(i+1).ComputeStatistics(False)
dst_ds.GetRasterBand(i+1).SetNoDataValue(no_value)
#set geotrasform matrix
top_left_x = longitude - (resolution / 2)
w_e_pixel_resolution = resolution
top_left_y = latitude - (resolution / 2)
n_s_pixel_resolution = - resolution
coord = [top_left_x, w_e_pixel_resolution, 0, top_left_y,0, n_s_pixel_resolution]
dst_ds.SetGeoTransform(coord)
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS('WGS84')
dst_ds.SetProjection(srs.ExportToWkt())
#set metadata
dst_ds.SetMetadataItem('GLOBAL_MAX',str(maxValue))
dst_ds.SetMetadataItem('GLOBAL_MIN',str(minValue))
dst_ds.SetMetadataItem('TIME_END', dateend.strftime('%Y-%m-%dT%H:%M:%SZ'))
dst_ds.SetMetadataItem('TIME_START', datestart.strftime('%Y-%m-%dT%H:%M:%SZ'))
dst_ds.SetMetadataItem('VERTICAL_LEVELS_NUMBER', str(len(heightLevels)))
dst_ds.SetMetadataItem('VERTICAL_LEVELS', ','.join(str(x) for x in heightLevels))
dst_ds =None
return [outFile]
if __name__ == '__main__':
if len(sys.argv) != 2:
sys.exit('\nUsage: %s L2_SCISAT_file \n' % sys.argv[0] )
else:
if not os.path.exists(sys.argv[1]):
sys.exit('\nERROR: File %s was not found!\n' % sys.argv[1])
fileAbsPath = sys.argv[1]
outFileName = createImgSCISAT(fileAbsPath)
exit(0)
# else:
# Module is imported from another module
|
[
"netCDF4.Dataset",
"numpy.ma.min",
"gdal.GetDriverByName",
"numpy.ma.set_fill_value",
"os.path.exists",
"numpy.expand_dims",
"datetime.datetime.strptime",
"sys.exit",
"osgeo.osr.SpatialReference",
"numpy.ma.max"
] |
[((343, 368), 'netCDF4.Dataset', 'Dataset', (['fileAbsPath', '"""r"""'], {}), "(fileAbsPath, 'r')\n", (350, 368), False, 'from netCDF4 import Dataset\n'), ((495, 582), 'datetime.datetime.strptime', 'datetime.strptime', (["ncfile.groups['ACE-FTS-v2.2'].start_time", '"""%Y-%m-%d %H:%M:%S+00"""'], {}), "(ncfile.groups['ACE-FTS-v2.2'].start_time,\n '%Y-%m-%d %H:%M:%S+00')\n", (512, 582), False, 'from datetime import datetime, date\n'), ((592, 677), 'datetime.datetime.strptime', 'datetime.strptime', (["ncfile.groups['ACE-FTS-v2.2'].end_time", '"""%Y-%m-%d %H:%M:%S+00"""'], {}), "(ncfile.groups['ACE-FTS-v2.2'].end_time,\n '%Y-%m-%d %H:%M:%S+00')\n", (609, 677), False, 'from datetime import datetime, date\n'), ((957, 970), 'numpy.ma.min', 'ma.min', (['ozone'], {}), '(ozone)\n', (963, 970), True, 'import numpy.ma as ma\n'), ((986, 999), 'numpy.ma.max', 'ma.max', (['ozone'], {}), '(ozone)\n', (992, 999), True, 'import numpy.ma as ma\n'), ((1004, 1038), 'numpy.ma.set_fill_value', 'ma.set_fill_value', (['ozone', 'no_value'], {}), '(ozone, no_value)\n', (1021, 1038), True, 'import numpy.ma as ma\n'), ((1265, 1294), 'gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (1285, 1294), False, 'import gdal\n'), ((2258, 2280), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (2278, 2280), False, 'from osgeo import ogr, osr\n'), ((2959, 3016), 'sys.exit', 'sys.exit', (['("""\nUsage: %s L2_SCISAT_file \n""" % sys.argv[0])'], {}), '("""\nUsage: %s L2_SCISAT_file \n""" % sys.argv[0])\n', (2967, 3016), False, 'import os, sys, subprocess\n'), ((3041, 3068), 'os.path.exists', 'os.path.exists', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (3055, 3068), False, 'import os, sys, subprocess\n'), ((3082, 3143), 'sys.exit', 'sys.exit', (['("""\nERROR: File %s was not found!\n""" % sys.argv[1])'], {}), '("""\nERROR: File %s was not found!\n""" % sys.argv[1])\n', (3090, 3143), False, 'import os, sys, subprocess\n'), ((1554, 1586), 'numpy.expand_dims', 'np.expand_dims', (['ozone[i]'], {'axis': '(0)'}), '(ozone[i], axis=0)\n', (1568, 1586), True, 'import numpy as np\n')]
|
import logging
from os import write
import requests
from typing import Any, Dict
from cartpole import CartPole
from tensorboardX import SummaryWriter
class BonsaiAgent(object):
""" The agent that gets the action from the trained brain exported as docker image and started locally
"""
def act(self, state) -> Dict[str, Any]:
action = self.predict(state)
#simulator expects action to be integer
action["command"] = int(action["command"])
return action
def predict(self, state):
#local endpoint when running trained brain locally in docker container
url = "http://localhost:5000/v1/prediction"
response = requests.get(url, json=state)
action = response.json()
return action
class RandomAgent(object):
"""The world's simplest agent!"""
def __init__(self, cartpole: CartPole):
self.cartpole = cartpole
def act(self, state):
return cartpole.gym_to_action(cartpole._env.action_space.sample())
if __name__ == '__main__':
logging.basicConfig()
log = logging.getLogger("cartpole")
log.setLevel(level='INFO')
writer = SummaryWriter()
# we will use our environment (wrapper of OpenAI env)
cartpole = CartPole()
# specify which agent you want to use,
# BonsaiAgent that uses trained Brain or
# RandomAgent that randomly selects next action
agent = BonsaiAgent()
episode_count = 100
try:
for i in range(episode_count):
#start a new episode and get the new state
cartpole.episode_start()
state = cartpole.get_state()
cum_reward = 0
while True:
#get the action from the agent (based on the current state)
action = agent.act(state)
#do the next step of the simulation and get the new state
cartpole.episode_step(action)
state = cartpole.get_state()
#get the last reward and add it the episode reward
reward = cartpole.get_last_reward()
cum_reward += reward
if cartpole.halted():
writer.add_scalar("reward", cum_reward, i )
break
writer.flush()
cartpole.episode_finish("")
writer.close()
except KeyboardInterrupt:
print("Stopped")
|
[
"tensorboardX.SummaryWriter",
"logging.basicConfig",
"requests.get",
"cartpole.CartPole",
"logging.getLogger"
] |
[((1044, 1065), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (1063, 1065), False, 'import logging\n'), ((1076, 1105), 'logging.getLogger', 'logging.getLogger', (['"""cartpole"""'], {}), "('cartpole')\n", (1093, 1105), False, 'import logging\n'), ((1151, 1166), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (1164, 1166), False, 'from tensorboardX import SummaryWriter\n'), ((1240, 1250), 'cartpole.CartPole', 'CartPole', ([], {}), '()\n', (1248, 1250), False, 'from cartpole import CartPole\n'), ((678, 707), 'requests.get', 'requests.get', (['url'], {'json': 'state'}), '(url, json=state)\n', (690, 707), False, 'import requests\n')]
|
"""
Packing and unpacking of floats in the IEEE 32-bit and 64-bit formats.
"""
import math
from pypy.rlib.rarithmetic import r_longlong, isinf, isnan, INFINITY, NAN
def pack_float(result, number, size, bigendian):
"""Append to 'result' the 'size' characters of the 32-bit or 64-bit
IEEE representation of the number.
"""
if size == 4:
bias = 127
exp = 8
prec = 23
else:
bias = 1023
exp = 11
prec = 52
if isnan(number):
sign = 0x80
man, e = 1.5, bias + 1
else:
if number < 0:
sign = 0x80
number *= -1
elif number == 0.0:
for i in range(size):
result.append('\x00')
return
else:
sign = 0x00
if isinf(number):
man, e = 1.0, bias + 1
else:
man, e = math.frexp(number)
if 0.5 <= man and man < 1.0:
man *= 2
e -= 1
man -= 1
e += bias
power_of_two = r_longlong(1) << prec
mantissa = r_longlong(power_of_two * man + 0.5)
if mantissa >> prec :
mantissa = 0
e += 1
for i in range(size-2):
result.append(chr(mantissa & 0xff))
mantissa >>= 8
x = (mantissa & ((1<<(15-exp))-1)) | ((e & ((1<<(exp-7))-1))<<(15-exp))
result.append(chr(x))
x = sign | e >> (exp - 7)
result.append(chr(x))
if bigendian:
first = len(result) - size
last = len(result) - 1
for i in range(size // 2):
(result[first + i], result[last - i]) = (
result[last - i], result[first + i])
def unpack_float(input, bigendian):
"""Interpret the 'input' string into a 32-bit or 64-bit
IEEE representation a the number.
"""
size = len(input)
bytes = []
if bigendian:
reverse_mask = size - 1
else:
reverse_mask = 0
nonzero = False
for i in range(size):
x = ord(input[i ^ reverse_mask])
bytes.append(x)
nonzero |= x
if not nonzero:
return 0.0
if size == 4:
bias = 127
exp = 8
prec = 23
else:
bias = 1023
exp = 11
prec = 52
mantissa_scale_factor = 0.5 ** prec # this is constant-folded if it's
# right after the 'if'
mantissa = r_longlong(bytes[size-2] & ((1<<(15-exp))-1))
for i in range(size-3, -1, -1):
mantissa = mantissa << 8 | bytes[i]
mantissa = 1 + mantissa * mantissa_scale_factor
mantissa *= 0.5
e = (bytes[-1] & 0x7f) << (exp - 7)
e += (bytes[size-2] >> (15 - exp)) & ((1<<(exp - 7)) -1)
e -= bias
e += 1
sign = bytes[-1] & 0x80
if e == bias + 2:
if mantissa == 0.5:
number = INFINITY
else:
return NAN
else:
number = math.ldexp(mantissa,e)
if sign : number = -number
return number
|
[
"pypy.rlib.rarithmetic.r_longlong",
"pypy.rlib.rarithmetic.isnan",
"pypy.rlib.rarithmetic.isinf",
"math.frexp",
"math.ldexp"
] |
[((479, 492), 'pypy.rlib.rarithmetic.isnan', 'isnan', (['number'], {}), '(number)\n', (484, 492), False, 'from pypy.rlib.rarithmetic import r_longlong, isinf, isnan, INFINITY, NAN\n'), ((1048, 1084), 'pypy.rlib.rarithmetic.r_longlong', 'r_longlong', (['(power_of_two * man + 0.5)'], {}), '(power_of_two * man + 0.5)\n', (1058, 1084), False, 'from pypy.rlib.rarithmetic import r_longlong, isinf, isnan, INFINITY, NAN\n'), ((2355, 2404), 'pypy.rlib.rarithmetic.r_longlong', 'r_longlong', (['(bytes[size - 2] & (1 << 15 - exp) - 1)'], {}), '(bytes[size - 2] & (1 << 15 - exp) - 1)\n', (2365, 2404), False, 'from pypy.rlib.rarithmetic import r_longlong, isinf, isnan, INFINITY, NAN\n'), ((795, 808), 'pypy.rlib.rarithmetic.isinf', 'isinf', (['number'], {}), '(number)\n', (800, 808), False, 'from pypy.rlib.rarithmetic import r_longlong, isinf, isnan, INFINITY, NAN\n'), ((1011, 1024), 'pypy.rlib.rarithmetic.r_longlong', 'r_longlong', (['(1)'], {}), '(1)\n', (1021, 1024), False, 'from pypy.rlib.rarithmetic import r_longlong, isinf, isnan, INFINITY, NAN\n'), ((2851, 2874), 'math.ldexp', 'math.ldexp', (['mantissa', 'e'], {}), '(mantissa, e)\n', (2861, 2874), False, 'import math\n'), ((880, 898), 'math.frexp', 'math.frexp', (['number'], {}), '(number)\n', (890, 898), False, 'import math\n')]
|
import torchvision.transforms as T
def aug_transform(crop, base_transform, cfg, extra_t=[]):
""" augmentation transform generated from config """
return T.Compose(
[
T.RandomApply(
[T.ColorJitter(cfg.cj0, cfg.cj1, cfg.cj2, cfg.cj3)], p=cfg.cj_p
),
T.RandomGrayscale(p=cfg.gs_p),
T.RandomResizedCrop(
crop,
scale=(cfg.crop_s0, cfg.crop_s1),
ratio=(cfg.crop_r0, cfg.crop_r1),
interpolation=3,
),
T.RandomHorizontalFlip(p=cfg.hf_p),
*extra_t,
base_transform(),
]
)
class MultiSample:
""" generates n samples with augmentation """
def __init__(self, transform, n=2):
self.transform = transform
self.num = n
def __call__(self, x):
return tuple(self.transform(x) for _ in range(self.num))
|
[
"torchvision.transforms.ColorJitter",
"torchvision.transforms.RandomResizedCrop",
"torchvision.transforms.RandomHorizontalFlip",
"torchvision.transforms.RandomGrayscale"
] |
[((318, 347), 'torchvision.transforms.RandomGrayscale', 'T.RandomGrayscale', ([], {'p': 'cfg.gs_p'}), '(p=cfg.gs_p)\n', (335, 347), True, 'import torchvision.transforms as T\n'), ((361, 476), 'torchvision.transforms.RandomResizedCrop', 'T.RandomResizedCrop', (['crop'], {'scale': '(cfg.crop_s0, cfg.crop_s1)', 'ratio': '(cfg.crop_r0, cfg.crop_r1)', 'interpolation': '(3)'}), '(crop, scale=(cfg.crop_s0, cfg.crop_s1), ratio=(cfg.\n crop_r0, cfg.crop_r1), interpolation=3)\n', (380, 476), True, 'import torchvision.transforms as T\n'), ((564, 598), 'torchvision.transforms.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {'p': 'cfg.hf_p'}), '(p=cfg.hf_p)\n', (586, 598), True, 'import torchvision.transforms as T\n'), ((228, 277), 'torchvision.transforms.ColorJitter', 'T.ColorJitter', (['cfg.cj0', 'cfg.cj1', 'cfg.cj2', 'cfg.cj3'], {}), '(cfg.cj0, cfg.cj1, cfg.cj2, cfg.cj3)\n', (241, 277), True, 'import torchvision.transforms as T\n')]
|
import argparse
from torch import cuda
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--directory', metavar='EXPORT_DIR', help='destination where trained network should be saved')
parser.add_argument('--training-dataset', default='GLDv2', help='training dataset: (default: GLDv2)')
parser.add_argument('--imsize', default=1024, type=int, metavar='N', help='maximum size of longer image side used for training (default: 1024)')
parser.add_argument('--num-workers', default=8, type=int, metavar='N', help='number of data loading workers (default: 8)')
parser.add_argument('--device', type=str, default='cuda' if cuda.is_available() else 'cpu')
parser.add_argument('--num-epochs', default=100, type=int, metavar='N', help='number of total epochs to run (default: 100)')
parser.add_argument('--batch-size', '-b', default=5, type=int, metavar='N', help='number of (q,p,n1,...,nN) tuples in a mini-batch (default: 5)')
parser.add_argument('--update-every', '-u', default=1, type=int, metavar='N', help='update model weights every N batches, used to handle really large batches, ' + 'batch_size effectively becomes update_every x batch_size (default: 1)')
parser.add_argument('--resume', default=None, type=str, metavar='FILENAME', help='name of the latest checkpoint (default: None)')
parser.add_argument('--warmup-epochs', type=int, default=0, help='learning rate will be linearly scaled during warm up period')
parser.add_argument('--val-epoch', type=int, default=1)
parser.add_argument('--warmup-lr', type=float, default=0, help='Initial warmup learning rate')
parser.add_argument('--base-lr', type=float, default=1e-6)
parser.add_argument('--final-lr', type=float, default=0)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight-decay', type=float, default=1e-6)
parser.add_argument('--rank', type=int, default=None)
parser.add_argument('--world_size', type=int, default=None)
parser.add_argument('--gpu', type=int, default=None)
parser.add_argument('--dist_backend', type=str, default='nccl')
parser.add_argument('--dist_url', type=str, default='tcp://127.0.0.1:29324')
parser.add_argument('--distributed', action='store_true')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--clip_max_norm', type=float, default=0)
args = parser.parse_args()
return args
|
[
"torch.cuda.is_available",
"argparse.ArgumentParser"
] |
[((70, 95), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (93, 95), False, 'import argparse\n'), ((661, 680), 'torch.cuda.is_available', 'cuda.is_available', ([], {}), '()\n', (678, 680), False, 'from torch import cuda\n')]
|
import os
from flask import Flask
from flask import request
from flask import render_template
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'GET':
return render_template('index.html')
elif request.method == 'POST':
data = request.form
text = '''[spotify]
username = {}
password = {}
client_id = {}
client_secret = {}
private_session = true
[mpd]
enabled = true
'''.format(data['username'],
data['password'],
data['client_id'],
data['client_secret'])
text_file = open("/etc/mopidy/mopidy.conf", "w")
text_file.write(text)
text_file.close()
print("Service restart:", os.system('sudo systemctl restart mopidy.service'))
return render_template('index.html', flash="Credentials set!")
@app.route('/alarm', methods=['POST'])
def alarm():
cron_text = '{} {} * * * mpc clear && mpc add {} && mpc play'.format(
request.form['minutes'],
request.form['hours'],
request.form['spotify_uri']
)
remove_cron_command = 'sudo crontab -r'
cron_set_command = '(sudo crontab -l ; echo "{}") | sort - | uniq - | sudo crontab -'.format(cron_text)
print("Removing old crontabs:", os.system(remove_cron_command))
print("Setting crontab:", os.system(cron_set_command))
return render_template('index.html', flash="Alarm set!")
|
[
"flask.Flask",
"os.system",
"flask.render_template"
] |
[((101, 116), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (106, 116), False, 'from flask import Flask\n'), ((1411, 1460), 'flask.render_template', 'render_template', (['"""index.html"""'], {'flash': '"""Alarm set!"""'}), "('index.html', flash='Alarm set!')\n", (1426, 1460), False, 'from flask import render_template\n'), ((220, 249), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (235, 249), False, 'from flask import render_template\n'), ((1304, 1334), 'os.system', 'os.system', (['remove_cron_command'], {}), '(remove_cron_command)\n', (1313, 1334), False, 'import os\n'), ((1366, 1393), 'os.system', 'os.system', (['cron_set_command'], {}), '(cron_set_command)\n', (1375, 1393), False, 'import os\n'), ((821, 876), 'flask.render_template', 'render_template', (['"""index.html"""'], {'flash': '"""Credentials set!"""'}), "('index.html', flash='Credentials set!')\n", (836, 876), False, 'from flask import render_template\n'), ((753, 803), 'os.system', 'os.system', (['"""sudo systemctl restart mopidy.service"""'], {}), "('sudo systemctl restart mopidy.service')\n", (762, 803), False, 'import os\n')]
|
"""
Rhino Python Script Tutorial
Exercise 01
Draw point at origin.
Important note: Python is very sensitive to indentation.
Notice how the rs.AddPoint statement is indented inwards.
This means that it is part of the Main method.
All this will be clear in due time.
"""
import rhinoscriptsyntax as rs
import math
def Main():
rs.AddPoint([0,0,0])
Main()
|
[
"rhinoscriptsyntax.AddPoint"
] |
[((359, 381), 'rhinoscriptsyntax.AddPoint', 'rs.AddPoint', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (370, 381), True, 'import rhinoscriptsyntax as rs\n')]
|
import time
import random
import numpy as np
import pandas as pds
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchdiffeq import odeint_adjoint as odeint
from config import D_TYPE
def get_data():
Arg = namedtuple('Arg', ['method', 'data_size', 'batch_time', 'batch_size',
'niters', 'test_freq', 'viz', 'gpu', 'adjoint'])
args = Arg('dopri5', 1000, 20, 1, 2000, 50, False, 1, True)
true_y0 = torch.tensor([[0.0, 5.0]])
t = torch.linspace(0, 5, args.data_size + 1)
class Lambda(nn.Module):
def __init__(self, w0, f0, w1, dr):
super(Lambda, self).__init__()
self.w0 = torch.tensor(w0)
self.f0 = torch.tensor(f0)
self.w1 = torch.tensor(w1)
self.dr = torch.tensor(dr)
def force(self, t):
return self.f0 * torch.sin(self.w1 * t)
def forward(self, t, y):
dy0_dt = y[:, 1]
dy1_dt = -2. * self.w0 * y[:, 1] * self.dr - self.w0 ** 2 * y[:, 0] + self.force(t)
return torch.cat((dy0_dt.reshape(-1, 1), dy1_dt.reshape(-1, 1)), axis=1)
# This numerical solution given the true DE.
with torch.no_grad():
lam = Lambda(5., 5., 3., 0.01)
true_y = odeint(lam, true_y0, t, method='dopri5')
dat_dict = dict()
for s in range(args.data_size - args.batch_time):
batch_t = t[s:s+args.batch_time]
batch_y = torch.stack([true_y[s + i] for i in range(args.batch_time)], dim=0)
dim = batch_y.shape[-1]
x_reshaped = batch_y.reshape(args.batch_time, 1, 1, dim)
dat_dict[str(s)] = dict(t=batch_t, x=x_reshaped)
return dat_dict
|
[
"collections.namedtuple",
"torch.linspace",
"torch.no_grad",
"torch.sin",
"torch.tensor",
"torchdiffeq.odeint_adjoint"
] |
[((274, 396), 'collections.namedtuple', 'namedtuple', (['"""Arg"""', "['method', 'data_size', 'batch_time', 'batch_size', 'niters', 'test_freq',\n 'viz', 'gpu', 'adjoint']"], {}), "('Arg', ['method', 'data_size', 'batch_time', 'batch_size',\n 'niters', 'test_freq', 'viz', 'gpu', 'adjoint'])\n", (284, 396), False, 'from collections import namedtuple\n'), ((501, 527), 'torch.tensor', 'torch.tensor', (['[[0.0, 5.0]]'], {}), '([[0.0, 5.0]])\n', (513, 527), False, 'import torch\n'), ((536, 576), 'torch.linspace', 'torch.linspace', (['(0)', '(5)', '(args.data_size + 1)'], {}), '(0, 5, args.data_size + 1)\n', (550, 576), False, 'import torch\n'), ((1235, 1250), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1248, 1250), False, 'import torch\n'), ((1308, 1348), 'torchdiffeq.odeint_adjoint', 'odeint', (['lam', 'true_y0', 't'], {'method': '"""dopri5"""'}), "(lam, true_y0, t, method='dopri5')\n", (1314, 1348), True, 'from torchdiffeq import odeint_adjoint as odeint\n'), ((717, 733), 'torch.tensor', 'torch.tensor', (['w0'], {}), '(w0)\n', (729, 733), False, 'import torch\n'), ((756, 772), 'torch.tensor', 'torch.tensor', (['f0'], {}), '(f0)\n', (768, 772), False, 'import torch\n'), ((795, 811), 'torch.tensor', 'torch.tensor', (['w1'], {}), '(w1)\n', (807, 811), False, 'import torch\n'), ((834, 850), 'torch.tensor', 'torch.tensor', (['dr'], {}), '(dr)\n', (846, 850), False, 'import torch\n'), ((909, 931), 'torch.sin', 'torch.sin', (['(self.w1 * t)'], {}), '(self.w1 * t)\n', (918, 931), False, 'import torch\n')]
|
import json
class Model:
def __init__(self, zacetni_seznam_nalog, tema=''):
self.naloge = zacetni_seznam_nalog
self.aktualna_naloga = None
self.tema = tema
def dodaj_novo_nalogo(self, naloga):
self.naloge.append(naloga)
def v_slovar(self):
seznam_nalog = [
naloga.v_slovar() for naloga in self.naloge
]
return {
"naloge": seznam_nalog,
"tema": self.tema,
}
def stevilo_vseh_nalog(self):
return len(self.naloge)
@staticmethod
def iz_slovarja(slovar):
sez = [
Naloga.iz_slovarja(sl_naloga) for sl_naloga in slovar["naloge"]
]
return Model(
sez,
slovar["tema"],
)
def shrani_v_datoteko(self, ime_datoteke='stanje.json'):
with open(ime_datoteke, "w") as dat:
slovar = self.v_slovar()
json.dump(slovar, dat)
@staticmethod
def preberi_iz_datoteke(ime_datoteke='stanje.json'):
with open(ime_datoteke) as dat:
slovar = json.load(dat)
return Model.iz_slovarja(slovar)
def preveri_podatke_nove_naloge(self, ime):
napake = {}
if not ime:
napake['ime'] = 'Ime ne sme bitit prazno!'
elif len(ime) > 20:
napake['ime'] = 'Ime lahko vsebuje najvec 20 znakov.'
return napake
@staticmethod
def naredi_svezega():
n1 = Naloga("Napoleon", "Kdaj se je rodil?", "15.8.1769")
n2 = Naloga('New York', "Kje lezi?", "Severna Amerika")
n3 = Naloga('Olimpijske igre',
"Kdo je osvoji zlato medaljo za Slovenijo?", "<NAME>")
n4 = Naloga(
'You Tube', "Kako je ime prvemu videu objavlenemu na You Tubu?", "Me at the ZOO")
n5 = Naloga('Kardashianovi', "Koliko otrok ima <NAME>?", "6")
n6 = Naloga('Ameriski predsedniki',
"Kako je bilo ima prvemu ameriskemu predsedniku?", "<NAME>")
seznam = [n1, n2, n3, n4, n5, n6]
m = Model(seznam, "test")
return m
class Naloga:
def __init__(self, ime, besedilo, pravilna_resitev, moja_resitev=None):
self.ime = ime
self.besedilo = besedilo
self.pravilna_resitev = pravilna_resitev
self.moja_resitev = moja_resitev
def naloga_je_resena(self):
return self.moja_resitev and self.pravilna_resitev.lower().strip() == self.moja_resitev.lower().strip()
def v_slovar(self):
return {
"ime": self.ime,
"besedilo": self.besedilo,
"pravilna resitev": self.pravilna_resitev,
"moja resitev": self.moja_resitev,
}
@staticmethod
def iz_slovarja(slovar):
return Naloga(
slovar["ime"],
slovar["besedilo"],
slovar["pravilna resitev"],
slovar["moja resitev"],
)
|
[
"json.dump",
"json.load"
] |
[((926, 948), 'json.dump', 'json.dump', (['slovar', 'dat'], {}), '(slovar, dat)\n', (935, 948), False, 'import json\n'), ((1086, 1100), 'json.load', 'json.load', (['dat'], {}), '(dat)\n', (1095, 1100), False, 'import json\n')]
|
from django.utils import timezone
from django_filters.rest_framework import DjangoFilterBackend
from drf_haystack.viewsets import HaystackViewSet
from rest_flex_fields.views import FlexFieldsMixin
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.viewsets import ReadOnlyModelViewSet
from rest_framework_extensions.cache.mixins import CacheResponseMixin
from outpost.django.api.permissions import ExtendedDjangoModelPermissions
from outpost.django.base.decorators import docstring_format
from . import filters, key_constructors, models, serializers
# from rest_framework_extensions.mixins import (
# CacheResponseAndETAGMixin,
# )
# from rest_framework_extensions.cache.mixins import (
# CacheResponseMixin,
# )
class RoomCategoryViewSet(CacheResponseMixin, ReadOnlyModelViewSet):
queryset = models.RoomCategory.objects.all()
serializer_class = serializers.RoomCategorySerializer
object_cache_key_func = key_constructors.PersonKeyConstructor()
list_cache_key_func = key_constructors.PersonKeyConstructor()
permission_classes = (AllowAny,)
class RoomViewSet(ReadOnlyModelViewSet):
queryset = models.Room.objects.all()
serializer_class = serializers.RoomSerializer
permission_classes = (AllowAny,)
filter_fields = ("category",)
class FloorViewSet(ReadOnlyModelViewSet):
queryset = models.Floor.objects.all()
serializer_class = serializers.FloorSerializer
permission_classes = (AllowAny,)
class BuildingViewSet(ReadOnlyModelViewSet):
queryset = models.Building.objects.all()
serializer_class = serializers.BuildingSerializer
permission_classes = (AllowAny,)
@docstring_format(
model=models.Function.__doc__,
filter=filters.FunctionFilter.__doc__,
serializer=serializers.FunctionSerializer.__doc__,
)
class FunctionViewSet(FlexFieldsMixin, ReadOnlyModelViewSet):
"""
List organizational functions from CAMPUSonline.
{model}
{filter}
{serializer}
"""
queryset = models.Function.objects.all()
serializer_class = serializers.FunctionSerializer
filter_backends = (DjangoFilterBackend,)
filter_class = filters.FunctionFilter
permission_classes = (IsAuthenticated,)
permit_list_expands = ("persons",)
@docstring_format(
model=models.Organization.__doc__,
filter=filters.OrganizationFilter.__doc__,
serializer=serializers.OrganizationSerializer.__doc__,
)
class OrganizationViewSet(FlexFieldsMixin, ReadOnlyModelViewSet):
"""
List organizations from CAMPUSonline.
{model}
{filter}
{serializer}
"""
queryset = models.Organization.objects.all()
serializer_class = serializers.OrganizationSerializer
filter_backends = (DjangoFilterBackend,)
filter_class = filters.OrganizationFilter
permission_classes = (AllowAny,)
permit_list_expands = ("persons", "persons_leave", "publication_authorship")
def get_serializer_class(self):
if self.request.user and self.request.user.is_authenticated():
return serializers.AuthenticatedOrganizationSerializer
else:
return self.serializer_class
def get_serializer_context(self):
return {"request": self.request}
@docstring_format(
model=models.Person.__doc__,
filter=filters.PersonFilter.__doc__,
serializer=serializers.PersonSerializer.__doc__,
)
class PersonViewSet(FlexFieldsMixin, ReadOnlyModelViewSet):
"""
List staff accounts from CAMPUSonline.
{model}
{filter}
{serializer}
"""
queryset = models.Person.objects.all()
serializer_class = serializers.PersonSerializer
filter_backends = (DjangoFilterBackend,)
filter_class = filters.PersonFilter
permission_classes = (AllowAny,)
permit_list_expands = (
"functions",
"organizations",
"organizations_leave",
"classifications",
"expertise",
"knowledge",
"education",
)
def get_serializer_class(self):
if self.request.user and self.request.user.is_authenticated():
return serializers.AuthenticatedPersonSerializer
else:
return self.serializer_class
def get_serializer_context(self):
return {"request": self.request}
def get_queryset(self):
qs = super().get_queryset()
if self.request.user and self.request.user.is_authenticated():
return qs
else:
return qs.filter(employed=True)
@docstring_format(
model=models.Student.__doc__,
filter=filters.StudentFilter.__doc__,
serializer=serializers.StudentSerializer.__doc__,
)
class StudentViewSet(ReadOnlyModelViewSet):
"""
List student accounts from CAMPUSonline.
{model}
{filter}
{serializer}
"""
queryset = models.Student.objects.all()
serializer_class = serializers.StudentSerializer
filter_backends = (DjangoFilterBackend,)
filter_class = filters.StudentFilter
permission_classes = (IsAuthenticated,)
def get_serializer_class(self):
if self.request.user and self.request.user.is_authenticated():
return serializers.AuthenticatedStudentSerializer
else:
return self.serializer_class
@docstring_format(filter=filters.PersonOrganizationFunctionFilter.__doc__)
class PersonOrganizationFunctionViewSet(ReadOnlyModelViewSet):
"""
Map person to organizational unit and function through CAMPUSonline.
{filter}
"""
queryset = models.PersonOrganizationFunction.objects.all()
serializer_class = serializers.PersonOrganizationFunctionSerializer
filter_backends = (DjangoFilterBackend,)
filter_class = filters.PersonOrganizationFunctionFilter
permission_classes = (IsAuthenticated,)
@docstring_format(
model=models.DistributionList.__doc__,
filter=filters.DistributionListFilter.__doc__,
serializer=serializers.DistributionListSerializer.__doc__,
)
class DistributionListViewSet(
CacheResponseMixin, FlexFieldsMixin, ReadOnlyModelViewSet
):
"""
List distribution lists from CAMPUSonline.
{model}
{filter}
{serializer}
"""
queryset = models.DistributionList.objects.all()
serializer_class = serializers.DistributionListSerializer
object_cache_key_func = key_constructors.DistributionListKeyConstructor()
list_cache_key_func = key_constructors.DistributionListKeyConstructor()
filter_backends = (DjangoFilterBackend,)
filter_class = filters.DistributionListFilter
permission_classes = (IsAuthenticated,)
permit_list_expands = ("persons", "students")
@docstring_format(model=models.Event.__doc__, filter=filters.EventFilter.__doc__)
class EventViewSet(ReadOnlyModelViewSet):
"""
List events from CAMPUSonline.
{model}
{filter}
"""
queryset = models.Event.objects.all()
serializer_class = serializers.EventSerializer
filter_backends = (DjangoFilterBackend,)
filter_class = filters.EventFilter
permission_classes = (AllowAny,)
def get_queryset(self):
return self.queryset.filter(show_end__gte=timezone.now())
class CourseGroupTermViewSet(ReadOnlyModelViewSet):
queryset = models.CourseGroupTerm.objects.all()
serializer_class = serializers.CourseGroupTermSerializer
filter_backends = (DjangoFilterBackend,)
filter_class = filters.CourseGroupTermFilter
permission_classes = (IsAuthenticated,)
@docstring_format(model=models.Bulletin.__doc__, filter=filters.BulletinFilter.__doc__)
class BulletinViewSet(ReadOnlyModelViewSet):
"""
List official bulletins from CAMPUSonline.
{model}
{filter}
"""
queryset = models.Bulletin.objects.all()
serializer_class = serializers.BulletinSerializer
filter_backends = (DjangoFilterBackend,)
filter_class = filters.BulletinFilter
@docstring_format(
model=models.BulletinPage.__doc__, filter=filters.BulletinPageFilter.__doc__
)
class BulletinPageViewSet(ReadOnlyModelViewSet):
"""
List official bulletin pages with extracted text from CAMPUSonline.
{model}
{filter}
"""
queryset = models.BulletinPage.objects.all()
serializer_class = serializers.BulletinPageSerializer
filter_backends = (DjangoFilterBackend,)
filter_class = filters.BulletinPageFilter
permission_classes = (AllowAny,)
class BulletinPageSearchViewSet(HaystackViewSet):
index_models = [models.BulletinPage]
serializer_class = serializers.BulletinPageSearchSerializer
permission_classes = (AllowAny,)
@docstring_format(
model=models.FinalThesis.__doc__,
filter=filters.FinalThesisFilter.__doc__,
serializer=serializers.FinalThesisSerializer.__doc__,
)
class FinalThesisViewSet(FlexFieldsMixin, ReadOnlyModelViewSet):
"""
List final thesis from CAMPUSonline.
{model}
{filter}
{serializer}
"""
queryset = models.FinalThesis.objects.all()
serializer_class = serializers.FinalThesisSerializer
filter_backends = (DjangoFilterBackend,)
filter_class = filters.FinalThesisFilter
permission_classes = (IsAuthenticated,)
permit_list_expands = ("author", "tutor", "organization")
|
[
"django.utils.timezone.now",
"outpost.django.base.decorators.docstring_format"
] |
[((1672, 1814), 'outpost.django.base.decorators.docstring_format', 'docstring_format', ([], {'model': 'models.Function.__doc__', 'filter': 'filters.FunctionFilter.__doc__', 'serializer': 'serializers.FunctionSerializer.__doc__'}), '(model=models.Function.__doc__, filter=filters.\n FunctionFilter.__doc__, serializer=serializers.FunctionSerializer.__doc__)\n', (1688, 1814), False, 'from outpost.django.base.decorators import docstring_format\n'), ((2272, 2431), 'outpost.django.base.decorators.docstring_format', 'docstring_format', ([], {'model': 'models.Organization.__doc__', 'filter': 'filters.OrganizationFilter.__doc__', 'serializer': 'serializers.OrganizationSerializer.__doc__'}), '(model=models.Organization.__doc__, filter=filters.\n OrganizationFilter.__doc__, serializer=serializers.\n OrganizationSerializer.__doc__)\n', (2288, 2431), False, 'from outpost.django.base.decorators import docstring_format\n'), ((3234, 3370), 'outpost.django.base.decorators.docstring_format', 'docstring_format', ([], {'model': 'models.Person.__doc__', 'filter': 'filters.PersonFilter.__doc__', 'serializer': 'serializers.PersonSerializer.__doc__'}), '(model=models.Person.__doc__, filter=filters.PersonFilter.\n __doc__, serializer=serializers.PersonSerializer.__doc__)\n', (3250, 3370), False, 'from outpost.django.base.decorators import docstring_format\n'), ((4485, 4624), 'outpost.django.base.decorators.docstring_format', 'docstring_format', ([], {'model': 'models.Student.__doc__', 'filter': 'filters.StudentFilter.__doc__', 'serializer': 'serializers.StudentSerializer.__doc__'}), '(model=models.Student.__doc__, filter=filters.StudentFilter\n .__doc__, serializer=serializers.StudentSerializer.__doc__)\n', (4501, 4624), False, 'from outpost.django.base.decorators import docstring_format\n'), ((5239, 5312), 'outpost.django.base.decorators.docstring_format', 'docstring_format', ([], {'filter': 'filters.PersonOrganizationFunctionFilter.__doc__'}), '(filter=filters.PersonOrganizationFunctionFilter.__doc__)\n', (5255, 5312), False, 'from outpost.django.base.decorators import docstring_format\n'), ((5767, 5938), 'outpost.django.base.decorators.docstring_format', 'docstring_format', ([], {'model': 'models.DistributionList.__doc__', 'filter': 'filters.DistributionListFilter.__doc__', 'serializer': 'serializers.DistributionListSerializer.__doc__'}), '(model=models.DistributionList.__doc__, filter=filters.\n DistributionListFilter.__doc__, serializer=serializers.\n DistributionListSerializer.__doc__)\n', (5783, 5938), False, 'from outpost.django.base.decorators import docstring_format\n'), ((6608, 6693), 'outpost.django.base.decorators.docstring_format', 'docstring_format', ([], {'model': 'models.Event.__doc__', 'filter': 'filters.EventFilter.__doc__'}), '(model=models.Event.__doc__, filter=filters.EventFilter.__doc__\n )\n', (6624, 6693), False, 'from outpost.django.base.decorators import docstring_format\n'), ((7426, 7517), 'outpost.django.base.decorators.docstring_format', 'docstring_format', ([], {'model': 'models.Bulletin.__doc__', 'filter': 'filters.BulletinFilter.__doc__'}), '(model=models.Bulletin.__doc__, filter=filters.\n BulletinFilter.__doc__)\n', (7442, 7517), False, 'from outpost.django.base.decorators import docstring_format\n'), ((7837, 7936), 'outpost.django.base.decorators.docstring_format', 'docstring_format', ([], {'model': 'models.BulletinPage.__doc__', 'filter': 'filters.BulletinPageFilter.__doc__'}), '(model=models.BulletinPage.__doc__, filter=filters.\n BulletinPageFilter.__doc__)\n', (7853, 7936), False, 'from outpost.django.base.decorators import docstring_format\n'), ((8534, 8690), 'outpost.django.base.decorators.docstring_format', 'docstring_format', ([], {'model': 'models.FinalThesis.__doc__', 'filter': 'filters.FinalThesisFilter.__doc__', 'serializer': 'serializers.FinalThesisSerializer.__doc__'}), '(model=models.FinalThesis.__doc__, filter=filters.\n FinalThesisFilter.__doc__, serializer=serializers.FinalThesisSerializer\n .__doc__)\n', (8550, 8690), False, 'from outpost.django.base.decorators import docstring_format\n'), ((7102, 7116), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (7114, 7116), False, 'from django.utils import timezone\n')]
|
from __future__ import print_function
import torch.autograd as autograd
from torch.autograd import Variable
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import random
import numpy as np
import sys
import os
from ffn import FFN
sys.path.insert(1, os.path.join(sys.path[0], '../..'))
from helpers.model_helper import *
import csv
class ConvFFN(FFN):
'''
This subclass inherits from the LSTM class and
adds 1d convolution over the input time.
'''
def __init__(self, hidden_dim, otu_handler, slice_len,
use_gpu=False):
super(ConvFFN, self).__init__(hidden_dim, otu_handler,
slice_len,
use_gpu=use_gpu)
self.conv_element = nn.Sequential(
nn.Conv1d(self.otu_handler.num_strains, hidden_dim,
kernel_size=4, stride=2, padding=3),
nn.ReLU(),
nn.Conv1d(self.hidden_dim, self.hidden_dim,
kernel_size=2, stride=1, padding=1),
nn.ReLU(),
nn.Conv1d(self.hidden_dim, self.hidden_dim,
kernel_size=2, stride=1, padding=1),
nn.ReLU(),
nn.Conv1d(self.hidden_dim, self.hidden_dim,
kernel_size=2, stride=1, padding=1),
nn.ReLU(),
nn.Conv1d(self.hidden_dim, self.hidden_dim,
kernel_size=2, stride=1, padding=1),
nn.ReLU(),
nn.Conv1d(self.hidden_dim, self.hidden_dim,
kernel_size=1, stride=1, padding=0),
nn.ReLU(),
)
self.time_transformer = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU()
)
self.deconv_element = nn.Sequential(
nn.ConvTranspose1d(self.hidden_dim, self.hidden_dim, kernel_size=1, stride=1, padding=0),
nn.ReLU(),
nn.ConvTranspose1d(self.hidden_dim, self.hidden_dim,
kernel_size=2, stride=1, padding=1),
nn.ReLU(),
nn.ConvTranspose1d(self.hidden_dim, self.hidden_dim,
kernel_size=2, stride=1, padding=1),
nn.ReLU(),
nn.ConvTranspose1d(self.hidden_dim, self.hidden_dim,
kernel_size=2, stride=1, padding=1),
nn.ReLU(),
nn.ConvTranspose1d(self.hidden_dim, self.hidden_dim,
kernel_size=2, stride=1, padding=1),
nn.ReLU(),
nn.ConvTranspose1d(self.hidden_dim, self.otu_handler.num_strains,
kernel_size=4, stride=2, padding=3),
nn.ReLU()
)
# self.lin_final = nn.Linear(self.otu_handler.num_strains,
# self.otu_handler.num_strains)
def forward(self, data):
# data is shape: sequence_size x batch x num_strains
data = data.transpose(0, 1).transpose(1, 2)
# print(data.size())
data = self.conv_element(data)
# print(data.size())
data = data.transpose(0, 2).transpose(1, 2)
data = self.time_transformer(data)
# print(data.size())
data = self.deconv_element(data.transpose(0,1).transpose(1,2))
# print(data.size())
return data
|
[
"torch.nn.ReLU",
"torch.nn.Conv1d",
"torch.nn.ConvTranspose1d",
"torch.nn.Linear",
"os.path.join"
] |
[((315, 349), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""../.."""'], {}), "(sys.path[0], '../..')\n", (327, 349), False, 'import os\n'), ((846, 937), 'torch.nn.Conv1d', 'nn.Conv1d', (['self.otu_handler.num_strains', 'hidden_dim'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(3)'}), '(self.otu_handler.num_strains, hidden_dim, kernel_size=4, stride=2,\n padding=3)\n', (855, 937), True, 'import torch.nn as nn\n'), ((969, 978), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (976, 978), True, 'import torch.nn as nn\n'), ((992, 1071), 'torch.nn.Conv1d', 'nn.Conv1d', (['self.hidden_dim', 'self.hidden_dim'], {'kernel_size': '(2)', 'stride': '(1)', 'padding': '(1)'}), '(self.hidden_dim, self.hidden_dim, kernel_size=2, stride=1, padding=1)\n', (1001, 1071), True, 'import torch.nn as nn\n'), ((1108, 1117), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1115, 1117), True, 'import torch.nn as nn\n'), ((1131, 1210), 'torch.nn.Conv1d', 'nn.Conv1d', (['self.hidden_dim', 'self.hidden_dim'], {'kernel_size': '(2)', 'stride': '(1)', 'padding': '(1)'}), '(self.hidden_dim, self.hidden_dim, kernel_size=2, stride=1, padding=1)\n', (1140, 1210), True, 'import torch.nn as nn\n'), ((1246, 1255), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1253, 1255), True, 'import torch.nn as nn\n'), ((1269, 1348), 'torch.nn.Conv1d', 'nn.Conv1d', (['self.hidden_dim', 'self.hidden_dim'], {'kernel_size': '(2)', 'stride': '(1)', 'padding': '(1)'}), '(self.hidden_dim, self.hidden_dim, kernel_size=2, stride=1, padding=1)\n', (1278, 1348), True, 'import torch.nn as nn\n'), ((1384, 1393), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1391, 1393), True, 'import torch.nn as nn\n'), ((1407, 1486), 'torch.nn.Conv1d', 'nn.Conv1d', (['self.hidden_dim', 'self.hidden_dim'], {'kernel_size': '(2)', 'stride': '(1)', 'padding': '(1)'}), '(self.hidden_dim, self.hidden_dim, kernel_size=2, stride=1, padding=1)\n', (1416, 1486), True, 'import torch.nn as nn\n'), ((1522, 1531), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1529, 1531), True, 'import torch.nn as nn\n'), ((1545, 1624), 'torch.nn.Conv1d', 'nn.Conv1d', (['self.hidden_dim', 'self.hidden_dim'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(self.hidden_dim, self.hidden_dim, kernel_size=1, stride=1, padding=0)\n', (1554, 1624), True, 'import torch.nn as nn\n'), ((1660, 1669), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1667, 1669), True, 'import torch.nn as nn\n'), ((1741, 1774), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'hidden_dim'], {}), '(hidden_dim, hidden_dim)\n', (1750, 1774), True, 'import torch.nn as nn\n'), ((1788, 1797), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1795, 1797), True, 'import torch.nn as nn\n'), ((1811, 1844), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'hidden_dim'], {}), '(hidden_dim, hidden_dim)\n', (1820, 1844), True, 'import torch.nn as nn\n'), ((1858, 1867), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1865, 1867), True, 'import torch.nn as nn\n'), ((1936, 2029), 'torch.nn.ConvTranspose1d', 'nn.ConvTranspose1d', (['self.hidden_dim', 'self.hidden_dim'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(self.hidden_dim, self.hidden_dim, kernel_size=1, stride=\n 1, padding=0)\n', (1954, 2029), True, 'import torch.nn as nn\n'), ((2038, 2047), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2045, 2047), True, 'import torch.nn as nn\n'), ((2061, 2154), 'torch.nn.ConvTranspose1d', 'nn.ConvTranspose1d', (['self.hidden_dim', 'self.hidden_dim'], {'kernel_size': '(2)', 'stride': '(1)', 'padding': '(1)'}), '(self.hidden_dim, self.hidden_dim, kernel_size=2, stride=\n 1, padding=1)\n', (2079, 2154), True, 'import torch.nn as nn\n'), ((2194, 2203), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2201, 2203), True, 'import torch.nn as nn\n'), ((2217, 2310), 'torch.nn.ConvTranspose1d', 'nn.ConvTranspose1d', (['self.hidden_dim', 'self.hidden_dim'], {'kernel_size': '(2)', 'stride': '(1)', 'padding': '(1)'}), '(self.hidden_dim, self.hidden_dim, kernel_size=2, stride=\n 1, padding=1)\n', (2235, 2310), True, 'import torch.nn as nn\n'), ((2350, 2359), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2357, 2359), True, 'import torch.nn as nn\n'), ((2373, 2466), 'torch.nn.ConvTranspose1d', 'nn.ConvTranspose1d', (['self.hidden_dim', 'self.hidden_dim'], {'kernel_size': '(2)', 'stride': '(1)', 'padding': '(1)'}), '(self.hidden_dim, self.hidden_dim, kernel_size=2, stride=\n 1, padding=1)\n', (2391, 2466), True, 'import torch.nn as nn\n'), ((2506, 2515), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2513, 2515), True, 'import torch.nn as nn\n'), ((2529, 2622), 'torch.nn.ConvTranspose1d', 'nn.ConvTranspose1d', (['self.hidden_dim', 'self.hidden_dim'], {'kernel_size': '(2)', 'stride': '(1)', 'padding': '(1)'}), '(self.hidden_dim, self.hidden_dim, kernel_size=2, stride=\n 1, padding=1)\n', (2547, 2622), True, 'import torch.nn as nn\n'), ((2662, 2671), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2669, 2671), True, 'import torch.nn as nn\n'), ((2685, 2790), 'torch.nn.ConvTranspose1d', 'nn.ConvTranspose1d', (['self.hidden_dim', 'self.otu_handler.num_strains'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(3)'}), '(self.hidden_dim, self.otu_handler.num_strains,\n kernel_size=4, stride=2, padding=3)\n', (2703, 2790), True, 'import torch.nn as nn\n'), ((2831, 2840), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2838, 2840), True, 'import torch.nn as nn\n')]
|
# -----------------------------------------------------------------------------
# Copyright (c) 2015, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import TestCase, main
from deblur.support_files import pos_db, neg_db
import os.path
class supportFilesTests(TestCase):
"""Test the supporting data files
"""
def test_reference_(self):
"""Test if the positive and negative filtering
reference fasta files exist
"""
# the positive filtering fasta file
self.assertTrue(os.path.isfile(pos_db))
# the negative filtering fasta file
self.assertTrue(os.path.isfile(neg_db))
if __name__ == '__main__':
main()
|
[
"unittest.main"
] |
[((885, 891), 'unittest.main', 'main', ([], {}), '()\n', (889, 891), False, 'from unittest import TestCase, main\n')]
|
"""Class for converter."""
import numpy as np
import math
import cmath
import scipy
import logging
from scipy import signal
from scipy.integrate import odeint,ode
#from converter_utilities import plot_signal, plot_FFT
import converter_utilities
import config
from models import InverterModels
class PowerElectronicConverter:
"""
Converter base class.
Attributes:
count (int): Number of converter objects.
"""
count = 0 #Object count
def __init__(self,model_type):
"""Creates an instance of `Converter`.
Args:
fsw (float): Switching frequency in Hz.
Raises:
ValueError: If parameters corresponding to `Sinverter_rated` are not available.
"""
PowerElectronicConverter.count = PowerElectronicConverter.count+1 #Increment count to keep track of number of converter model instances
self.name = 'converter_'+str(PowerElectronicConverter.count) #Generate a name for the instance
self.model_type = model_type
"""
if self.model_type is 'switching':
assert self.signal_type is 'square_wave' or self.signal_type is 'sinePWM', 'Switching model needs square or sine PWM as switching signal!'
if self.model_type is 'average':
assert self.signal_type is 'duty_cycle', 'Average model needs duty_cycle as switching signal!'
"""
def check_model_type(self,model_type):
"""Check if model type is valid."""
assert model_type in self.model_types, f'{model_type} is not a valid model type!'
def show_spec(self):
"""Print the specs."""
print('Model type:{}'.format(self.model_type))
print('Switching signal type:{}'.format(self.signal_type))
def calc_primary(self,signal):
"""Calculate the primary switch."""
assert isinstance(signal,bool), 'Switching signal must be boolean.'
Sprimary = int(signal)
return Sprimary
def calc_complimentary(self,signal):
"""Calculate the complimentary."""
assert isinstance(signal,bool), 'Switching signal must be boolean.'
Scomplimentary = int(not signal)
return Scomplimentary
def calc_average(self,m):
"""Calculate average voltage."""
return Vdc
#Current controller dynamics
class PowerElectronicInverter(PowerElectronicConverter,InverterModels):
"""
Inverter class.
Attributes:
():
"""
Rf = 0.01
Lf = 1.0e-3
Rload = 1.0
inverter_types = ['single_phase_half_bridge','single_phase_full_bridge',
'three_phase_full_bridge']
model_types = ['EMT_switching','EMT_average','dynamic_phasor']
def __init__(self,Vdc,model_type = 'EMT_average',inverter_type='single_phase_half_bridge'):
"""Creates an instance of `Converter`.
Args:
Vdc (float): DC link voltage.
Raises:
ValueError: To be added.
"""
self.check_model_type(model_type)
super().__init__(model_type) #Initialize converter class (base class)
self.update_Vdc(Vdc)
self.inverter_type =inverter_type
@property #Decorator used for auto updating
def y(self):
"""List of initial states"""
return [self.ia, 0.0]
def update_Vdc(self,Vdc):
"""Update DC link voltage."""
self.Vdc = Vdc
"""
def control_signal_calc(self,signals,t):
Calculate control signal.
if self.model_type is 'EMT_switching':
signals = self.switching_signal_calc(signals,t)
control_signal = signals['switching']
elif self.model_type is 'EMT_average':
signals = self.average_signal_calc(signals,t)
control_signal = signals['modulating']
elif self.model_type is 'dynamicphasor':
pass
return control_signal
"""
def setup_model(self):
"""Initialize mode."""
self.initialize_model()
self.vt_calc = self.select_vt_model()
self.vpcc_calc = self.select_vpcc_model()
self.ODE_model = self.select_ODE_model()
#self.control_signal_calc = self.select_control_signal()
def select_control_signal(self):
"""Select the control signal suitable for the problem."""
if self.model_type is 'EMT_switching':
if self.inverter_type == 'single_phase_half_bridge':
control_signal = self.switching_signal_single_phase
elif self.inverter_type == 'single_phase_full_bridge':
raise NotImplementedError(f'{self.inverter_type} is not implemented!')
elif self.model_type is 'EMT_average':
if self.inverter_type == 'single_phase_half_bridge':
control_signal = self.modulating_signal_single_phase
elif self.inverter_type == 'single_phase_full_bridge':
raise NotImplementedError(f'{self.inverter_type} is not implemented!')
elif self.model_type is 'dynamic_phasor':
if self.inverter_type == 'single_phase_half_bridge':
control_signal = self.phasor_signal_single_phase
elif self.inverter_type == 'single_phase_full_bridge':
raise NotImplementedError(f'{self.inverter_type} is not implemented!')
return control_signal
def select_vt_model(self):
"""Get the terminal voltage model."""
if self.model_type == 'EMT_switching':
if self.inverter_type == 'single_phase_half_bridge':
vt_model = self.single_phase_half_bridge_switching
elif self.inverter_type == 'single_phase_full_bridge':
vt_model = self.single_phase_full_bridge_switching
elif self.inverter_type == 'three_phase_full_bridge':
vt_model = self.three_phase_full_bridge_switching
else:
print(f'{self.inverter_type} not found for model type {self.model_type}!')
elif self.model_type == 'EMT_average':
if self.inverter_type == 'single_phase_half_bridge':
vt_model = self.single_phase_half_bridge_average
elif self.inverter_type == 'single_phase_full_bridge':
vt_model = self.single_phase_full_bridge_average
elif self.inverter_type == 'three_phase_full_bridge':
raise NotImplementedError(f'{self.inverter_type} is not implemented!')
else:
print(f'{self.inverter_type} not found for model type {self.model_type}!')
elif self.model_type == 'dynamicphasor':
if self.inverter_type == 'single_phase_half_bridge':
vt_model = self.single_phase_half_bridge_phasor
elif self.inverter_type == 'single_phase_full_bridge':
vt_model = self.single_phase_full_bridge_phasor
elif self.inverter_type == 'three_phase_full_bridge':
raise NotImplementedError(f'{self.inverter_type} is not implemented!')
else:
print(f'{self.inverter_type} not found for model type {self.model_type}!')
print(type(vt_model))
return vt_model
def select_vpcc_model(self,grid=None):
"""Get the PCC voltage model."""
if not grid:
vpcc_model = self.v_load_model()
return vpcc_model
def select_ODE_model(self):
"""Select ODE model."""
if self.model_type is 'EMT_switching' or self.model_type is 'EMT_average':
if self.inverter_type is 'single_phase_half_bridge' or self.inverter_type is 'single_phase_full_bridge':
ODE_model = self.ODE_model_single_phase_EMT
elif self.inverter_type is 'three_phase_full_bridge':
raise NotImplementedError(f'{self.inverter_type} is not implemented!')
elif self.model_type is 'dynamic_phasor':
if self.inverter_type is 'single_phase_half_bridge' or self.inverter_type is 'single_phase_full_bridge':
ODE_model = self.ODE_model_single_phase_dynamicphasor
elif self.inverter_type is 'three_phase_full_bridge':
raise NotImplementedError(f'{self.inverter_type} is not implemented!')
return ODE_model
def initialize_model(self):
"""Initialize mode."""
if self.model_type is 'EMT_switching' or self.model_type is 'EMT_average':
if self.inverter_type is 'single_phase_half_bridge' or self.inverter_type is 'single_phase_full_bridge':
self.ia = 0.0
elif self.inverter_type is 'three_phase_full_bridge':
raise NotImplementedError
elif self.model_type is 'dynamic_phasor':
if self.inverter_type is 'single_phase_half_bridge' or self.inverter_type is 'single_phase_full_bridge':
self.iaR = 0.0
self.iaI = 0.0
if self.inverter_type is 'three_phase_full_bridge':
raise NotImplementedError
"""
def vta_calc(self,Vdc,control_signal):
Calculate inverter terminal voltage.
if self.model_type is 'switching':
vta = self.half_bridge_switching(Vdc,control_signal)
elif self.model_type is 'average':
vta = self.half_bridge_average(Vdc,control_signal)
return vta
"""
def v_load_model(self):
"""Calculate voltage across load at PCC."""
return self.Rload*self.ia
def ODE_model_switching(self,y,t):
"""ODE model of inverter branch."""
self.ia,dummy = y # unpack current values of y
Vdc = 100.0 #Get DC link voltage
switching_signal = self.control_signal_calc(t)
self.vta = self.half_bridge_switching(Vdc,switching_signal)
self.va = self.PCC_voltage_calc(self.ia,t)
dia = (1/self.Lf)*(-self.Rf*self.ia -self.va + self.vta)
result = [dia,dummy]
return np.array(result)
def ODE_model_average(self,y,t):
"""ODE model of inverter branch."""
self.ia,dummy = y # unpack current values of y
Vdc = 100.0 #Get DC link voltage
modulating_signal = self.control_signal_calc(t)
self.vta = self.half_bridge_average(Vdc,modulating_signal)
self.va = self.PCC_voltage_calc(self.ia,t)
dia = (1/self.Lf)*(-self.Rf*self.ia -self.va + self.vta)
result = [dia,dummy]
return np.array(result)
def power_calc(self,v,i):
"""Calcuate instantaneous power."""
return v*i
def show_states(self):
"""Show states."""
print('Inverter states:{}'.format(self.y))
|
[
"numpy.array"
] |
[((10519, 10535), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (10527, 10535), True, 'import numpy as np\n'), ((11081, 11097), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (11089, 11097), True, 'import numpy as np\n')]
|
import pytest
from todocli.todo import commands
class TestCommandparser(object):
successful_user_input = ['-f', 'file1', '-e', '.py', '-m', ]
successfully_parsed_args = commands.command_interpreter(successful_user_input)
no_args_parsed_args = commands.command_interpreter([])
# successful run
def test_HasNamesAttribute(self):
assert hasattr(self.successfully_parsed_args, 'names')
def test_HasExtensionsAttribute(self):
assert hasattr(self.successfully_parsed_args, 'extensions')
def test_HasIsFolderAttribute(self):
assert hasattr(self.successfully_parsed_args, 'is_folder')
def test_HasNewConfigAttribute(self):
assert hasattr(self.successfully_parsed_args, 'new_config')
def test_FileNamePresent(self):
assert 'file1' in self.successfully_parsed_args.names
def test_ExtensionPresent(self):
assert '.py' in self.successfully_parsed_args.extensions
def test_IsFolderIsTrue(self):
assert self.successfully_parsed_args.is_folder == True
def test_NewConfigIsFalse(self):
assert self.successfully_parsed_args.new_config == False
# no filename arguement
def test_NoFileNameArguement(self):
assert self.no_args_parsed_args.names is None
# no extension argument
def test_NoExtensionsArgument(self):
assert self.no_args_parsed_args.extensions is None
# no is_folder argument
def test_NoIsFolderArguement(self):
assert self.no_args_parsed_args.is_folder is None
# no new_config argument
def test_NoNewConfigArgeuement(self):
assert self.no_args_parsed_args.new_config is False
# no debug argument
def test_NoDebugArguement(self):
assert self.no_args_parsed_args.debug_mode is False
# no file name in input
def test_NoFileName(self):
no_file_name_user_input = ['-f', '-e', '.py', '-m', ]
with pytest.raises(SystemExit):
commands.command_interpreter(no_file_name_user_input)
# No extensions in input
def test_NoExtensions(self):
no_extension_user_input = ['-f', 'File1', '-e', '-m', ]
with pytest.raises(SystemExit):
commands.command_interpreter(no_extension_user_input)
|
[
"todocli.todo.commands.command_interpreter",
"pytest.raises"
] |
[((178, 229), 'todocli.todo.commands.command_interpreter', 'commands.command_interpreter', (['successful_user_input'], {}), '(successful_user_input)\n', (206, 229), False, 'from todocli.todo import commands\n'), ((256, 288), 'todocli.todo.commands.command_interpreter', 'commands.command_interpreter', (['[]'], {}), '([])\n', (284, 288), False, 'from todocli.todo import commands\n'), ((1937, 1962), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (1950, 1962), False, 'import pytest\n'), ((1976, 2029), 'todocli.todo.commands.command_interpreter', 'commands.command_interpreter', (['no_file_name_user_input'], {}), '(no_file_name_user_input)\n', (2004, 2029), False, 'from todocli.todo import commands\n'), ((2176, 2201), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (2189, 2201), False, 'import pytest\n'), ((2215, 2268), 'todocli.todo.commands.command_interpreter', 'commands.command_interpreter', (['no_extension_user_input'], {}), '(no_extension_user_input)\n', (2243, 2268), False, 'from todocli.todo import commands\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 4 12:48:05 2020
@author: sven
"""
import numpy as np
def nearfield(f,c,theta):
"""
Compute the nearfield
Parameters
----------
f : numeric
Transducer Frequency in kHz [kHz].
c : numeric
Ambient sound speed [m/s].
theta : numeric
3dB angle or beam width in degrees [degrees].
Returns
-------
Rnf : numeric
Range of the nearfield for the given conditions in meters [m].
"""
lmbd = c/ ( f * 1000)
k = 2* np.pi / lmbd
a = 1.6 / (k * np.sin((theta * np.pi/180) / 2))
Rnf = (2*a)**2 / lmbd
return Rnf
def eba(f,c,theta):
"""
Compute the equivalent beam angle for a circular transducer.
Parameters
----------
f : numeric
Transducer Frequency in kHz [kHz].
c : numeric
Ambient sound speed [m/s].
theta : numeric
3dB angle or beam width in degrees [degrees].
Returns
-------
EBA : numeric
equivalent beam angle in dB [dB].
"""
lmbd = c/ ( f * 1000)
k = 2* np.pi / lmbd
a = 1.6 / (k * np.sin((theta * np.pi/180) / 2))
EBA = 10 * np.log10( 5.78 / ( ( k * a ) ** 2))#equivalent beam angle in steradians
return EBA
def vol_samp(f,c,theta,tau,R,start=0):
f = f*1000
Rtot = R+start
Vtot = 10**(eba(f,c,theta)/10) * Rtot**2 * c * tau / 2
V0 = 10**(eba(f,c,theta)/10) * start**2 * c * tau / 2
V = Vtot - V0
return V
def footprint_radius(theta,R):
return R * np.tan(theta * np.pi / 180 / 2)
def footprint_area(theta, R):
return np.pi * footprint_radius(theta,R)**2
'''
vol_samp(f=200,c=1450,theta=9.8,tau=6/1000,R=10)
vol_samp(f=1000,c=1450,theta=4,tau=6/1000,R=10)
#Zonar
nearfield(200,1480,9.8)
nearfield(1000,1480,4)
c=1450;f=200000
0.045**2/(c/f)
c=1450;f=1000000
0.022**2/(c/f)
'''
|
[
"numpy.sin",
"numpy.log10",
"numpy.tan"
] |
[((1024, 1053), 'numpy.log10', 'np.log10', (['(5.78 / (k * a) ** 2)'], {}), '(5.78 / (k * a) ** 2)\n', (1032, 1053), True, 'import numpy as np\n'), ((1356, 1387), 'numpy.tan', 'np.tan', (['(theta * np.pi / 180 / 2)'], {}), '(theta * np.pi / 180 / 2)\n', (1362, 1387), True, 'import numpy as np\n'), ((504, 535), 'numpy.sin', 'np.sin', (['(theta * np.pi / 180 / 2)'], {}), '(theta * np.pi / 180 / 2)\n', (510, 535), True, 'import numpy as np\n'), ((979, 1010), 'numpy.sin', 'np.sin', (['(theta * np.pi / 180 / 2)'], {}), '(theta * np.pi / 180 / 2)\n', (985, 1010), True, 'import numpy as np\n')]
|
from weather_tracker import weather_tracker, output_file_path
from wt_exceptions import WeatherException, LocationException
def check_selection_value(value):
return value in [1, 2]
if __name__ == '__main__':
print("Welcome to the Automatic Weather Machine! We find your location (Based on IP Address) and tell you the "
"weather for your area for the week")
output = None
try:
print('1 - Print to Console')
print('2 - Output to TXT File')
while output is None or type(output) != int or output > 2 or output < 1:
try:
output = int(input("Output Selection: "))
if not check_selection_value(output):
output = None
print("Provide a valid selection for output!")
except ValueError:
print("{} is not a number, please enter a number only".format(output))
result = weather_tracker(output)
if len(result) != 3:
print('You can find your forecast in the file: {}'.format(output_file_path))
else:
forecast = result[0]
city = result[1]
district = result[2]
print("Here is the forecast for {}, {}:".format(city, district))
for day in forecast:
print(day)
except (LocationException, WeatherException) as error:
print(error.args[0])
|
[
"weather_tracker.weather_tracker"
] |
[((933, 956), 'weather_tracker.weather_tracker', 'weather_tracker', (['output'], {}), '(output)\n', (948, 956), False, 'from weather_tracker import weather_tracker, output_file_path\n')]
|
import pandas as pd
import numpy as np
class regout(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
stat_names=['coeff', 'se', 't', 'p>t', 'CI_low', 'CI_high']
var_names=['mpg', 'length', '_cons']
tsls_std = regout(
summary=pd.DataFrame(np.array([
[-1319.865169393102,
1906.786380881755,
-.6921935160784805,
.4910734473693195,
-5121.889227450638,
2482.158888664433,
],
[-217.1947537663291,
420.1260089670161,
-.5169752624941175,
.6067801835089433,
-1054.902223005562,
620.5127154729038,
],
[75092.75604853875,
119511.8053379244,
.6283291917163411,
.5318043826192644,
-163207.0155842729,
313392.5276813505,
],
]),
columns=stat_names,
index=var_names),
vce=pd.DataFrame(np.array([
[3635834.30231614,
799471.1768877679,
-227680006.992276,
],
[799471.1768877679,
176505.8634105533,
-50197751.5841309,
],
[-227680006.992276,
-50197751.5841309,
14283071615.12995,
],
]),
columns=var_names,
index=var_names),
N=74,
r2=np.nan,
r2_a=np.nan,
mss=-1031817172.794085,
tss=np.nan,
rss=1666882568.915706,
kappa=np.nan,
F=3.97987798611259,
pF=.0230019984382644,
)
tsls_robust = regout(
summary=pd.DataFrame(np.array([
[-1319.865169393102,
2357.647789772478,
-.5598228773265894,
.5773622437125422,
-6020.881343525829,
3381.151004739624,
],
[-217.1947537663291,
503.6720846601052,
-.4312225362120266,
.6676130605679584,
-1221.488366543325,
787.0988590106673,
],
[75092.75604853875,
144765.6412502902,
.5187194654752942,
.6055693972498957,
-213561.7342143963,
363747.2463114738,
],
]),
columns=stat_names,
index=var_names),
vce=pd.DataFrame(np.array([
[5558503.100619048,
1185986.375722446,
-341107563.0831394,
],
[1185986.375722446,
253685.5688658562,
-72904288.91181517,
],
[-341107563.0831394,
-72904288.91181517,
20957090886.60773,
],
]),
columns=var_names,
index=var_names),
N=74,
r2=np.nan,
r2_a=np.nan,
mss=-1031817172.794085,
tss=np.nan,
rss=1666882568.915706,
kappa=np.nan,
F=3.406896316082843,
pF=.0386511725211229,
)
tsls_cluster = regout(
summary=pd.DataFrame(np.array([
[-1319.865169393102,
2257.567862016117,
-.5846403076514384,
.5625396644960171,
-5902.971584635199,
3263.241245848994,
],
[-217.1947537663291,
486.3497477085017,
-.4465814052329,
.6579283787885248,
-1204.537232491913,
770.1477249592547,
],
[75092.75604853875,
139493.4175166438,
.5383247280437371,
.5937601902027558,
-208093.9367907353,
358279.4488878128,
],
]),
columns=stat_names,
index=var_names),
vce=pd.DataFrame(np.array([
[5096612.65160802,
1096219.32167181,
-314686204.683651,
],
[1096219.32167181,
236536.0770961233,
-67830404.58467865,
],
[-314686204.683651,
-67830404.58467865,
19458413530.47272,
],
]),
columns=var_names,
index=var_names),
N=74,
r2=np.nan,
r2_a=np.nan,
mss=-1031817172.794085,
tss=np.nan,
rss=1666882568.915706,
kappa=np.nan,
F=3.125695274137819,
pF=.0563657644983311,
)
|
[
"numpy.array"
] |
[((278, 673), 'numpy.array', 'np.array', (['[[-1319.865169393102, 1906.786380881755, -0.6921935160784805, \n 0.4910734473693195, -5121.889227450638, 2482.158888664433], [-\n 217.1947537663291, 420.1260089670161, -0.5169752624941175, \n 0.6067801835089432, -1054.902223005562, 620.5127154729038], [\n 75092.75604853875, 119511.8053379244, 0.628329191716341, \n 0.5318043826192644, -163207.0155842729, 313392.5276813505]]'], {}), '([[-1319.865169393102, 1906.786380881755, -0.6921935160784805, \n 0.4910734473693195, -5121.889227450638, 2482.158888664433], [-\n 217.1947537663291, 420.1260089670161, -0.5169752624941175, \n 0.6067801835089432, -1054.902223005562, 620.5127154729038], [\n 75092.75604853875, 119511.8053379244, 0.628329191716341, \n 0.5318043826192644, -163207.0155842729, 313392.5276813505]])\n', (286, 673), True, 'import numpy as np\n'), ((734, 930), 'numpy.array', 'np.array', (['[[3635834.30231614, 799471.1768877679, -227680006.992276], [\n 799471.1768877679, 176505.8634105533, -50197751.5841309], [-\n 227680006.992276, -50197751.5841309, 14283071615.12995]]'], {}), '([[3635834.30231614, 799471.1768877679, -227680006.992276], [\n 799471.1768877679, 176505.8634105533, -50197751.5841309], [-\n 227680006.992276, -50197751.5841309, 14283071615.12995]])\n', (742, 930), True, 'import numpy as np\n'), ((1185, 1581), 'numpy.array', 'np.array', (['[[-1319.865169393102, 2357.647789772478, -0.5598228773265894, \n 0.5773622437125422, -6020.881343525829, 3381.151004739624], [-\n 217.1947537663291, 503.6720846601052, -0.4312225362120266, \n 0.6676130605679584, -1221.488366543325, 787.0988590106673], [\n 75092.75604853875, 144765.6412502902, 0.5187194654752942, \n 0.6055693972498957, -213561.7342143963, 363747.2463114738]]'], {}), '([[-1319.865169393102, 2357.647789772478, -0.5598228773265894, \n 0.5773622437125422, -6020.881343525829, 3381.151004739624], [-\n 217.1947537663291, 503.6720846601052, -0.4312225362120266, \n 0.6676130605679584, -1221.488366543325, 787.0988590106673], [\n 75092.75604853875, 144765.6412502902, 0.5187194654752942, \n 0.6055693972498957, -213561.7342143963, 363747.2463114738]])\n', (1193, 1581), True, 'import numpy as np\n'), ((1641, 1842), 'numpy.array', 'np.array', (['[[5558503.100619048, 1185986.375722446, -341107563.0831394], [\n 1185986.375722446, 253685.5688658562, -72904288.91181517], [-\n 341107563.0831394, -72904288.91181517, 20957090886.60773]]'], {}), '([[5558503.100619048, 1185986.375722446, -341107563.0831394], [\n 1185986.375722446, 253685.5688658562, -72904288.91181517], [-\n 341107563.0831394, -72904288.91181517, 20957090886.60773]])\n', (1649, 1842), True, 'import numpy as np\n'), ((2099, 2492), 'numpy.array', 'np.array', (['[[-1319.865169393102, 2257.567862016117, -0.5846403076514384, \n 0.5625396644960171, -5902.971584635199, 3263.241245848994], [-\n 217.1947537663291, 486.3497477085017, -0.4465814052329, \n 0.6579283787885248, -1204.537232491913, 770.1477249592547], [\n 75092.75604853875, 139493.4175166438, 0.5383247280437371, \n 0.5937601902027558, -208093.9367907353, 358279.4488878128]]'], {}), '([[-1319.865169393102, 2257.567862016117, -0.5846403076514384, \n 0.5625396644960171, -5902.971584635199, 3263.241245848994], [-\n 217.1947537663291, 486.3497477085017, -0.4465814052329, \n 0.6579283787885248, -1204.537232491913, 770.1477249592547], [\n 75092.75604853875, 139493.4175166438, 0.5383247280437371, \n 0.5937601902027558, -208093.9367907353, 358279.4488878128]])\n', (2107, 2492), True, 'import numpy as np\n'), ((2552, 2748), 'numpy.array', 'np.array', (['[[5096612.65160802, 1096219.32167181, -314686204.683651], [1096219.32167181,\n 236536.0770961233, -67830404.58467865], [-314686204.683651, -\n 67830404.58467865, 19458413530.47272]]'], {}), '([[5096612.65160802, 1096219.32167181, -314686204.683651], [\n 1096219.32167181, 236536.0770961233, -67830404.58467865], [-\n 314686204.683651, -67830404.58467865, 19458413530.47272]])\n', (2560, 2748), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
from django.db import models, migrations
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('omaha', '0022_auto_20150909_0755'),
]
operations = [
migrations.AddField(
model_name='request',
name='ip',
field=models.GenericIPAddressField(null=True, blank=True),
),
]
|
[
"django.db.models.GenericIPAddressField"
] |
[((341, 392), 'django.db.models.GenericIPAddressField', 'models.GenericIPAddressField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (369, 392), False, 'from django.db import models, migrations\n')]
|
import gibberish
def test_generate_word():
word = gibberish.generate_word()
assert len(word)
assert word.isalpha()
def test_generate_words():
word_list = gibberish.generate_word()
assert len(word_list)
for word in word_list:
assert len(word)
assert word.isalpha()
|
[
"gibberish.generate_word"
] |
[((56, 81), 'gibberish.generate_word', 'gibberish.generate_word', ([], {}), '()\n', (79, 81), False, 'import gibberish\n'), ((174, 199), 'gibberish.generate_word', 'gibberish.generate_word', ([], {}), '()\n', (197, 199), False, 'import gibberish\n')]
|
#import pygame
#pygame.mixer.init()
#pygame.mixer.music.load('desaf021.mp3')
#pygame.mixer.music.play()
#while pygame.mixer.music.get_busy(): pass
import playsound
playsound.playsound('desaf021.mp3')
|
[
"playsound.playsound"
] |
[((164, 199), 'playsound.playsound', 'playsound.playsound', (['"""desaf021.mp3"""'], {}), "('desaf021.mp3')\n", (183, 199), False, 'import playsound\n')]
|
# Author: <NAME>
# MIT license (see LICENCE.txt in the top-level folder)
import unittest
import numpy as np
from numpy import random
from numpy import linalg as LA
from sklearn.linear_model import LinearRegression, LogisticRegression
from single_neuron import models as models
from single_neuron import math_utils as math_utils
datasets_n = 50
max_ds_n = 10000
max_features_n = 100
max_abs_value = 1000
min_epochs = 100
max_epochs = 10000
min_lr = 1e-9
max_lr = 1e-5
def generate_synthetic_datasets(N_max, m_max, gaussian=False):
N_train = random.randint(3, N_max + 1)
N_valid = random.randint(3, N_max + 1)
m = random.randint(2, m_max + 1)
if gaussian:
# we are generating a synthetic dataset based on a multivariate Gaussian
# distribution. In order to generate the latter, we need a mean vector
# (easy) and a positive definite matrix for the covariances. This matrix
# is way more tricky to sample and I don' t know what is the best way.
# My current brute-force approach is the following: (a) I sample m
# vectors; (b) I take all the possible inner products (Gram matrix) as
# the covariance matrix and (c) if the covariance matrix is singular, I
# go back to step (b).
mu = 2 * (random.rand(m) - 0.5) * max_abs_value
Cov = np.zeros([m, m])
while LA.matrix_rank(Cov) != m:
a = 2 * (random.rand(m) - 0.5) * max_abs_value
X = a * random.rand(m, m)
Cov = X.T.dot(X)
train_ds = random.multivariate_normal(mu, Cov, N_train)
valid_ds = random.multivariate_normal(mu, Cov, N_valid)
else:
# uniformly random datasets
train_ds = 2 * (random.rand(N_train, m) - 0.5) * max_abs_value
valid_ds = 2 * (random.rand(N_valid, m) - 0.5) * max_abs_value
return train_ds, valid_ds
class TestLinearNeuron(unittest.TestCase):
def setUp(self):
"""
Prepare a few synthetic datasets for the tests. Two categories of
datasets: One random without any implied structure and one that arises
from a predefined distribution.
"""
self.train_X = []
self.valid_X = []
self.train_y = []
self.valid_y = []
for ds_i in range(0, datasets_n):
# make sure that there are some datasets with extremely small values
if ds_i < 10:
N_max = 7
else:
N_max = max_ds_n
if ds_i < 10:
m_max = 2
else:
m_max = max_features_n
#gaussian = random.rand() < 0.5
gaussian = True
train_ds, valid_ds = generate_synthetic_datasets(N_max, m_max,
gaussian)
# we use the last column as the target variable
self.train_X.append(train_ds[:, :-1])
self.valid_X.append(valid_ds[:, :-1])
self.train_y.append(train_ds[:, -1])
self.valid_y.append(valid_ds[:, -1])
self.lin_model = LinearRegression()
def test_rmse_is_equal_with_sklearn(self):
pass
def test_params_are_equal_with_sklearn(self):
pass
def test_initialization_does_not_matter(self):
pass
class TestReluNeuron(unittest.TestCase):
def test_rmse_is_equal_with_sklearn(self):
pass
def test_initialization_with_negatives_leads_to_zero_gradients(self):
pass
def test_initialization_does_not_matter(self):
pass
class TestLogisticNeuron(unittest.TestCase):
def test_ce_is_equal_with_sklearn(self):
pass
def test_initialization_does_not_matter(self):
pass
|
[
"numpy.zeros",
"sklearn.linear_model.LinearRegression",
"numpy.linalg.matrix_rank",
"numpy.random.randint",
"numpy.random.multivariate_normal",
"numpy.random.rand"
] |
[((551, 579), 'numpy.random.randint', 'random.randint', (['(3)', '(N_max + 1)'], {}), '(3, N_max + 1)\n', (565, 579), False, 'from numpy import random\n'), ((594, 622), 'numpy.random.randint', 'random.randint', (['(3)', '(N_max + 1)'], {}), '(3, N_max + 1)\n', (608, 622), False, 'from numpy import random\n'), ((631, 659), 'numpy.random.randint', 'random.randint', (['(2)', '(m_max + 1)'], {}), '(2, m_max + 1)\n', (645, 659), False, 'from numpy import random\n'), ((1359, 1375), 'numpy.zeros', 'np.zeros', (['[m, m]'], {}), '([m, m])\n', (1367, 1375), True, 'import numpy as np\n'), ((1569, 1613), 'numpy.random.multivariate_normal', 'random.multivariate_normal', (['mu', 'Cov', 'N_train'], {}), '(mu, Cov, N_train)\n', (1595, 1613), False, 'from numpy import random\n'), ((1633, 1677), 'numpy.random.multivariate_normal', 'random.multivariate_normal', (['mu', 'Cov', 'N_valid'], {}), '(mu, Cov, N_valid)\n', (1659, 1677), False, 'from numpy import random\n'), ((3207, 3225), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (3223, 3225), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((1390, 1409), 'numpy.linalg.matrix_rank', 'LA.matrix_rank', (['Cov'], {}), '(Cov)\n', (1404, 1409), True, 'from numpy import linalg as LA\n'), ((1495, 1512), 'numpy.random.rand', 'random.rand', (['m', 'm'], {}), '(m, m)\n', (1506, 1512), False, 'from numpy import random\n'), ((1298, 1312), 'numpy.random.rand', 'random.rand', (['m'], {}), '(m)\n', (1309, 1312), False, 'from numpy import random\n'), ((1749, 1772), 'numpy.random.rand', 'random.rand', (['N_train', 'm'], {}), '(N_train, m)\n', (1760, 1772), False, 'from numpy import random\n'), ((1820, 1843), 'numpy.random.rand', 'random.rand', (['N_valid', 'm'], {}), '(N_valid, m)\n', (1831, 1843), False, 'from numpy import random\n'), ((1437, 1451), 'numpy.random.rand', 'random.rand', (['m'], {}), '(m)\n', (1448, 1451), False, 'from numpy import random\n')]
|
import logging
logger = logging.getLogger()
log_format = logging.Formatter("[%(asctime)s %(levelname)s] %(message)s")
logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_format)
logger.handlers = [console_handler]
|
[
"logging.Formatter",
"logging.StreamHandler",
"logging.getLogger"
] |
[((25, 44), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (42, 44), False, 'import logging\n'), ((58, 118), 'logging.Formatter', 'logging.Formatter', (['"""[%(asctime)s %(levelname)s] %(message)s"""'], {}), "('[%(asctime)s %(levelname)s] %(message)s')\n", (75, 118), False, 'import logging\n'), ((168, 191), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (189, 191), False, 'import logging\n')]
|
from django import template
from joblistings.models import Job
from companies.models import Company
register = template.Library()
@register.inclusion_tag('dashbar.html')
def get_dashbar(*args, **kwargs):
location = kwargs['location']
return {
'location': location
}
|
[
"django.template.Library"
] |
[((114, 132), 'django.template.Library', 'template.Library', ([], {}), '()\n', (130, 132), False, 'from django import template\n')]
|
# Solution of;
# Project Euler Problem 405: A rectangular tiling
# https://projecteuler.net/problem=405
#
# We wish to tile a rectangle whose length is twice its width. Let T(0) be the
# tiling consisting of a single rectangle. For n > 0, let T(n) be obtained
# from T(n-1) by replacing all tiles in the following manner:The following
# animation demonstrates the tilings T(n) for n from 0 to 5:Let f(n) be the
# number of points where four tiles meet in T(n). For example, f(1) = 0, f(4)
# = 82 and f(109) mod 177 = 126897180. Find f(10k) for k = 1018, give your
# answer modulo 177.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 405
timed.caller(dummy, n, i, prob_id)
|
[
"timed.caller"
] |
[((762, 796), 'timed.caller', 'timed.caller', (['dummy', 'n', 'i', 'prob_id'], {}), '(dummy, n, i, prob_id)\n', (774, 796), False, 'import timed\n')]
|
import logging
import numpy as np
from time import time as now
from gunpowder.batch import Batch
from gunpowder.profiling import Timing
from gunpowder.array import Array
from gunpowder.nodes.hdf5like_source_base import Hdf5LikeSource
from gunpowder.compat import ensure_str
from gunpowder.coordinate import Coordinate
from gunpowder.ext import ZarrFile
logger = logging.getLogger(__name__)
class Hdf5InMemory(Hdf5LikeSource):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.in_mem_datasets = {}
def setup(self):
super().setup()
with self._open_file(self.filename) as data_file:
for (array_key, ds_name) in self.datasets.items():
if ds_name not in data_file:
raise RuntimeError("%s not in %s" %
(ds_name, self.filename))
spec = self._Hdf5LikeSource__read_spec(
array_key, data_file, ds_name)
# logger.info(spec)
# logger.info(spec.roi)
# logger.info(spec.roi.get_offset())
# logger.info((spec.roi - spec.roi.get_offset()) /
# spec.voxel_size)
start = now()
logger.info(
f'start loading {ds_name} into memory')
self.in_mem_datasets[array_key] = self._Hdf5LikeSource__read(
data_file,
self.datasets[array_key],
(spec.roi - spec.roi.get_offset()) / spec.voxel_size,
)
logger.info(
f'loaded {ds_name} into memory in {now() - start} s')
def provide(self, request):
timing = Timing(self)
timing.start()
batch = Batch()
for (array_key, request_spec) in request.array_specs.items():
voxel_size = self.spec[array_key].voxel_size
# scale request roi to voxel units
dataset_roi = request_spec.roi / voxel_size
# shift request roi into dataset
dataset_roi = (
dataset_roi -
self.spec[array_key].roi.get_offset() / voxel_size
)
# create array spec
array_spec = self.spec[array_key].copy()
array_spec.roi = request_spec.roi
# add array to batch
batch.arrays[array_key] = Array(
self.__read(array_key, dataset_roi), array_spec
)
logger.debug("done")
timing.stop()
batch.profiling_stats.add(timing)
return batch
def __read(self, array_key, roi):
in_mem_array = self.in_mem_datasets[array_key]
c = len(in_mem_array.shape) - self.ndims
if self.channels_first:
array = np.asarray(
in_mem_array[(slice(None),) * c + roi.to_slices()])
else:
array = np.asarray(
in_mem_array[roi.to_slices() + (slice(None),) * c])
array = np.transpose(
array, axes=[
i + self.ndims for i in range(c)] + list(range(self.ndims))
)
return array
def __repr__(self):
return self.filename
class InMemZarrSource(Hdf5InMemory):
'''A `zarr <https://github.com/zarr-developers/zarr>`_ data source.
Provides arrays from zarr datasets. If the attribute ``resolution`` is set
in a zarr dataset, it will be used as the array's ``voxel_size``. If the
attribute ``offset`` is set in a dataset, it will be used as the offset of
the :class:`Roi` for this array. It is assumed that the offset is given in
world units.
Args:
filename (``string``):
The zarr directory.
datasets (``dict``, :class:`ArrayKey` -> ``string``):
Dictionary of array keys to dataset names that this source offers.
array_specs (``dict``, :class:`ArrayKey` -> :class:`ArraySpec`, optional):
An optional dictionary of array keys to array specs to overwrite
the array specs automatically determined from the data file. This
is useful to set a missing ``voxel_size``, for example. Only fields
that are not ``None`` in the given :class:`ArraySpec` will be used.
channels_first (``bool``, optional):
Specifies the ordering of the dimensions of the HDF5-like data source.
If channels_first is set (default), then the input shape is expected
to be (channels, spatial dimensions). This is recommended because of
better performance. If channels_first is set to false, then the input
data is read in channels_last manner and converted to channels_first.
'''
def _get_voxel_size(self, dataset):
if 'resolution' not in dataset.attrs:
return None
if self.filename.endswith('.n5'):
return Coordinate(dataset.attrs['resolution'][::-1])
else:
return Coordinate(dataset.attrs['resolution'])
def _get_offset(self, dataset):
if 'offset' not in dataset.attrs:
return None
if self.filename.endswith('.n5'):
return Coordinate(dataset.attrs['offset'][::-1])
else:
return Coordinate(dataset.attrs['offset'])
def _open_file(self, filename):
return ZarrFile(ensure_str(filename), mode='r')
|
[
"time.time",
"gunpowder.coordinate.Coordinate",
"gunpowder.compat.ensure_str",
"gunpowder.profiling.Timing",
"gunpowder.batch.Batch",
"logging.getLogger"
] |
[((365, 392), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (382, 392), False, 'import logging\n'), ((1743, 1755), 'gunpowder.profiling.Timing', 'Timing', (['self'], {}), '(self)\n', (1749, 1755), False, 'from gunpowder.profiling import Timing\n'), ((1796, 1803), 'gunpowder.batch.Batch', 'Batch', ([], {}), '()\n', (1801, 1803), False, 'from gunpowder.batch import Batch\n'), ((4952, 4997), 'gunpowder.coordinate.Coordinate', 'Coordinate', (["dataset.attrs['resolution'][::-1]"], {}), "(dataset.attrs['resolution'][::-1])\n", (4962, 4997), False, 'from gunpowder.coordinate import Coordinate\n'), ((5031, 5070), 'gunpowder.coordinate.Coordinate', 'Coordinate', (["dataset.attrs['resolution']"], {}), "(dataset.attrs['resolution'])\n", (5041, 5070), False, 'from gunpowder.coordinate import Coordinate\n'), ((5237, 5278), 'gunpowder.coordinate.Coordinate', 'Coordinate', (["dataset.attrs['offset'][::-1]"], {}), "(dataset.attrs['offset'][::-1])\n", (5247, 5278), False, 'from gunpowder.coordinate import Coordinate\n'), ((5312, 5347), 'gunpowder.coordinate.Coordinate', 'Coordinate', (["dataset.attrs['offset']"], {}), "(dataset.attrs['offset'])\n", (5322, 5347), False, 'from gunpowder.coordinate import Coordinate\n'), ((5409, 5429), 'gunpowder.compat.ensure_str', 'ensure_str', (['filename'], {}), '(filename)\n', (5419, 5429), False, 'from gunpowder.compat import ensure_str\n'), ((1247, 1252), 'time.time', 'now', ([], {}), '()\n', (1250, 1252), True, 'from time import time as now\n'), ((1673, 1678), 'time.time', 'now', ([], {}), '()\n', (1676, 1678), True, 'from time import time as now\n')]
|
from flask import make_response
from ckan.common import config, c
import ckan.plugins as p
import ckan.model as model
import ckan.authz as authz
import ckan.logic as logic
import ckan.lib.jobs as jobs
from ckanext.iati.helpers import extras_to_dict, parse_error_object_to_list
from ckanext.iati import helpers as h
from ckanext.iati.logic import action
import sqlalchemy
import csv
import StringIO
from collections import OrderedDict
import json
from xlwt import Workbook
import io
import datetime as dt
import os, codecs
import logging
log = logging.getLogger(__name__)
_and_ = sqlalchemy.and_
_not_empty = p.toolkit.get_validator('not_empty')
_ignore_empty = p.toolkit.get_validator('ignore_empty')
_ignore_missing = p.toolkit.get_validator('ignore_missing')
_int_validator = p.toolkit.get_validator('int_validator')
ValidationError = logic.ValidationError
class FormatError(Exception):
pass
class PublishersListDownload:
def __init__(self, download_format, request_recent_publisher=False):
self.request_type_recent_publisher = request_recent_publisher
self.download_format = self._get_format(download_format)
self._site_url = config.get('ckan.site_url')
self._datasets_link = self._site_url + "/publisher/{}"
self._func_mapping = {
'extras_publisher_organization_type':h.get_organization_type_title,
'extras_publisher_country':h.get_country_title
}
self._set_mapping()
def _set_mapping(self):
"""
Set csv column headers accoring to the request type.
If the request is from recent publishers )only for sysadmins), we need first_published_date column
:return:
"""
self._headers = ['Publisher', 'IATI Organisation Identifier', 'Organization Type',
'HQ Country or Region', 'Datasets Count', 'Datasets Link']
self._mapping = ['display_name', 'extras_publisher_iati_id', 'extras_publisher_organization_type',
'extras_publisher_country', 'package_count']
if self.request_type_recent_publisher:
self._headers.insert(4, "First Published Date")
self._mapping.insert(4, "extras_publisher_first_publish_date")
self._headers = tuple(self._headers)
self._mapping = tuple(self._mapping)
def _get_xml_value(self, val):
val = val.replace('&', "&")
return val
def _get_xml_name(self, val):
val = val.lower()
return val.replace(" ", '-')
def _get_format(self, download_format):
try:
download_format = download_format.lower()
_formats = ('csv', 'json', 'xml', 'xls')
if download_format not in _formats:
raise FormatError
return download_format
except Exception as e:
raise FormatError(e)
@staticmethod
def _get_publisher_data():
"""
We cannot use API organization_list with all_fields=True, because it will be expensive process
to by pass max limits
:return: dict
"""
# TODO: Need optimization
# First get package count and then join with Group with ownr_org
package_count = model.Session.query(model.Group, model.Package.owner_org,
sqlalchemy.func.count(model.Package.id).label('package_count')).join(
model.Package, model.Group.id == model.Package.owner_org).filter(
_and_(
model.Group.is_organization == True, model.Group.state == 'active',
model.Package.private == False, model.Package.state == 'active'
)
).group_by(model.Group.id, model.Package.owner_org).subquery()
organization = model.Session.query(model.Group, package_count.c.package_count).join(
package_count, model.Group.id == package_count.c.id).join(model.GroupExtra)
log.info(organization.as_scalar())
return organization.all()
def _prepare(self, data):
"""
Prepare the data for download
:param data:
:return:
"""
clean_data = []
extras = dict(data.Group._extras)
for key in self._mapping[:-1]:
val = ''
if hasattr(data.Group, key):
val = getattr(data.Group, key).encode('utf-8')
if "extras_" in key:
val = extras.get(key.replace("extras_", ''), '')
if val:
val = val.value.encode('utf-8')
if key in self._func_mapping:
val = self._func_mapping.get(key)(val)
clean_data.append(val)
clean_data.append(data.package_count)
clean_data.append(self._datasets_link.format(data.Group.name))
return clean_data
def csv(self):
"""
CSV download.
Sysadmin recent publisher is allowed to download only csv
:return:
"""
f = StringIO.StringIO()
writer = csv.writer(f)
writer.writerow(list(self._headers))
_org_data = PublishersListDownload._get_publisher_data()
rows = []
for org in _org_data:
if org.Group.state == 'active' and int(org.package_count) > 0:
org_data = self._prepare(org)
if self.request_type_recent_publisher:
rows.append(org_data)
else:
writer.writerow(org_data)
# This is expensive but we need sorting for first published
# date since its hard to get sorted for GroupExtra table
if self.request_type_recent_publisher:
rows = sorted(rows, key=lambda entry: entry[4], reverse=True)
for csv_row in rows:
writer.writerow(csv_row)
output = f.getvalue()
f.close()
response = make_response(output)
response.headers['Content-type'] = 'text/csv'
return response
def json(self):
"""
Json download
:return:
"""
f = StringIO.StringIO()
json_data = []
_org_data = PublishersListDownload._get_publisher_data()
for org in _org_data:
if org.Group.state == 'active' and int(org.package_count) > 0:
json_data.append(OrderedDict(zip(self._headers, self._prepare(org))))
json.dump(json_data, f)
output = f.getvalue()
f.close()
response = make_response(output)
response.headers['Content-type'] = 'application/json'
return response
def xml(self):
"""
xml format download
:return:
"""
f = StringIO.StringIO()
fields = list(self._headers)
fields.pop(1)
xml = ['<?xml version="1.0" encoding="UTF-8" ?>']
_observations = ' <{}>{}</{}>'
xml.append('<iati-publishers-list>')
_org_data = PublishersListDownload._get_publisher_data()
for org in _org_data:
if org.Group.state == 'active' and int(org.package_count) > 0:
_dt = self._prepare(org)
_dt[4] = str(int(_dt[4])) # Package count to string
_iati_identifier = _dt.pop(1)
xml.append('<iati-identifier id="{}">'.format(_iati_identifier))
for _index, _field in enumerate(fields):
field = self._get_xml_name(_field)
if field == "Datasets Link":
xml.append('<iati-publisher-page xmlns:xlink="http://www.w3.org/1999/xlink">')
xml.append(' <iati-publisher-page xlink:type="simple" '
'xlink:href="{}">{}</iati-publisher-page>'.format(_dt[_index],
self._get_xml_value(_dt[0])))
xml.append('</iati-publisher-page>')
else:
xml.append(_observations.format(field, self._get_xml_value(_dt[_index]), field))
xml.append('</iati-identifier>')
xml.append('</iati-publishers-list>')
f.write("\n".join(xml))
output = f.getvalue()
f.close()
response = make_response(output)
response.headers['Content-type'] = 'text/xml'
return response
def xls(self):
"""
xls format download
:return:
"""
f = StringIO.StringIO()
wb = Workbook(encoding='utf-8')
sheet1 = wb.add_sheet('IATI Publishers List')
_org_data = PublishersListDownload._get_publisher_data()
# Write Headers
for _index, _field in enumerate(self._headers):
sheet1.write(0, _index, _field)
# Write Rows and Values
_row = 1
for org in _org_data:
if org.Group.state == 'active' and int(org.package_count) > 0:
_dt = self._prepare(org)
# Write Items
for _col, _item in enumerate(_dt):
sheet1.write(_row, _col, _item)
_row += 1
wb.save(f)
output = f.getvalue()
f.close()
response = make_response(output)
return response
def download(self):
response = getattr(PublishersListDownload, self.download_format)(self)
file_name = 'iati_publishers_list'
response.headers['Content-disposition'] = 'attachment;filename={}.{}'.format(file_name,
self.download_format)
return response
class PublisherRecordsDownload:
CSV_MAPPING = [
('registry-publisher-id', 'organization', 'name'),
('registry-file-id', 'package', 'name'),
('title', 'package', 'title'),
('description', 'package', 'notes'),
('contact-email', 'package', 'author_email'),
('state', 'package', 'state'),
('source-url', 'resources', 'url'),
('file-type', 'package', 'filetype'),
('recipient-country', 'package', 'country'),
('default-language', 'package', 'language'),
('secondary-publisher', 'package', 'secondary_publisher'),
]
OPTIONAL_COLUMNS = ['state', 'description', 'default-language', 'secondary-publisher']
MAX_ROWS = int(config.get('ckanext.iati.max_rows_csv_upload', 101))
def __init__(self):
pass
def _get_packages_for_org(self, context, org_name):
"""
:param context:
:param org_name:
:return:
"""
rows = 100
start = 0
packages = []
data_dict = {
'q': '*:*',
'fq': 'organization:' + org_name,
'rows': rows,
'start': start,
}
def do_query(context, data_dict):
return p.toolkit.get_action('package_search')(context, data_dict)
pending = True
while pending:
query = do_query(context, data_dict)
if len(query['results']):
packages.extend(query['results'])
data_dict['start'] += rows
else:
pending = False
return packages
def write_to_csv(self, publisher):
"""
:param publisher:
:return:
"""
context = {'model': model, 'user': c.user or c.author}
try:
if publisher == 'all':
package_ids = p.toolkit.get_action('package_list')(context, {})
packages = []
for pkg_id in package_ids:
try:
package = p.toolkit.get_action('package_show')(context, {'id': pkg_id})
package.pop('state', None)
packages.append(package)
except p.toolkit.NotAuthorized:
log.warn('User %s not authorized to read package %s' % (c.user, pkg_id))
continue
elif publisher == 'template':
# Just return an empty CSV file with just the headers
packages = []
else:
packages = self._get_packages_for_org(context, publisher)
f = io.BytesIO()
fieldnames = [n[0] for n in self.CSV_MAPPING if n[0] != 'state']
writer = csv.DictWriter(f, fieldnames=fieldnames, quoting=csv.QUOTE_ALL)
headers = dict((n[0], n[0]) for n in self.CSV_MAPPING if n[0] != 'state')
writer.writerow(headers)
for package in packages:
if package:
row = {}
extras_dict = extras_to_dict(package)
for fieldname, entity, key in self.CSV_MAPPING:
if key == 'state':
continue
value = None
if entity == 'organization':
if len(package['organization']):
value = package['organization']['name']
elif entity == 'resources':
if len(package['resources']) and key in package['resources'][0]:
value = package['resources'][0][key]
else:
if key in package:
value = package[key]
elif key in extras_dict:
value = extras_dict[key]
row[fieldname] = value
for field_to_check in ('title', 'description'):
if fieldname == field_to_check and row.get(field_to_check):
row[field_to_check] = row[field_to_check].encode('utf-8')
writer.writerow(row)
output = f.getvalue()
f.close()
return output
except p.toolkit.ObjectNotFound:
p.toolkit.abort(404, 'Organization not found')
class PublisherRecordsUpload(PublisherRecordsDownload):
def __init__(self, *args, **kwargs):
PublisherRecordsDownload.__init__(self)
def _validate_users(self):
"""
Validate user access -
:return: None
"""
log.info("Validating the logged in user")
if not c.user:
return p.toolkit.abort(401, 'You are not logged. Please login')
self.is_sysadmin = authz.is_sysadmin(c.user)
context = {'model': model, 'user': c.user or c.author}
self.authz_orgs = p.toolkit.get_action('organization_list_for_user')(context, {})
if not self.is_sysadmin and not self.authz_orgs:
return p.toolkit.abort(403, 'You are not authorized. You are not an admin of any publisher.')
return None
def _validate_csv_files(self, csv_file):
"""
Validate uploaded csv files.
:return:
"""
log.info("Validating the uploaded csv files")
if not hasattr(csv_file, 'filename'):
raise ValidationError("No CSV file provided. Please upload a CSV file.")
# Verify csv file extension
if os.path.splitext(csv_file.filename)[-1].lower() != '.csv':
raise ValidationError(
"Uploaded file is not a csv file. Please upload a csv file"
)
# Validate csv columns
# Validate Mandatory fields.
bom_length = len(codecs.BOM_UTF8)
data = csv_file.read()
if data.startswith(codecs.BOM_UTF8):
data = data[bom_length:]
if not data:
raise ValidationError("CSV file is empty")
buffer = io.BytesIO(data)
log.info("Validating CSV file....")
reader = csv.reader(buffer)
columns = next(reader)
# Validate columns
if not columns:
buffer.close()
raise ValidationError("Mandatory fields are missing. "
"Download csv upload template (verify mandatory columns) and "
"upload the file accordingly.")
for _col in self.CSV_MAPPING:
is_optional = _col[0] in self.OPTIONAL_COLUMNS
in_columns = _col[0] in columns
if not is_optional and not in_columns:
buffer.close()
raise ValidationError("Mandatory/unrecognized CSV columns. Given csv fields: {}")
# Validate no of rows
row_count = sum(1 for _ in reader)
log.info("Number of rows in csv: {}".format(str(row_count)))
if row_count > self.MAX_ROWS:
raise ValidationError(
"Exceeded the limit. Maximum allowed rows is 50"
)
return data
|
[
"json.dump",
"ckan.authz.is_sysadmin",
"xlwt.Workbook",
"io.BytesIO",
"csv.writer",
"csv.reader",
"ckan.plugins.toolkit.abort",
"ckan.model.Session.query",
"ckan.plugins.toolkit.get_validator",
"ckan.common.config.get",
"ckan.plugins.toolkit.get_action",
"ckanext.iati.helpers.extras_to_dict",
"os.path.splitext",
"sqlalchemy.func.count",
"flask.make_response",
"StringIO.StringIO",
"logging.getLogger",
"csv.DictWriter"
] |
[((543, 570), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (560, 570), False, 'import logging\n'), ((609, 645), 'ckan.plugins.toolkit.get_validator', 'p.toolkit.get_validator', (['"""not_empty"""'], {}), "('not_empty')\n", (632, 645), True, 'import ckan.plugins as p\n'), ((662, 701), 'ckan.plugins.toolkit.get_validator', 'p.toolkit.get_validator', (['"""ignore_empty"""'], {}), "('ignore_empty')\n", (685, 701), True, 'import ckan.plugins as p\n'), ((720, 761), 'ckan.plugins.toolkit.get_validator', 'p.toolkit.get_validator', (['"""ignore_missing"""'], {}), "('ignore_missing')\n", (743, 761), True, 'import ckan.plugins as p\n'), ((779, 819), 'ckan.plugins.toolkit.get_validator', 'p.toolkit.get_validator', (['"""int_validator"""'], {}), "('int_validator')\n", (802, 819), True, 'import ckan.plugins as p\n'), ((1167, 1194), 'ckan.common.config.get', 'config.get', (['"""ckan.site_url"""'], {}), "('ckan.site_url')\n", (1177, 1194), False, 'from ckan.common import config, c\n'), ((5049, 5068), 'StringIO.StringIO', 'StringIO.StringIO', ([], {}), '()\n', (5066, 5068), False, 'import StringIO\n'), ((5086, 5099), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (5096, 5099), False, 'import csv\n'), ((5941, 5962), 'flask.make_response', 'make_response', (['output'], {}), '(output)\n', (5954, 5962), False, 'from flask import make_response\n'), ((6137, 6156), 'StringIO.StringIO', 'StringIO.StringIO', ([], {}), '()\n', (6154, 6156), False, 'import StringIO\n'), ((6446, 6469), 'json.dump', 'json.dump', (['json_data', 'f'], {}), '(json_data, f)\n', (6455, 6469), False, 'import json\n'), ((6537, 6558), 'flask.make_response', 'make_response', (['output'], {}), '(output)\n', (6550, 6558), False, 'from flask import make_response\n'), ((6746, 6765), 'StringIO.StringIO', 'StringIO.StringIO', ([], {}), '()\n', (6763, 6765), False, 'import StringIO\n'), ((8326, 8347), 'flask.make_response', 'make_response', (['output'], {}), '(output)\n', (8339, 8347), False, 'from flask import make_response\n'), ((8527, 8546), 'StringIO.StringIO', 'StringIO.StringIO', ([], {}), '()\n', (8544, 8546), False, 'import StringIO\n'), ((8560, 8586), 'xlwt.Workbook', 'Workbook', ([], {'encoding': '"""utf-8"""'}), "(encoding='utf-8')\n", (8568, 8586), False, 'from xlwt import Workbook\n'), ((9273, 9294), 'flask.make_response', 'make_response', (['output'], {}), '(output)\n', (9286, 9294), False, 'from flask import make_response\n'), ((10415, 10466), 'ckan.common.config.get', 'config.get', (['"""ckanext.iati.max_rows_csv_upload"""', '(101)'], {}), "('ckanext.iati.max_rows_csv_upload', 101)\n", (10425, 10466), False, 'from ckan.common import config, c\n'), ((14554, 14579), 'ckan.authz.is_sysadmin', 'authz.is_sysadmin', (['c.user'], {}), '(c.user)\n', (14571, 14579), True, 'import ckan.authz as authz\n'), ((15779, 15795), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (15789, 15795), False, 'import io\n'), ((15857, 15875), 'csv.reader', 'csv.reader', (['buffer'], {}), '(buffer)\n', (15867, 15875), False, 'import csv\n'), ((12322, 12334), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (12332, 12334), False, 'import io\n'), ((12433, 12496), 'csv.DictWriter', 'csv.DictWriter', (['f'], {'fieldnames': 'fieldnames', 'quoting': 'csv.QUOTE_ALL'}), '(f, fieldnames=fieldnames, quoting=csv.QUOTE_ALL)\n', (12447, 12496), False, 'import csv\n'), ((14469, 14525), 'ckan.plugins.toolkit.abort', 'p.toolkit.abort', (['(401)', '"""You are not logged. Please login"""'], {}), "(401, 'You are not logged. Please login')\n", (14484, 14525), True, 'import ckan.plugins as p\n'), ((14669, 14719), 'ckan.plugins.toolkit.get_action', 'p.toolkit.get_action', (['"""organization_list_for_user"""'], {}), "('organization_list_for_user')\n", (14689, 14719), True, 'import ckan.plugins as p\n'), ((14810, 14900), 'ckan.plugins.toolkit.abort', 'p.toolkit.abort', (['(403)', '"""You are not authorized. You are not an admin of any publisher."""'], {}), "(403,\n 'You are not authorized. You are not an admin of any publisher.')\n", (14825, 14900), True, 'import ckan.plugins as p\n'), ((10934, 10972), 'ckan.plugins.toolkit.get_action', 'p.toolkit.get_action', (['"""package_search"""'], {}), "('package_search')\n", (10954, 10972), True, 'import ckan.plugins as p\n'), ((14073, 14119), 'ckan.plugins.toolkit.abort', 'p.toolkit.abort', (['(404)', '"""Organization not found"""'], {}), "(404, 'Organization not found')\n", (14088, 14119), True, 'import ckan.plugins as p\n'), ((11544, 11580), 'ckan.plugins.toolkit.get_action', 'p.toolkit.get_action', (['"""package_list"""'], {}), "('package_list')\n", (11564, 11580), True, 'import ckan.plugins as p\n'), ((12749, 12772), 'ckanext.iati.helpers.extras_to_dict', 'extras_to_dict', (['package'], {}), '(package)\n', (12763, 12772), False, 'from ckanext.iati.helpers import extras_to_dict, parse_error_object_to_list\n'), ((3829, 3892), 'ckan.model.Session.query', 'model.Session.query', (['model.Group', 'package_count.c.package_count'], {}), '(model.Group, package_count.c.package_count)\n', (3848, 3892), True, 'import ckan.model as model\n'), ((15276, 15311), 'os.path.splitext', 'os.path.splitext', (['csv_file.filename'], {}), '(csv_file.filename)\n', (15292, 15311), False, 'import os, codecs\n'), ((11726, 11762), 'ckan.plugins.toolkit.get_action', 'p.toolkit.get_action', (['"""package_show"""'], {}), "('package_show')\n", (11746, 11762), True, 'import ckan.plugins as p\n'), ((3389, 3428), 'sqlalchemy.func.count', 'sqlalchemy.func.count', (['model.Package.id'], {}), '(model.Package.id)\n', (3410, 3428), False, 'import sqlalchemy\n')]
|
import uvicorn
from uvicorn.reloaders.statreload import StatReload
from uvicorn.main import run, get_logger
from starlette.applications import Starlette
from starlette.responses import JSONResponse
from utils.tiempo import eightDigits
from utils.pipeline import MongoDBPipeline
from settings import BACKEND_PORT
app = Starlette(debug='true')
@app.route('/toplist')
async def toplist(request):
try:
day = request.query_params['day']
except KeyError:
day = eightDigits()
try:
chart = request.query_params['chart']
except KeyError:
chart = 'rise'
m = MongoDBPipeline()
query = f'{day}.{chart}'
result = m.ls(query)
for item in result:
item.pop('_id')
return JSONResponse(result)
if __name__ == '__main__':
reloader = StatReload(get_logger('debug'))
reloader.run(run, {
'app': app,
'host': '127.0.0.1',
'port': BACKEND_PORT,
'log_level': 'debug',
'debug': 'true'
})
uvicorn.run(app=app, host='127.0.0.1', port=BACKEND_PORT, debug='true')
|
[
"starlette.applications.Starlette",
"starlette.responses.JSONResponse",
"utils.tiempo.eightDigits",
"uvicorn.run",
"utils.pipeline.MongoDBPipeline",
"uvicorn.main.get_logger"
] |
[((321, 344), 'starlette.applications.Starlette', 'Starlette', ([], {'debug': '"""true"""'}), "(debug='true')\n", (330, 344), False, 'from starlette.applications import Starlette\n'), ((607, 624), 'utils.pipeline.MongoDBPipeline', 'MongoDBPipeline', ([], {}), '()\n', (622, 624), False, 'from utils.pipeline import MongoDBPipeline\n'), ((739, 759), 'starlette.responses.JSONResponse', 'JSONResponse', (['result'], {}), '(result)\n', (751, 759), False, 'from starlette.responses import JSONResponse\n'), ((1004, 1075), 'uvicorn.run', 'uvicorn.run', ([], {'app': 'app', 'host': '"""127.0.0.1"""', 'port': 'BACKEND_PORT', 'debug': '"""true"""'}), "(app=app, host='127.0.0.1', port=BACKEND_PORT, debug='true')\n", (1015, 1075), False, 'import uvicorn\n'), ((815, 834), 'uvicorn.main.get_logger', 'get_logger', (['"""debug"""'], {}), "('debug')\n", (825, 834), False, 'from uvicorn.main import run, get_logger\n'), ((484, 497), 'utils.tiempo.eightDigits', 'eightDigits', ([], {}), '()\n', (495, 497), False, 'from utils.tiempo import eightDigits\n')]
|
import json, base64
import pulumi
from pulumi.output import Output
from pulumi.resource import ComponentResource, ResourceOptions
import pulumi_gcp as gcp
import pulumi_datadog as datadog
import pulumi_random
class GCPLogSinkToDataDog(ComponentResource):
def __init__(
self,
name: str,
opts: ResourceOptions = None):
super().__init__('datadog_gcp_integration:index:GCPLogSinkToDataDog', name, None, opts)
topic = gcp.pubsub.Topic(
f'{name}-topic',
name='export-logs-to-datadog',
opts=ResourceOptions(parent=self))
dd_api_key = pulumi.Config(name='datadog').require('apiKey')
push_to_dd = gcp.pubsub.Subscription(
f'{name}-subscription',
name='export-logs-to-datadog.push-to-dd',
topic=topic.id,
push_config=gcp.pubsub.SubscriptionPushConfigArgs(
push_endpoint=f'https://gcp-intake.logs.datadoghq.eu/api/v2/logs?dd-api-key={dd_api_key}&dd-protocol=gcp'),
expiration_policy=gcp.pubsub.SubscriptionExpirationPolicyArgs(
ttl=''),
retry_policy=gcp.pubsub.SubscriptionRetryPolicyArgs(
minimum_backoff='10s',
maximum_backoff='600s'),
opts=ResourceOptions(parent=self))
project = gcp.organizations.get_project()
pubsub_sa = f'serviceAccount:service-{project.number}@gcp-sa-pubsub.iam.gserviceaccount.com'
gcp.pubsub.SubscriptionIAMBinding(
f'{name}-subscriber-ack',
subscription=push_to_dd.id,
members=[pubsub_sa],
role='roles/pubsub.subscriber',
opts=ResourceOptions(parent=self))
log_sink = gcp.logging.ProjectSink(
f'{name}-log-sink',
name='export-logs-to-datadog',
destination=Output.concat('pubsub.googleapis.com/', topic.id),
unique_writer_identity=True,
opts=ResourceOptions(parent=self, depends_on=[push_to_dd]))
gcp.pubsub.TopicIAMMember(
f'{name}-log-sink-pubsub-publisher',
topic=topic.id,
role='roles/pubsub.publisher',
member=log_sink.writer_identity,
opts=ResourceOptions(parent=self))
class DataDogGCPIntegration(ComponentResource):
def __init__(
self,
name: str,
enable_log_sink: bool = False,
opts: ResourceOptions = None):
super().__init__('datadog_gcp_integration:index:DataDogGCPIntegration', name, None, opts)
suffix = pulumi_random.RandomString(
f'{name}-gcp-sa-suffix',
length=3,
min_lower=3,
opts=ResourceOptions(parent=self))
gcp_sa = gcp.serviceaccount.Account(
f'{name}-gcp-sa',
account_id=Output.concat('datadog-integration-', suffix.result),
description='DataDog GCP Integration SA',
opts=ResourceOptions(parent=self))
roles = [
'roles/cloudasset.viewer',
'roles/compute.viewer',
'roles/container.viewer',
'roles/monitoring.viewer',
]
iam_members = []
for role in roles:
member = gcp.projects.IAMMember(
f'{name}-gcp-sa-role-{role}',
role=role,
member=gcp_sa.email.apply(lambda email: f'serviceAccount:{email}'),
opts=ResourceOptions(parent=self))
iam_members.append(member)
gcp_sa_key = gcp.serviceaccount.Key(
f'{name}-gcp-sa-key',
service_account_id=gcp_sa.name,
opts=ResourceOptions(parent=self))
gcp_sa_pk = gcp_sa_key.private_key.apply(lambda k: json.loads(base64.b64decode(k)))
gcp_integration = datadog.gcp.Integration(
f'{name}-datadog-gcp-integration',
client_email=gcp_sa_pk.apply(lambda k: k['client_email']),
client_id=gcp_sa_pk.apply(lambda k: k['client_id']),
private_key=gcp_sa_pk.apply(lambda k: k['private_key']),
private_key_id=gcp_sa_pk.apply(lambda k: k['private_key_id']),
project_id=gcp_sa_pk.apply(lambda k: k['project_id']),
opts=ResourceOptions(parent=self, depends_on=iam_members))
if enable_log_sink:
GCPLogSinkToDataDog(
f'{name}-export-gcp-logs-to-datadog',
opts=ResourceOptions(parent=self, depends_on=[gcp_integration]))
|
[
"pulumi_gcp.pubsub.SubscriptionExpirationPolicyArgs",
"pulumi.output.Output.concat",
"pulumi_gcp.pubsub.SubscriptionRetryPolicyArgs",
"pulumi.Config",
"pulumi_gcp.pubsub.SubscriptionPushConfigArgs",
"pulumi.resource.ResourceOptions",
"base64.b64decode",
"pulumi_gcp.organizations.get_project"
] |
[((1341, 1372), 'pulumi_gcp.organizations.get_project', 'gcp.organizations.get_project', ([], {}), '()\n', (1370, 1372), True, 'import pulumi_gcp as gcp\n'), ((578, 606), 'pulumi.resource.ResourceOptions', 'ResourceOptions', ([], {'parent': 'self'}), '(parent=self)\n', (593, 606), False, 'from pulumi.resource import ComponentResource, ResourceOptions\n'), ((630, 659), 'pulumi.Config', 'pulumi.Config', ([], {'name': '"""datadog"""'}), "(name='datadog')\n", (643, 659), False, 'import pulumi\n'), ((867, 1021), 'pulumi_gcp.pubsub.SubscriptionPushConfigArgs', 'gcp.pubsub.SubscriptionPushConfigArgs', ([], {'push_endpoint': 'f"""https://gcp-intake.logs.datadoghq.eu/api/v2/logs?dd-api-key={dd_api_key}&dd-protocol=gcp"""'}), "(push_endpoint=\n f'https://gcp-intake.logs.datadoghq.eu/api/v2/logs?dd-api-key={dd_api_key}&dd-protocol=gcp'\n )\n", (904, 1021), True, 'import pulumi_gcp as gcp\n'), ((1060, 1111), 'pulumi_gcp.pubsub.SubscriptionExpirationPolicyArgs', 'gcp.pubsub.SubscriptionExpirationPolicyArgs', ([], {'ttl': '""""""'}), "(ttl='')\n", (1103, 1111), True, 'import pulumi_gcp as gcp\n'), ((1155, 1244), 'pulumi_gcp.pubsub.SubscriptionRetryPolicyArgs', 'gcp.pubsub.SubscriptionRetryPolicyArgs', ([], {'minimum_backoff': '"""10s"""', 'maximum_backoff': '"""600s"""'}), "(minimum_backoff='10s',\n maximum_backoff='600s')\n", (1193, 1244), True, 'import pulumi_gcp as gcp\n'), ((1292, 1320), 'pulumi.resource.ResourceOptions', 'ResourceOptions', ([], {'parent': 'self'}), '(parent=self)\n', (1307, 1320), False, 'from pulumi.resource import ComponentResource, ResourceOptions\n'), ((1690, 1718), 'pulumi.resource.ResourceOptions', 'ResourceOptions', ([], {'parent': 'self'}), '(parent=self)\n', (1705, 1718), False, 'from pulumi.resource import ComponentResource, ResourceOptions\n'), ((1864, 1913), 'pulumi.output.Output.concat', 'Output.concat', (['"""pubsub.googleapis.com/"""', 'topic.id'], {}), "('pubsub.googleapis.com/', topic.id)\n", (1877, 1913), False, 'from pulumi.output import Output\n'), ((1973, 2026), 'pulumi.resource.ResourceOptions', 'ResourceOptions', ([], {'parent': 'self', 'depends_on': '[push_to_dd]'}), '(parent=self, depends_on=[push_to_dd])\n', (1988, 2026), False, 'from pulumi.resource import ComponentResource, ResourceOptions\n'), ((2246, 2274), 'pulumi.resource.ResourceOptions', 'ResourceOptions', ([], {'parent': 'self'}), '(parent=self)\n', (2261, 2274), False, 'from pulumi.resource import ComponentResource, ResourceOptions\n'), ((2716, 2744), 'pulumi.resource.ResourceOptions', 'ResourceOptions', ([], {'parent': 'self'}), '(parent=self)\n', (2731, 2744), False, 'from pulumi.resource import ComponentResource, ResourceOptions\n'), ((2845, 2897), 'pulumi.output.Output.concat', 'Output.concat', (['"""datadog-integration-"""', 'suffix.result'], {}), "('datadog-integration-', suffix.result)\n", (2858, 2897), False, 'from pulumi.output import Output\n'), ((2970, 2998), 'pulumi.resource.ResourceOptions', 'ResourceOptions', ([], {'parent': 'self'}), '(parent=self)\n', (2985, 2998), False, 'from pulumi.resource import ComponentResource, ResourceOptions\n'), ((3669, 3697), 'pulumi.resource.ResourceOptions', 'ResourceOptions', ([], {'parent': 'self'}), '(parent=self)\n', (3684, 3697), False, 'from pulumi.resource import ComponentResource, ResourceOptions\n'), ((4255, 4307), 'pulumi.resource.ResourceOptions', 'ResourceOptions', ([], {'parent': 'self', 'depends_on': 'iam_members'}), '(parent=self, depends_on=iam_members)\n', (4270, 4307), False, 'from pulumi.resource import ComponentResource, ResourceOptions\n'), ((3458, 3486), 'pulumi.resource.ResourceOptions', 'ResourceOptions', ([], {'parent': 'self'}), '(parent=self)\n', (3473, 3486), False, 'from pulumi.resource import ComponentResource, ResourceOptions\n'), ((3770, 3789), 'base64.b64decode', 'base64.b64decode', (['k'], {}), '(k)\n', (3786, 3789), False, 'import json, base64\n'), ((4446, 4504), 'pulumi.resource.ResourceOptions', 'ResourceOptions', ([], {'parent': 'self', 'depends_on': '[gcp_integration]'}), '(parent=self, depends_on=[gcp_integration])\n', (4461, 4504), False, 'from pulumi.resource import ComponentResource, ResourceOptions\n')]
|
import tensorflow as tf
import numpy as np
from model import decoder,vae
import cv2
vae.load_weights("vae_cnn.h5")
lv = np.load("lv.npy")
fourcc = cv2.VideoWriter_fourcc(*'XVID')
video = cv2.VideoWriter("output.avi", fourcc, 30.0, (208, 120))
for i in range(1000):
data = lv[i].reshape(1,128)
img = decoder.predict(data)
img = np.array(img).reshape(120,208,1)
img = img * 255
img = np.array(img).astype("uint8")
img = cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)
video.write(img)
video.release()
|
[
"numpy.load",
"cv2.VideoWriter_fourcc",
"model.decoder.predict",
"cv2.cvtColor",
"numpy.array",
"cv2.VideoWriter",
"model.vae.load_weights"
] |
[((85, 115), 'model.vae.load_weights', 'vae.load_weights', (['"""vae_cnn.h5"""'], {}), "('vae_cnn.h5')\n", (101, 115), False, 'from model import decoder, vae\n'), ((121, 138), 'numpy.load', 'np.load', (['"""lv.npy"""'], {}), "('lv.npy')\n", (128, 138), True, 'import numpy as np\n'), ((148, 179), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (170, 179), False, 'import cv2\n'), ((188, 243), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""output.avi"""', 'fourcc', '(30.0)', '(208, 120)'], {}), "('output.avi', fourcc, 30.0, (208, 120))\n", (203, 243), False, 'import cv2\n'), ((309, 330), 'model.decoder.predict', 'decoder.predict', (['data'], {}), '(data)\n', (324, 330), False, 'from model import decoder, vae\n'), ((444, 481), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_GRAY2RGB'], {}), '(img, cv2.COLOR_GRAY2RGB)\n', (456, 481), False, 'import cv2\n'), ((341, 354), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (349, 354), True, 'import numpy as np\n'), ((404, 417), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (412, 417), True, 'import numpy as np\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
USE_CUDA = torch.cuda.is_available()
device = torch.device("cuda" if USE_CUDA else "cpu")
class Bert_Base(nn.Module):
def __init__(self, opt):
super(Bert_Base, self).__init__()
self.opt = opt
#self.tokenizer = BertTokenizer.from_pretrained(model_path)
def forward(self, inputs, use_hidden_state=False):
text_raw_indices, text_raw_indices_mask, aspect_position_text = inputs[0], inputs[1], inputs[2]
ctx = self.opt.bse.get_vector(text_raw_indices)
ctx_len = torch.sum(text_raw_indices_mask != 0, dim=1)
vectors = []
aspect_vectors = []
asp_len = []
for idx, vector in enumerate(ctx):
# print(aspect_position_text[idx])
# print(vector.size())
#vector = torch.stack(vector)
left, right = aspect_position_text[idx].split('_')
vector = [np.asarray(each, dtype=float) for each in vector]
aspect_vector = vector[int(left):int(right)]
# if self.opt.device:
# vector = vector.cpu()
# aspect_vector = aspect_vector.cpu()
pad_number = self.opt.max_seq_len - len(vector) + 2
#ctx_len.append(len(vector))
vector = np.asarray(vector, dtype=float)
vector = vector[1:-1]
vector = np.concatenate((vector, np.zeros((pad_number, self.opt.embed_dim))))
vector = vector.astype('float32')
vector = torch.from_numpy(vector)
#pad_tuple = (0, 0, left, 0)
#vector = F.pad(vector, pad_tuple, 'constant', 0)
pad_number = self.opt.max_seq_len - len(aspect_vector)
asp_len.append(len(aspect_vector))
aspect_vector = np.asarray(aspect_vector)
aspect_vector = np.concatenate((aspect_vector, np.zeros((pad_number, self.opt.embed_dim))))
aspect_vector = aspect_vector.astype('float32')
aspect_vector = torch.from_numpy(aspect_vector)
if self.opt.device:
vector = vector.to(self.opt.device)
aspect_vector = aspect_vector.to(self.opt.device)
vectors.append(vector)
aspect_vectors.append(aspect_vector)
ctx = torch.stack(vectors)
asp = torch.stack(aspect_vectors)
asp_len = torch.from_numpy(np.asarray(asp_len))
#ctx_len = torch.from_numpy(np.asarray(ctx_len))
if self.opt.device:
asp_len = asp_len.to(self.opt.device)
ctx_len = ctx_len.to(self.opt.device)
ctx.requires_grad = False
asp.requires_grad = False
# print(vectors.size())
# print(aspect_vectors.size())
return ctx, asp, ctx_len, asp_len
|
[
"torch.stack",
"numpy.asarray",
"numpy.zeros",
"torch.cuda.is_available",
"torch.device",
"torch.sum",
"torch.from_numpy"
] |
[((98, 123), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (121, 123), False, 'import torch\n'), ((133, 176), 'torch.device', 'torch.device', (["('cuda' if USE_CUDA else 'cpu')"], {}), "('cuda' if USE_CUDA else 'cpu')\n", (145, 176), False, 'import torch\n'), ((603, 647), 'torch.sum', 'torch.sum', (['(text_raw_indices_mask != 0)'], {'dim': '(1)'}), '(text_raw_indices_mask != 0, dim=1)\n', (612, 647), False, 'import torch\n'), ((2324, 2344), 'torch.stack', 'torch.stack', (['vectors'], {}), '(vectors)\n', (2335, 2344), False, 'import torch\n'), ((2359, 2386), 'torch.stack', 'torch.stack', (['aspect_vectors'], {}), '(aspect_vectors)\n', (2370, 2386), False, 'import torch\n'), ((1332, 1363), 'numpy.asarray', 'np.asarray', (['vector'], {'dtype': 'float'}), '(vector, dtype=float)\n', (1342, 1363), True, 'import numpy as np\n'), ((1555, 1579), 'torch.from_numpy', 'torch.from_numpy', (['vector'], {}), '(vector)\n', (1571, 1579), False, 'import torch\n'), ((1826, 1851), 'numpy.asarray', 'np.asarray', (['aspect_vector'], {}), '(aspect_vector)\n', (1836, 1851), True, 'import numpy as np\n'), ((2044, 2075), 'torch.from_numpy', 'torch.from_numpy', (['aspect_vector'], {}), '(aspect_vector)\n', (2060, 2075), False, 'import torch\n'), ((2422, 2441), 'numpy.asarray', 'np.asarray', (['asp_len'], {}), '(asp_len)\n', (2432, 2441), True, 'import numpy as np\n'), ((970, 999), 'numpy.asarray', 'np.asarray', (['each'], {'dtype': 'float'}), '(each, dtype=float)\n', (980, 999), True, 'import numpy as np\n'), ((1443, 1485), 'numpy.zeros', 'np.zeros', (['(pad_number, self.opt.embed_dim)'], {}), '((pad_number, self.opt.embed_dim))\n', (1451, 1485), True, 'import numpy as np\n'), ((1911, 1953), 'numpy.zeros', 'np.zeros', (['(pad_number, self.opt.embed_dim)'], {}), '((pad_number, self.opt.embed_dim))\n', (1919, 1953), True, 'import numpy as np\n')]
|
from typing import Optional
from aioros_action import ActionClient
from aioros_action import create_client
from aioros import NodeHandle
from aioros_tf2.abc import BufferInterface
from aioros_tf2.exceptions import ConnectivityException
from aioros_tf2.exceptions import ExtrapolationException
from aioros_tf2.exceptions import InvalidArgumentException
from aioros_tf2.exceptions import LookupException
from aioros_tf2.exceptions import TimeoutException
from aioros_tf2.exceptions import TransformException
from genpy import Duration
from genpy import Time
from geometry_msgs.msg import TransformStamped
from tf2_msgs.msg import LookupTransformAction
from tf2_msgs.msg import LookupTransformGoal
from tf2_msgs.msg import TF2Error
class BufferActionClient(BufferInterface):
def __init__(self, ns: str) -> None:
self._ns = ns
self._action_client: Optional[ActionClient] = None
async def init(
self,
node_handle: NodeHandle
) -> None:
self._action_client = await create_client(
node_handle,
self._ns,
LookupTransformAction)
async def close(self) -> None:
if self._action_client:
await self._action_client.close()
self._action_client = None
async def wait_for_server(self) -> None:
await self._action_client.wait_for_server()
async def lookup_transform(
self,
target_frame: str,
source_frame: str,
time: Time,
timeout: Optional[Duration] = None
) -> TransformStamped:
return await self._call_action(
LookupTransformGoal(
target_frame=target_frame,
source_frame=source_frame,
source_time=time,
timeout=timeout or Duration(),
advanced=False))
async def lookup_transform_full(
self,
target_frame: str,
target_time: Time,
source_frame: str,
source_time: Time,
fixed_frame: str,
timeout: Optional[Duration] = None
) -> TransformStamped:
return await self._call_action(
LookupTransformGoal(
target_frame=target_frame,
source_frame=source_frame,
source_time=source_time,
timeout=timeout or Duration(),
target_time=target_time,
fixed_frame=fixed_frame,
advanced=True))
async def can_transform(
self,
target_frame: str,
source_frame: str,
time: Time,
timeout: Optional[Duration] = None
) -> bool:
try:
self.lookup_transform(
target_frame,
source_frame,
time,
timeout)
return True
except TransformException:
return False
async def can_transform_full(
self,
target_frame: str,
target_time: Time,
source_frame: str,
source_time: Time,
fixed_frame: str,
timeout: Optional[Duration] = None
) -> bool:
try:
self.lookup_transform_full(
target_frame,
target_time,
source_frame,
source_time,
fixed_frame,
timeout)
return True
except TransformException:
return False
async def _call_action(
self,
goal: LookupTransformGoal
) -> TransformStamped:
goal_handle = self._action_client.send_goal(goal)
result = await goal_handle.wait_for_result()
if result.error.error != TF2Error.NO_ERROR:
if result.error.error == TF2Error.LOOKUP_ERROR:
raise LookupException(result.error.error_string)
elif result.error.error == TF2Error.CONNECTIVITY_ERROR:
raise ConnectivityException(result.error.error_string)
elif result.error.error == TF2Error.EXTRAPOLATION_ERROR:
raise ExtrapolationException(result.error.error_string)
elif result.error.error == TF2Error.INVALID_ARGUMENT_ERROR:
raise InvalidArgumentException(result.error.error_string)
elif result.error.error == TF2Error.TIMEOUT_ERROR:
raise TimeoutException(result.error.error_string)
else:
raise TransformException(result.error.error_string)
return result.transform
async def create_buffer_action_client(
node_handle: NodeHandle,
ns: str,
) -> BufferActionClient:
buffer_action_client = BufferActionClient(ns)
await buffer_action_client.init(node_handle)
return buffer_action_client
|
[
"genpy.Duration",
"aioros_tf2.exceptions.LookupException",
"aioros_tf2.exceptions.TimeoutException",
"aioros_tf2.exceptions.ConnectivityException",
"aioros_tf2.exceptions.TransformException",
"aioros_tf2.exceptions.InvalidArgumentException",
"aioros_action.create_client",
"aioros_tf2.exceptions.ExtrapolationException"
] |
[((1018, 1077), 'aioros_action.create_client', 'create_client', (['node_handle', 'self._ns', 'LookupTransformAction'], {}), '(node_handle, self._ns, LookupTransformAction)\n', (1031, 1077), False, 'from aioros_action import create_client\n'), ((3762, 3804), 'aioros_tf2.exceptions.LookupException', 'LookupException', (['result.error.error_string'], {}), '(result.error.error_string)\n', (3777, 3804), False, 'from aioros_tf2.exceptions import LookupException\n'), ((3895, 3943), 'aioros_tf2.exceptions.ConnectivityException', 'ConnectivityException', (['result.error.error_string'], {}), '(result.error.error_string)\n', (3916, 3943), False, 'from aioros_tf2.exceptions import ConnectivityException\n'), ((4035, 4084), 'aioros_tf2.exceptions.ExtrapolationException', 'ExtrapolationException', (['result.error.error_string'], {}), '(result.error.error_string)\n', (4057, 4084), False, 'from aioros_tf2.exceptions import ExtrapolationException\n'), ((1785, 1795), 'genpy.Duration', 'Duration', ([], {}), '()\n', (1793, 1795), False, 'from genpy import Duration\n'), ((2321, 2331), 'genpy.Duration', 'Duration', ([], {}), '()\n', (2329, 2331), False, 'from genpy import Duration\n'), ((4179, 4230), 'aioros_tf2.exceptions.InvalidArgumentException', 'InvalidArgumentException', (['result.error.error_string'], {}), '(result.error.error_string)\n', (4203, 4230), False, 'from aioros_tf2.exceptions import InvalidArgumentException\n'), ((4316, 4359), 'aioros_tf2.exceptions.TimeoutException', 'TimeoutException', (['result.error.error_string'], {}), '(result.error.error_string)\n', (4332, 4359), False, 'from aioros_tf2.exceptions import TimeoutException\n'), ((4400, 4445), 'aioros_tf2.exceptions.TransformException', 'TransformException', (['result.error.error_string'], {}), '(result.error.error_string)\n', (4418, 4445), False, 'from aioros_tf2.exceptions import TransformException\n')]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.