id | text | dataset_id
---|---|---
20004
|
import os
import sys
from typing import Iterable
from jinja2 import Environment, FileSystemLoader, Template
import config as cfg
from . import app_root_dir, doc_root_dir, resource_dir, template_dir
_usage = "Usage: generate.py <onprem|aws|gcp|azure|k8s|alibabacloud|oci|programming|saas>"
def load_tmpl(tmpl: str) -> Template:
env = Environment(loader=FileSystemLoader(template_dir()))
env.filters["up_or_title"] = up_or_title
return env.get_template(tmpl)
def up_or_title(pvd: str, s: str) -> str:
if s in cfg.UPPER_WORDS.get(pvd, ()):
return s.upper()
if s in cfg.TITLE_WORDS.get(pvd, {}):
return cfg.TITLE_WORDS[pvd][s]
return s.title()
def gen_classes(pvd: str, typ: str, paths: Iterable[str]) -> str:
"""Generate all service node classes based on resources paths with class templates."""
tmpl = load_tmpl(cfg.TMPL_MODULE)
# TODO: extract the gen class metas for sharing
# TODO: independent function for generating all pvd/typ/paths pairs
def _gen_class_meta(path: str) -> dict:
base = os.path.splitext(path)[0]
name = "".join([up_or_title(pvd, s) for s in base.split("-")])
return {"name": name, "icon": path}
metas = map(_gen_class_meta, paths)
    aliases = cfg.ALIASES[pvd].get(typ, {})
return tmpl.render(pvd=pvd, typ=typ, metas=metas, aliases=aliases)
def gen_apidoc(pvd: str, typ_paths: dict) -> str:
tmpl = load_tmpl(cfg.TMPL_APIDOC)
# TODO: remove
def _gen_class_name(path: str) -> str:
base = os.path.splitext(path)[0]
name = "".join([up_or_title(pvd, s) for s in base.split("-")])
return name
typ_classes = {}
for typ, paths in sorted(typ_paths.items()):
typ_classes[typ] = []
for name in map(_gen_class_name, paths):
alias = cfg.ALIASES[pvd].get(typ, {}).get(name)
typ_classes[typ].append({"name": name, "alias": alias})
return tmpl.render(pvd=pvd, typ_classes=typ_classes)
def make_module(pvd: str, typ: str, classes: str) -> None:
"""Create a module file"""
mod_path = os.path.join(app_root_dir(pvd), f"{typ}.py")
with open(mod_path, "w+") as f:
f.write(classes)
def make_apidoc(pvd: str, content: str) -> None:
"""Create an api documentation file"""
mod_path = os.path.join(doc_root_dir(), f"{pvd}.md")
with open(mod_path, "w+") as f:
f.write(content)
def generate(pvd: str) -> None:
"""Generates a service node classes."""
typ_paths = {}
for root, _, files in os.walk(resource_dir(pvd)):
# Extract the names and paths from resources.
files.sort()
pngs = list(filter(lambda f: f.endswith(".png"), files))
paths = list(filter(lambda f: "rounded" not in f, pngs))
# Skip the top-root directory.
typ = os.path.basename(root)
if typ == pvd:
continue
classes = gen_classes(pvd, typ, paths)
make_module(pvd, typ, classes)
typ_paths[typ] = paths
# Build API documentation
apidoc = gen_apidoc(pvd, typ_paths)
make_apidoc(pvd, apidoc)
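# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original script): it only illustrates
# the icon-filename-to-class-name convention that _gen_class_meta() applies.
# _DEMO_UPPER_WORDS is a hypothetical stand-in for cfg.UPPER_WORDS; the real
# tables (and the TITLE_WORDS overrides) live in config.py.
_DEMO_UPPER_WORDS = {"aws": ("elb", "ec2")}
def _demo_class_name(pvd: str, icon_filename: str) -> str:
    base = os.path.splitext(icon_filename)[0]
    return "".join(
        part.upper() if part in _DEMO_UPPER_WORDS.get(pvd, ()) else part.title()
        for part in base.split("-")
    )
# e.g. _demo_class_name("aws", "elb-load-balancer.png") -> "ELBLoadBalancer"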
if __name__ == "__main__":
pvd = sys.argv[1]
if pvd not in cfg.PROVIDERS:
        sys.exit(_usage)
generate(pvd)
|
StarcoderdataPython
|
3323330
|
<reponame>ZTjack/tesseract.js
'''
@Author: Jack
@Date: 2020-04-02 12:55:27
@LastEditors: Jack
@LastEditTime: 2020-04-02 13:46:23
@Description: Straighten a skewed image so the information in it is easier to read http://developers.goalist.co.jp/entry/2019/02/13/150126
'''
import cv2 # opencv-python
import numpy as np
from skimage.filters import threshold_local # scikit-image
import imutils
from matplotlib import pyplot as plt
def show_img(opencv_img):
image = cv2.cvtColor(opencv_img, cv2.COLOR_BGR2RGB)
pixels = np.array(image)
plt.imshow(pixels)
plt.show()
def order_points(pts):
    # initialize a list of coordinates that will be ordered
# such that the first entry in the list is the top-left,
# the second entry is the top-right, the third is the
# bottom-right, and the fourth is the bottom-left
rect = np.zeros((4, 2), dtype = "float32")
# the top-left point will have the smallest sum, whereas
# the bottom-right point will have the largest sum
s = pts.sum(axis = 1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
    # now, compute the difference between the points: the
    # top-right point will have the smallest difference,
    # whereas the bottom-left will have the largest difference
diff = np.diff(pts, axis = 1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
# return the ordered coordinates
return rect
def four_point_transform(image, pts):
# obtain a consistent order of the points and unpack them
# individually
rect = order_points(pts)
(tl, tr, br, bl) = rect
# compute the width of the new image, which will be the
# maximum distance between bottom-right and bottom-left
    # x-coordinates or the top-right and top-left x-coordinates
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
maxWidth = max(int(widthA), int(widthB))
# compute the height of the new image, which will be the
# maximum distance between the top-right and bottom-right
# y-coordinates or the top-left and bottom-left y-coordinates
heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
maxHeight = max(int(heightA), int(heightB))
# now that we have the dimensions of the new image, construct
# the set of destination points to obtain a "birds eye view",
# (i.e. top-down view) of the image, again specifying points
# in the top-left, top-right, bottom-right, and bottom-left
# order
dst = np.array([
[0, 0],
[maxWidth - 1, 0],
[maxWidth - 1, maxHeight - 1],
[0, maxHeight - 1]], dtype = "float32")
# compute the perspective transform matrix and then apply it
M = cv2.getPerspectiveTransform(rect, dst)
warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
# return the warped image
return warped
# read the input image
image = cv2.imread("test2.png")
# clone the original image
original_image = image.copy()
# resize using ratio (old height to the new height)
ratio = image.shape[0] / 500.0
image = imutils.resize(image, height=500)
# # change the color space to YUV
image_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)
# # grab only the Y component
image_y = np.zeros(image_yuv.shape[0:2], np.uint8)
image_y[:, :] = image_yuv[:, :, 0]
# # blur the image to reduce high-frequency noise
image_blurred = cv2.GaussianBlur(image_y, (3, 3), 0)
# show_img(image_blurred)
# # find edges in the image
edges = cv2.Canny(image_blurred, 50, 200, apertureSize=3)
# show_img(edges)
# # find contours
contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# # draw all contours on the original image
cv2.drawContours(image, contours, -1, (0, 255, 0), 1)
# show_img(image)
# # !! Attention !! Do not draw contours on the image at this point
# # I have drawn all the contours above only for illustration
# # to collect all the detected polygons
polygons = []
# loop over the contours
for cnt in contours:
# find the convex hull
hull = cv2.convexHull(cnt)
# compute the approx polygon and put it into polygons
polygons.append(cv2.approxPolyDP(hull, 0.01 * cv2.arcLength(hull, True), False))
# # sort polygons in desc order of contour area
# print(polygons)
sortedPoly = sorted(polygons, key=cv2.contourArea, reverse=True)
# # draw the points of only the largest polygon in red
cv2.drawContours(image, sortedPoly[0], -1, (0, 0, 255), 5)
# show_img(image)
# # get the contours of the largest polygon in the image
simplified_cnt = sortedPoly[0]
simplified_cnt = simplified_cnt[:-1]
# # check if the polygon has four points
if len(simplified_cnt) == 4:
    # transform the perspective of the original image
cropped_image = four_point_transform(original_image, simplified_cnt.reshape(4, 2) * ratio)
# Binarize the cropped image
gray_image = cv2.cvtColor(cropped_image, cv2.COLOR_BGR2GRAY)
T = threshold_local(gray_image, 11, offset=10, method="gaussian")
binarized_image = (gray_image > T).astype("uint8") * 255
show_img(binarized_image)
show_img(cropped_image)
    # cv2.imwrite('binarized_image.jpg', binarized_image)
    # cv2.imwrite('cropped_image.jpg', midpoint(cropped_image))
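    # -----------------------------------------------------------------------
    # Hedged self-check (not part of the original script): order_points() and
    # four_point_transform() can be exercised on a synthetic quadrilateral,
    # with the corners supplied in any order:
    # pts = np.array([[50, 200], [200, 50], [50, 50], [200, 200]], dtype="float32")
    # print(order_points(pts))                       # rows come back as tl, tr, br, bl
    # print(four_point_transform(image, pts).shape)  # (150, 150, 3)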
|
StarcoderdataPython
|
136983
|
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) <NAME> <<EMAIL>>
# This program is published under a GPLv2 license
"""
Classes related to the EAP protocol.
"""
from __future__ import absolute_import
from __future__ import print_function
import struct
from scapy.fields import BitField, ByteField, XByteField, ByteEnumField,\
    ShortField, IntField, XIntField, StrLenField, XStrField,\
    XStrLenField, XStrFixedLenField, LenField, FieldLenField, PacketField,\
    PacketListField, ConditionalField, PadField
from scapy.packet import Packet, Padding, bind_layers
from scapy.layers.l2 import SourceMACField, Ether, CookedLinux, GRE, SNAP
from scapy.utils import issubtype
from scapy.config import conf
from scapy.compat import orb, chb
#
# EAPOL
#
#########################################################################
#
# EAPOL protocol version
# IEEE Std 802.1X-2010 - Section 11.3.1
#########################################################################
#
eapol_versions = {
0x1: "802.1X-2001",
0x2: "802.1X-2004",
0x3: "802.1X-2010",
}
#########################################################################
#
# EAPOL Packet Types
# IEEE Std 802.1X-2010 - Table 11.3
#########################################################################
#
eapol_types = {
0x0: "EAP-Packet", # "EAPOL-EAP" in 801.1X-2010
0x1: "EAPOL-Start",
0x2: "EAPOL-Logoff",
0x3: "EAPOL-Key",
0x4: "EAPOL-Encapsulated-ASF-Alert",
0x5: "EAPOL-MKA",
0x6: "EAPOL-Announcement (Generic)",
0x7: "EAPOL-Announcement (Specific)",
0x8: "EAPOL-Announcement-Req"
}
class EAPOL(Packet):
"""
EAPOL - IEEE Std 802.1X-2010
"""
name = "EAPOL"
fields_desc = [
ByteEnumField("version", 1, eapol_versions),
ByteEnumField("type", 0, eapol_types),
LenField("len", None, "H")
]
EAP_PACKET = 0
START = 1
LOGOFF = 2
KEY = 3
ASF = 4
def extract_padding(self, s):
l = self.len
return s[:l], s[l:]
def hashret(self):
return chb(self.type) + self.payload.hashret()
def answers(self, other):
if isinstance(other, EAPOL):
if ((self.type == self.EAP_PACKET) and
(other.type == self.EAP_PACKET)):
return self.payload.answers(other.payload)
return 0
def mysummary(self):
return self.sprintf("EAPOL %EAPOL.type%")
#
# EAP
#
#########################################################################
#
# EAP methods types
# http://www.iana.org/assignments/eap-numbers/eap-numbers.xhtml#eap-numbers-4
#########################################################################
#
eap_types = {
0: "Reserved",
1: "Identity",
2: "Notification",
3: "Legacy Nak",
4: "MD5-Challenge",
5: "One-Time Password (OTP)",
6: "Generic Token Card (GTC)",
7: "Allocated - RFC3748",
8: "Allocated - RFC3748",
9: "RSA Public Key Authentication",
10: "DSS Unilateral",
11: "KEA",
12: "KEA-VALIDATE",
13: "EAP-TLS",
14: "Defender Token (AXENT)",
15: "RSA Security SecurID EAP",
16: "Arcot Systems EAP",
17: "EAP-Cisco Wireless",
18: "GSM Subscriber Identity Modules (EAP-SIM)",
19: "SRP-SHA1",
20: "Unassigned",
21: "EAP-TTLS",
22: "Remote Access Service",
23: "EAP-AKA Authentication",
24: "EAP-3Com Wireless",
25: "PEAP",
26: "MS-EAP-Authentication",
27: "Mutual Authentication w/Key Exchange (MAKE)",
28: "CRYPTOCard",
29: "EAP-MSCHAP-V2",
30: "DynamID",
31: "Rob EAP",
32: "Protected One-Time Password",
33: "MS-Authentication-TLV",
34: "SentriNET",
35: "EAP-Actiontec Wireless",
36: "Cogent Systems Biometrics Authentication EAP",
37: "AirFortress EAP",
38: "EAP-HTTP Digest",
39: "SecureSuite EAP",
40: "DeviceConnect EAP",
41: "EAP-SPEKE",
42: "EAP-MOBAC",
43: "EAP-FAST",
44: "ZoneLabs EAP (ZLXEAP)",
45: "EAP-Link",
46: "EAP-PAX",
47: "EAP-PSK",
48: "EAP-SAKE",
49: "EAP-IKEv2",
50: "EAP-AKA",
51: "EAP-GPSK",
52: "EAP-pwd",
53: "EAP-EKE Version 1",
54: "EAP Method Type for PT-EAP",
55: "TEAP",
254: "Reserved for the Expanded Type",
255: "Experimental",
}
#########################################################################
#
# EAP codes
# http://www.iana.org/assignments/eap-numbers/eap-numbers.xhtml#eap-numbers-1
#########################################################################
#
eap_codes = {
1: "Request",
2: "Response",
3: "Success",
4: "Failure",
5: "Initiate",
6: "Finish"
}
class EAP(Packet):
"""
RFC 3748 - Extensible Authentication Protocol (EAP)
"""
name = "EAP"
fields_desc = [
ByteEnumField("code", 4, eap_codes),
ByteField("id", 0),
ShortField("len", None),
ConditionalField(ByteEnumField("type", 0, eap_types),
lambda pkt:pkt.code not in [
EAP.SUCCESS, EAP.FAILURE]),
ConditionalField(ByteEnumField("desired_auth_type", 0, eap_types),
lambda pkt:pkt.code == EAP.RESPONSE and pkt.type == 3),
ConditionalField(
StrLenField("identity", '', length_from=lambda pkt: pkt.len - 5),
lambda pkt: pkt.code == EAP.RESPONSE and hasattr(pkt, 'type') and pkt.type == 1),
ConditionalField(
StrLenField("message", '', length_from=lambda pkt: pkt.len - 5),
lambda pkt: pkt.code == EAP.REQUEST and hasattr(pkt, 'type') and pkt.type == 1)
]
#########################################################################
#
# EAP codes
# http://www.iana.org/assignments/eap-numbers/eap-numbers.xhtml#eap-numbers-1
#########################################################################
#
REQUEST = 1
RESPONSE = 2
SUCCESS = 3
FAILURE = 4
INITIATE = 5
FINISH = 6
registered_methods = {}
@classmethod
def register_variant(cls):
cls.registered_methods[cls.type.default] = cls
@classmethod
def dispatch_hook(cls, _pkt=None, *args, **kargs):
if _pkt:
c = orb(_pkt[0])
if c in [1, 2] and len(_pkt) >= 5:
t = orb(_pkt[4])
return cls.registered_methods.get(t, cls)
return cls
def haslayer(self, cls):
if cls == "EAP":
if isinstance(self, EAP):
return True
elif issubtype(cls, EAP):
if isinstance(self, cls):
return True
return super(EAP, self).haslayer(cls)
def getlayer(self, cls, nb=1, _track=None, _subclass=True, **flt):
return super(EAP, self).getlayer(cls, nb=nb, _track=_track,
_subclass=True, **flt)
def answers(self, other):
if isinstance(other, EAP):
if self.code == self.REQUEST:
return 0
elif self.code == self.RESPONSE:
if ((other.code == self.REQUEST) and
(other.type == self.type)):
return 1
elif other.code == self.RESPONSE:
return 1
return 0
def mysummary(self):
summary_str = "EAP %{eap_class}.code% %{eap_class}.type%".format(
eap_class=self.__class__.__name__
)
if self.type == 1 and self.code == EAP.RESPONSE:
summary_str += " %{eap_class}.identity%".format(
eap_class=self.__class__.__name__
)
return self.sprintf(summary_str)
def post_build(self, p, pay):
if self.len is None:
l = len(p) + len(pay)
p = p[:2] + chb((l >> 8) & 0xff) + chb(l & 0xff) + p[4:]
return p + pay
def guess_payload_class(self, _):
return Padding
class EAP_MD5(EAP):
"""
RFC 3748 - "Extensible Authentication Protocol (EAP)"
"""
name = "EAP-MD5"
fields_desc = [
ByteEnumField("code", 1, eap_codes),
ByteField("id", 0),
FieldLenField("len", None, fmt="H", length_of="optional_name",
adjust=lambda p, x: x + 6 + (p.value_size or 0)),
ByteEnumField("type", 4, eap_types),
FieldLenField("value_size", None, fmt="B", length_of="value"),
XStrLenField("value", '', length_from=lambda p: p.value_size),
XStrLenField("optional_name", '', length_from=lambda p: 0 if p.len is None or p.value_size is None else (p.len - p.value_size - 6))
]
class EAP_TLS(EAP):
"""
RFC 5216 - "The EAP-TLS Authentication Protocol"
"""
name = "EAP-TLS"
fields_desc = [
ByteEnumField("code", 1, eap_codes),
ByteField("id", 0),
FieldLenField("len", None, fmt="H", length_of="tls_data",
adjust=lambda p, x: x + 10 if p.L == 1 else x + 6),
ByteEnumField("type", 13, eap_types),
BitField('L', 0, 1),
BitField('M', 0, 1),
BitField('S', 0, 1),
BitField('reserved', 0, 5),
ConditionalField(IntField('tls_message_len', 0), lambda pkt: pkt.L == 1),
XStrLenField('tls_data', '', length_from=lambda pkt: 0 if pkt.len is None else pkt.len - (6 + 4 * pkt.L))
]
class EAP_TTLS(EAP):
"""
RFC 5281 - "Extensible Authentication Protocol Tunneled Transport Layer
Security Authenticated Protocol Version 0 (EAP-TTLSv0)"
"""
name = "EAP-TTLS"
fields_desc = [
ByteEnumField("code", 1, eap_codes),
ByteField("id", 0),
FieldLenField("len", None, fmt="H", length_of="data",
adjust=lambda p, x: x + 10 if p.L == 1 else x + 6),
ByteEnumField("type", 21, eap_types),
BitField("L", 0, 1),
BitField("M", 0, 1),
BitField("S", 0, 1),
BitField("reserved", 0, 2),
BitField("version", 0, 3),
ConditionalField(IntField("message_len", 0), lambda pkt: pkt.L == 1),
XStrLenField("data", "", length_from=lambda pkt: 0 if pkt.len is None else pkt.len - (6 + 4 * pkt.L))
]
class EAP_PEAP(EAP):
"""
draft-josefsson-pppext-eap-tls-eap-05.txt - "Protected EAP Protocol (PEAP)"
"""
name = "PEAP"
fields_desc = [
ByteEnumField("code", 1, eap_codes),
ByteField("id", 0),
FieldLenField("len", None, fmt="H", length_of="tls_data",
adjust=lambda p, x: x + 10 if p.L == 1 else x + 6),
ByteEnumField("type", 25, eap_types),
BitField("L", 0, 1),
BitField("M", 0, 1),
BitField("S", 0, 1),
BitField("reserved", 0, 3),
BitField("version", 1, 2),
ConditionalField(IntField("tls_message_len", 0), lambda pkt: pkt.L == 1),
XStrLenField("tls_data", "", length_from=lambda pkt: 0 if pkt.len is None else pkt.len - (6 + 4 * pkt.L))
]
class EAP_FAST(EAP):
"""
RFC 4851 - "The Flexible Authentication via Secure Tunneling
Extensible Authentication Protocol Method (EAP-FAST)"
"""
name = "EAP-FAST"
fields_desc = [
ByteEnumField("code", 1, eap_codes),
ByteField("id", 0),
FieldLenField("len", None, fmt="H", length_of="data",
adjust=lambda p, x: x + 10 if p.L == 1 else x + 6),
ByteEnumField("type", 43, eap_types),
BitField('L', 0, 1),
BitField('M', 0, 1),
BitField('S', 0, 1),
BitField('reserved', 0, 2),
BitField('version', 0, 3),
ConditionalField(IntField('message_len', 0), lambda pkt: pkt.L == 1),
XStrLenField('data', '', length_from=lambda pkt: 0 if pkt.len is None else pkt.len - (6 + 4 * pkt.L))
]
class LEAP(EAP):
"""
Cisco LEAP (Lightweight EAP)
https://freeradius.org/rfc/leap.txt
"""
name = "Cisco LEAP"
fields_desc = [
ByteEnumField("code", 1, eap_codes),
ByteField("id", 0),
ShortField("len", None),
ByteEnumField("type", 17, eap_types),
ByteField('version', 1),
XByteField('unused', 0),
FieldLenField("count", None, "challenge_response", "B", adjust=lambda p, x: len(p.challenge_response)),
XStrLenField("challenge_response", "", length_from=lambda p: 0 or p.count),
StrLenField("username", "", length_from=lambda p: p.len - (8 + (0 or p.count)))
]
#############################################################################
# IEEE 802.1X-2010 - MACsec Key Agreement (MKA) protocol
#############################################################################
#########################################################################
#
# IEEE 802.1X-2010 standard
# Section 11.11.1
#########################################################################
#
_parameter_set_types = {
1: "Live Peer List",
2: "Potential Peer List",
3: "MACsec SAK Use",
4: "Distributed SAK",
5: "Distributed CAK",
6: "KMD",
7: "Announcement",
255: "ICV Indicator"
}
# Used by MKAParamSet::dispatch_hook() to instantiate the appropriate class
_param_set_cls = {
1: "MKALivePeerListParamSet",
2: "MKAPotentialPeerListParamSet",
3: "MKASAKUseParamSet",
4: "MKADistributedSAKParamSet",
255: "MKAICVSet",
}
class MACsecSCI(Packet):
"""
Secure Channel Identifier.
"""
#########################################################################
#
# IEEE 802.1AE-2006 standard
# Section 9.9
#########################################################################
#
name = "SCI"
fields_desc = [
SourceMACField("system_identifier"),
ShortField("port_identifier", 0)
]
def extract_padding(self, s):
return "", s
class MKAParamSet(Packet):
"""
Class from which every parameter set class inherits (except
MKABasicParamSet, which has no "Parameter set type" field, and must
come first in the list of parameter sets).
"""
MACSEC_DEFAULT_ICV_LEN = 16
EAPOL_MKA_DEFAULT_KEY_WRAP_LEN = 24
@classmethod
def dispatch_hook(cls, _pkt=None, *args, **kargs):
"""
Returns the right parameter set class.
"""
cls = conf.raw_layer
if _pkt is not None:
ptype = orb(_pkt[0])
return globals().get(_param_set_cls.get(ptype), conf.raw_layer)
return cls
class MKABasicParamSet(Packet):
"""
Basic Parameter Set (802.1X-2010, section 11.11).
"""
#########################################################################
#
# IEEE 802.1X-2010 standard
# Section 11.11
#########################################################################
#
name = "Basic Parameter Set"
fields_desc = [
ByteField("mka_version_id", 0),
ByteField("key_server_priority", 0),
BitField("key_server", 0, 1),
BitField("macsec_desired", 0, 1),
BitField("macsec_capability", 0, 2),
BitField("param_set_body_len", 0, 12),
PacketField("SCI", MACsecSCI(), MACsecSCI),
XStrFixedLenField("actor_member_id", "", length=12),
XIntField("actor_message_number", 0),
XIntField("algorithm_agility", 0),
PadField(
XStrLenField(
"cak_name",
"",
length_from=lambda pkt: (pkt.param_set_body_len - 28)
),
4,
padwith=b"\x00"
)
]
def extract_padding(self, s):
return "", s
class MKAPeerListTuple(Packet):
"""
Live / Potential Peer List parameter sets tuples (802.1X-2010, section 11.11).
"""
name = "Peer List Tuple"
fields_desc = [
XStrFixedLenField("member_id", "", length=12),
XStrFixedLenField("message_number", "", length=4),
]
class MKALivePeerListParamSet(MKAParamSet):
"""
Live Peer List parameter sets (802.1X-2010, section 11.11).
"""
#########################################################################
#
# IEEE 802.1X-2010 standard
# Section 11.11
#########################################################################
#
name = "Live Peer List Parameter Set"
fields_desc = [
PadField(
ByteEnumField(
"param_set_type",
1,
_parameter_set_types
),
2,
padwith=b"\x00"
),
ShortField("param_set_body_len", 0),
PacketListField("member_id_message_num", [], MKAPeerListTuple)
]
class MKAPotentialPeerListParamSet(MKAParamSet):
"""
Potential Peer List parameter sets (802.1X-2010, section 11.11).
"""
#########################################################################
#
# IEEE 802.1X-2010 standard
# Section 11.11
#########################################################################
#
name = "Potential Peer List Parameter Set"
fields_desc = [
PadField(
ByteEnumField(
"param_set_type",
2,
_parameter_set_types
),
2,
padwith=b"\x00"
),
ShortField("param_set_body_len", 0),
PacketListField("member_id_message_num", [], MKAPeerListTuple)
]
class MKASAKUseParamSet(MKAParamSet):
"""
SAK Use Parameter Set (802.1X-2010, section 11.11).
"""
#########################################################################
#
# IEEE 802.1X-2010 standard
# Section 11.11
#########################################################################
#
name = "SAK Use Parameter Set"
fields_desc = [
ByteEnumField("param_set_type", 3, _parameter_set_types),
BitField("latest_key_an", 0, 2),
BitField("latest_key_tx", 0, 1),
BitField("latest_key_rx", 0, 1),
BitField("old_key_an", 0, 2),
BitField("old_key_tx", 0, 1),
BitField("old_key_rx", 0, 1),
BitField("plain_tx", 0, 1),
BitField("plain_rx", 0, 1),
BitField("X", 0, 1),
BitField("delay_protect", 0, 1),
BitField("param_set_body_len", 0, 12),
XStrFixedLenField("latest_key_key_server_member_id", "", length=12),
XStrFixedLenField("latest_key_key_number", "", length=4),
XStrFixedLenField("latest_key_lowest_acceptable_pn", "", length=4),
XStrFixedLenField("old_key_key_server_member_id", "", length=12),
XStrFixedLenField("old_key_key_number", "", length=4),
XStrFixedLenField("old_key_lowest_acceptable_pn", "", length=4)
]
class MKADistributedSAKParamSet(MKAParamSet):
"""
Distributed SAK parameter set (802.1X-2010, section 11.11).
"""
#########################################################################
#
# IEEE 802.1X-2010 standard
# Section 11.11
#########################################################################
#
name = "Distributed SAK parameter set"
fields_desc = [
ByteEnumField("param_set_type", 4, _parameter_set_types),
BitField("distributed_an", 0, 2),
BitField("confidentiality_offset", 0, 2),
BitField("unused", 0, 4),
ShortField("param_set_body_len", 0),
XStrFixedLenField("key_number", "", length=4),
ConditionalField(
XStrFixedLenField("macsec_cipher_suite", "", length=8),
lambda pkt: pkt.param_set_body_len > 28
),
XStrFixedLenField(
"sak_aes_key_wrap",
"",
length=MKAParamSet.EAPOL_MKA_DEFAULT_KEY_WRAP_LEN
)
]
class MKADistributedCAKParamSet(MKAParamSet):
"""
Distributed CAK Parameter Set (802.1X-2010, section 11.11).
"""
#########################################################################
#
# IEEE 802.1X-2010 standard
# Section 11.11
#########################################################################
#
name = "Distributed CAK parameter set"
fields_desc = [
PadField(
ByteEnumField(
"param_set_type",
5,
_parameter_set_types
),
2,
padwith=b"\x00"
),
ShortField("param_set_body_len", 0),
XStrFixedLenField(
"cak_aes_key_wrap",
"",
length=MKAParamSet.EAPOL_MKA_DEFAULT_KEY_WRAP_LEN
),
XStrField("cak_key_name", "")
]
class MKAICVSet(MKAParamSet):
"""
ICV (802.1X-2010, section 11.11).
"""
#########################################################################
#
# IEEE 802.1X-2010 standard
# Section 11.11
#########################################################################
#
name = "ICV"
fields_desc = [
PadField(
ByteEnumField(
"param_set_type",
255,
_parameter_set_types
),
2,
padwith=b"\x00"
),
ShortField("param_set_body_len", 0),
XStrFixedLenField("icv", "", length=MKAParamSet.MACSEC_DEFAULT_ICV_LEN)
]
class MKAParamSetPacketListField(PacketListField):
"""
PacketListField that handles the parameter sets.
"""
PARAM_SET_LEN_MASK = 0b0000111111111111
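    # Hedged worked example (not in the original): the parameter-set length is
    # the low 12 bits of bytes 2-3 of each set's 4-byte header, so a header
    # ending in b"\x00\x10" unpacks to 16 and, after masking, getfield()
    # consumes 4 + 16 bytes before moving on to the next parameter set.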
def m2i(self, pkt, m):
return MKAParamSet(m)
def getfield(self, pkt, s):
lst = []
remain = s
while remain:
len_bytes = struct.unpack("!H", remain[2:4])[0]
param_set_len = self.__class__.PARAM_SET_LEN_MASK & len_bytes
current = remain[:4 + param_set_len]
remain = remain[4 + param_set_len:]
current_packet = self.m2i(pkt, current)
lst.append(current_packet)
return remain, lst
class MKAPDU(Packet):
"""
MACsec Key Agreement Protocol Data Unit.
"""
#########################################################################
#
# IEEE 802.1X-2010 standard
# Section 11.11
#########################################################################
#
name = "MKPDU"
fields_desc = [
PacketField("basic_param_set", "", MKABasicParamSet),
MKAParamSetPacketListField("parameter_sets", [], MKAParamSet),
]
def extract_padding(self, s):
return "", s
bind_layers(Ether, EAPOL, type=34958)
bind_layers(Ether, EAPOL, dst='01:80:c2:00:00:03', type=34958)
bind_layers(CookedLinux, EAPOL, proto=34958)
bind_layers(GRE, EAPOL, proto=34958)
bind_layers(EAPOL, EAP, type=0)
bind_layers(SNAP, EAPOL, code=34958)
bind_layers(EAPOL, MKAPDU, type=5)
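# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): building and
# re-dissecting an EAPOL-encapsulated EAP Identity response with the classes
# defined above. The MAC addresses and identity string are placeholders.
if __name__ == "__main__":
    frame = Ether(src="00:11:22:33:44:55", dst="01:80:c2:00:00:03") / \
        EAPOL(version=2, type=0) / \
        EAP(code=2, id=1, type=1, identity="user@example.org")
    frame.show2()                       # computed length fields are filled in
    assert EAP in Ether(bytes(frame))   # round-trips through the bind_layers above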
|
StarcoderdataPython
|
133815
|
<filename>firefly/distributed/reference.py<gh_stars>100-1000
#coding:utf8
'''
Created on 2013-8-14
@author: lan (www.9miao.com)
'''
from twisted.spread import pb
from firefly.utils.services import Service
class ProxyReference(pb.Referenceable):
    '''Proxy channel.'''
    def __init__(self):
        '''Initialize with a default proxy service.'''
        self._service = Service('proxy')
    def addService(self, service):
        '''Attach a service channel.'''
        self._service = service
    def remote_callChild(self, command, *arg, **kw):
        '''Forward data sent through the proxy to the target service.'''
        return self._service.callTarget(command, *arg, **kw)
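# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): because remote_callChild
# is a pb.Referenceable remote_* method, a peer holding this reference can reach
# the attached service via callRemote. The names below are hypothetical.
# proxy = ProxyReference()
# proxy.addService(child_service)   # a firefly Service holding the real targets
# # on the remote side: reference.callRemote("callChild", command_id, *args)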
|
StarcoderdataPython
|
3227947
|
# Write a program that reads an integer and says whether
# or not it is a prime number.
tot = 0
num = int(input('Digite um numero: '))
for c in range(1,num + 1):
if num % c == 0:
        print('\033[34m', end=' ')  # if divisible
tot += 1 # tot = tot + 1
else:
        print('\033[31m', end=' ')  # if not divisible
print('{}'.format (c),end=' ')
print('\nO número {} foi divisivel {} vezes'.format(num,tot))
if tot == 2:
print('É por isso que ele é PRIMO')
else:
print('Por isso ele NÃO É PRIMO')
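# ---------------------------------------------------------------------------
# Hedged alternative sketch (not part of the original exercise): the same
# primality question can be answered with trial division up to the square
# root, instead of counting every divisor below num.
def is_prime(n):
    if n < 2:
        return False
    for d in range(2, int(n ** 0.5) + 1):
        if n % d == 0:
            return False
    return True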
|
StarcoderdataPython
|
1745167
|
<reponame>CMiksche/huntlib
#!/usr/bin/env python
import huntlib.data
from unittest import TestCase
class TestMultiReads(TestCase):
def test_read_json(self):
df = huntlib.data.read_json("support/*.json", lines=True)
(rows, cols) = df.shape
self.assertEqual(cols, 6, "The resulting DataFrame had the wrong number of columns.")
self.assertEqual(rows, 3000015, "The resulting DataFrame had the wrong number of rows.")
self.assertEqual(df.index.nunique(), 3000015, "DataFrame index values are not unique.")
def test_read_csv(self):
df = huntlib.data.read_csv("support/*.csv")
(rows, cols) = df.shape
self.assertEqual(cols, 3, "The resulting DataFrame had the wrong number of columns.")
self.assertEqual(rows, 6, "The resulting DataFrame had the wrong number of rows.")
self.assertEqual(df.index.nunique(), 6, "DataFrame index values are not unique.")
|
StarcoderdataPython
|
1620074
|
<reponame>costa86/pypi-scaffold
from setuptools import setup
#with open("README.md","r") as fh:
# long_description = fh.read()
name = 'special'
setup(
name=name,
version='0.0.2',
description='A short description',
long_description="Please, refer to Project links to see the documentation guide for this project.",
#long_description = long_description,
#long_description_content_type = "text/markdown",
py_modules=[name],
package_dir={'': 'src'},
url="https://github.com/costa86/pypi-scaffold",
author="<NAME>",
author_email="<EMAIL>",
license='MIT',
classifiers=[
'Programming Language :: Python :: 3.9',
'Operating System :: Microsoft :: Windows',
'Operating System :: Unix',
'Topic :: Scientific/Engineering :: GIS',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Development Status :: 5 - Production/Stable',
'Topic :: Utilities',
'Intended Audience :: Developers'
],
keywords=[
'GIS',
'latitude',
'longitude',
'coordinates',
'geolocation',
'maps'
],
python_requires='>=3'#,
# install_requires = [
# "lib1==1.2"
# ]
)
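# ---------------------------------------------------------------------------
# Hedged usage note (not part of the original scaffold): with this setup.py a
# typical local release flow would look something like
#   python setup.py sdist bdist_wheel
#   twine upload dist/*
# assuming setuptools, wheel and twine are available in the environment.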
|
StarcoderdataPython
|
3389930
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 10 14:36:16 2018
@author: Prodipta
"""
from collections import defaultdict
from logbook import Logger
from six import iteritems
from zipline.finance.blotter import Blotter
from zipline.utils.input_validation import expect_types
from zipline.assets import Asset
from zipline.finance.cancel_policy import EODCancel
log = Logger('LiveBlotter')
warning_logger = Logger('LiveAlgoWarning')
class LiveBlotter(Blotter):
def __init__(self, data_frequency, broker):
self.broker = broker
self.data_frequency = data_frequency
# these orders are aggregated by asset
self.open_orders = defaultdict(list)
# keep a dict of orders by their own id
self.orders = {}
# holding orders that have come in since the last event.
self.new_orders = []
self.current_dt = None
self.max_shares = int(1e+11)
self.cancel_policy = EODCancel()
def __repr__(self):
return """
{class_name}(
open_orders={open_orders},
orders={orders},
new_orders={new_orders},
current_dt={current_dt})
""".strip().format(class_name=self.__class__.__name__,
open_orders=self.open_orders,
orders=self.orders,
new_orders=self.new_orders,
current_dt=self.current_dt)
def set_date(self, dt):
self.current_dt = dt
@expect_types(asset=Asset)
def order(self, asset, amount, style, tag):
"""Place an order.
Parameters
----------
asset : zipline.assets.Asset
The asset that this order is for.
amount : int
The amount of shares to order. If ``amount`` is positive, this is
the number of shares to buy or cover. If ``amount`` is negative,
this is the number of shares to sell or short.
style : zipline.finance.execution.ExecutionStyle
The execution style for the order.
        tag : str
            A broker-specific tag that is forwarded together with the order.
Returns
-------
order_id : str or None
The unique identifier for this order, or None if no order was
placed.
Notes
-----
amount > 0 :: Buy/Cover
amount < 0 :: Sell/Short
Market order: order(asset, amount)
Limit order: order(asset, amount, style=LimitOrder(limit_price))
Stop order: order(asset, amount, style=StopOrder(stop_price))
StopLimit order: order(asset, amount, style=StopLimitOrder(limit_price,
stop_price))
"""
# something could be done with amount to further divide
# between buy by share count OR buy shares up to a dollar amount
# numeric == share count AND "$dollar.cents" == cost amount
if amount == 0:
# Don't bother placing orders for 0 shares.
return None
elif amount > self.max_shares:
# Arbitrary limit of 100 billion (US) shares will never be
# exceeded except by a buggy algorithm.
raise OverflowError("Can't order more than %d shares" %
self.max_shares)
order_id = self.broker.order(asset, amount, style, tag)
return order_id
def cancel(self, order_id, relay_status=True):
if order_id not in self.orders:
return
cur_order = self.orders[order_id]
if cur_order.open:
order_list = self.open_orders[cur_order.asset]
if cur_order in order_list:
order_list.remove(cur_order)
if cur_order in self.new_orders:
self.new_orders.remove(cur_order)
cur_order.cancel()
cur_order.dt = self.current_dt
if relay_status:
# we want this order's new status to be relayed out
# along with newly placed orders.
self.new_orders.append(cur_order)
def cancel_all_orders_for_asset(self, asset, warn=False,
relay_status=True):
"""
For a live blotter with EOD cancel policy we never have to do this
"""
raise ValueError('Unexpected cancel_all_orders_for_asset function call from blotter')
def execute_cancel_policy(self, event):
"""
For a live blotter with EOD cancel policy we never have to do this
"""
raise ValueError('Unexpected execute_cancel_policy function call from blotter')
def reject(self, order_id, reason=''):
"""
For a live blotter we never have to do this
"""
raise ValueError('Unexpected reject function call from blotter')
def hold(self, order_id, reason=''):
"""
For a live blotter we never have to do this
"""
raise ValueError('Unexpected hold function call from blotter')
def process_splits(self, splits):
"""
For a live blotter with EOD cancel policy we should never need this
"""
raise ValueError('Unexpected process_splits function call from blotter')
def get_transactions(self, bar_data):
"""
Creates a list of transactions based on the current open orders,
slippage model, and commission model.
Parameters
----------
bar_data: zipline._protocol.BarData
Notes
-----
This method book-keeps the blotter's open_orders dictionary, so that
it is accurate by the time we're done processing open orders.
Returns
-------
transactions_list: List
transactions_list: list of transactions resulting from the current
open orders. If there were no open orders, an empty list is
returned.
commissions_list: List
commissions_list: list of commissions resulting from filling the
open orders. A commission is an object with "asset" and "cost"
parameters.
closed_orders: List
closed_orders: list of all the orders that have filled.
"""
closed_orders = []
transactions = []
commissions = []
if self.open_orders:
for asset, asset_orders in iteritems(self.open_orders):
slippage = self.slippage_models[type(asset)]
for order, txn in \
slippage.simulate(bar_data, asset, asset_orders):
commission = self.commission_models[type(asset)]
additional_commission = commission.calculate(order, txn)
if additional_commission > 0:
commissions.append({
"asset": order.asset,
"order": order,
"cost": additional_commission
})
order.filled += txn.amount
order.commission += additional_commission
order.dt = txn.dt
transactions.append(txn)
if not order.open:
closed_orders.append(order)
return transactions, commissions, closed_orders
def prune_orders(self, closed_orders):
"""
Removes all given orders from the blotter's open_orders list.
Parameters
----------
closed_orders: iterable of orders that are closed.
Returns
-------
None
"""
# remove all closed orders from our open_orders dict
for order in closed_orders:
asset = order.asset
asset_orders = self.open_orders[asset]
try:
asset_orders.remove(order)
except ValueError:
continue
# now clear out the assets from our open_orders dict that have
# zero open orders
for asset in list(self.open_orders.keys()):
if len(self.open_orders[asset]) == 0:
del self.open_orders[asset]
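# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): LiveBlotter only needs
# a broker object exposing .order(asset, amount, style, tag) and returning an
# order id, so a stub is enough to exercise the order() path. `some_asset`
# stands for a zipline Asset instance.
# class _StubBroker(object):
#     def order(self, asset, amount, style, tag):
#         return "order-1"
# blotter = LiveBlotter(data_frequency="minute", broker=_StubBroker())
# order_id = blotter.order(some_asset, 100, style=None, tag="demo")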
|
StarcoderdataPython
|
1797917
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from zoo.chronos.model.forecast.abstract import Forecaster
from zoo.chronos.model.forecast.utils import np_to_creator, set_pytorch_seed
from zoo.chronos.model.tcn import TCNPytorch
from zoo.chronos.model.tcn import model_creator, optimizer_creator, loss_creator
from zoo.orca.data import XShards
from zoo.orca.learn.pytorch.estimator import Estimator
from zoo.orca.learn.metrics import MSE, MAE
import torch
import numpy as np
import os
ORCA_METRICS = {"mse": MSE, "mae": MAE}
class TCNForecaster(Forecaster):
"""
Example:
>>> #The dataset is split into x_train, x_val, x_test, y_train, y_val, y_test
>>> forecaster = TCNForecaster(past_seq_len=24,
future_seq_len=5,
input_feature_num=1,
output_feature_num=1,
...)
>>> forecaster.fit(x_train, y_train)
>>> forecaster.to_local() # if you set distributed=True
>>> test_pred = forecaster.predict(x_test)
>>> test_eval = forecaster.evaluate(x_test, y_test)
>>> forecaster.save({ckpt_name})
>>> forecaster.restore({ckpt_name})
"""
def __init__(self,
past_seq_len,
future_seq_len,
input_feature_num,
output_feature_num,
num_channels=[30]*7,
kernel_size=3,
repo_initialization=True,
dropout=0.1,
optimizer="Adam",
loss="mse",
lr=0.001,
metrics=["mse"],
seed=None,
distributed=False,
workers_per_node=1,
distributed_backend="torch_distributed"):
"""
Build a TCN Forecast Model.
TCN Forecast may fall into local optima. Please set repo_initialization
to False to alleviate the issue. You can also change a random seed to
work around.
:param past_seq_len: Specify the history time steps (i.e. lookback).
:param future_seq_len: Specify the output time steps (i.e. horizon).
:param input_feature_num: Specify the feature dimension.
:param output_feature_num: Specify the output dimension.
:param num_channels: Specify the convolutional layer filter number in
TCN's encoder. This value defaults to [30]*7.
:param kernel_size: Specify convolutional layer filter height in TCN's
encoder. This value defaults to 3.
:param repo_initialization: if to use framework default initialization,
True to use paper author's initialization and False to use the
framework's default initialization. The value defaults to True.
        :param dropout: Specify the dropout probability (i.e. the probability
            that a unit is dropped). This value defaults to 0.1.
:param optimizer: Specify the optimizer used for training. This value
defaults to "Adam".
:param loss: Specify the loss function used for training. This value
defaults to "mse". You can choose from "mse", "mae" and
"huber_loss".
:param lr: Specify the learning rate. This value defaults to 0.001.
        :param metrics: A list of metrics for evaluating the quality of
            forecasting. You may only choose from "mse" and "mae" for a
            distributed forecaster. You may choose from "me", "mae", "mse",
            "rmse", "msle", "r2", "mpe", "mape", "mspe", "smape", "mdape"
            and "smdape" for a non-distributed forecaster.
:param seed: int, random seed for training. This value defaults to None.
:param distributed: bool, if init the forecaster in a distributed
fashion. If True, the internal model will use an Orca Estimator.
If False, the internal model will use a pytorch model. The value
defaults to False.
:param workers_per_node: int, the number of worker you want to use.
The value defaults to 1. The param is only effective when
distributed is set to True.
:param distributed_backend: str, select from "torch_distributed" or
"horovod". The value defaults to "torch_distributed".
"""
# random seed setting
set_pytorch_seed(seed)
# config setting
self.data_config = {
"past_seq_len": past_seq_len,
"future_seq_len": future_seq_len,
"input_feature_num": input_feature_num,
"output_feature_num": output_feature_num
}
self.config = {
"lr": lr,
"loss": loss,
"num_channels": num_channels,
"kernel_size": kernel_size,
"repo_initialization": repo_initialization,
"optim": optimizer,
"dropout": dropout
}
self.metrics = metrics
# create internal implementation
self.internal = None
self.distributed = distributed
if self.distributed:
def model_creator_tcn(config):
set_pytorch_seed(seed)
model = model_creator({**self.config, **self.data_config})
model.train()
return model
self.internal = Estimator.from_torch(model=model_creator_tcn,
optimizer=optimizer_creator,
loss=loss_creator,
metrics=[ORCA_METRICS[name]()
for name in self.metrics],
backend=distributed_backend,
use_tqdm=True,
config={"lr": lr},
workers_per_node=workers_per_node)
else:
self.internal = TCNPytorch(check_optional_config=False)
def fit(self, x, y, epochs=1, batch_size=32):
# TODO: give an option to close validation during fit to save time.
"""
Fit(Train) the forecaster.
:param x: A numpy array with shape (num_samples, lookback, feature_dim).
lookback and feature_dim should be the same as past_seq_len and input_feature_num.
:param y: A numpy array with shape (num_samples, horizon, target_dim).
horizon and target_dim should be the same as future_seq_len and output_feature_num.
:param epochs: Number of epochs you want to train. The value defaults to 1.
:param batch_size: Number of batch size you want to train. The value defaults to 32.
:return: Evaluation results on validation data.
"""
# input check
validation_data = (x, y)
self.config["batch_size"] = batch_size
self._check_data(x, y)
# fit on internal
if self.distributed:
return self.internal.fit(data=np_to_creator((x, y)),
epochs=epochs,
batch_size=batch_size)
else:
return self.internal.fit_eval(data=(x, y),
validation_data=validation_data,
epochs=epochs,
metric=self.metrics[0], # only use the first metric
**self.config)
def _check_data(self, x, y):
assert self.data_config["past_seq_len"] == x.shape[-2], \
"The x shape should be (batch_size, past_seq_len, input_feature_num), \
Got past_seq_len of {} in config while x input shape of {}."\
.format(self.data_config["past_seq_len"], x.shape[-2])
assert self.data_config["future_seq_len"] == y.shape[-2], \
"The y shape should be (batch_size, future_seq_len, output_feature_num), \
Got future_seq_len of {} in config while y input shape of {}."\
.format(self.data_config["future_seq_len"], y.shape[-2])
assert self.data_config["input_feature_num"] == x.shape[-1],\
"The x shape should be (batch_size, past_seq_len, input_feature_num), \
Got input_feature_num of {} in config while x input shape of {}."\
.format(self.data_config["input_feature_num"], x.shape[-1])
assert self.data_config["output_feature_num"] == y.shape[-1], \
"The y shape should be (batch_size, future_seq_len, output_feature_num), \
Got output_feature_num of {} in config while y input shape of {}."\
.format(self.data_config["output_feature_num"], y.shape[-1])
def predict(self, x, batch_size=32):
"""
Predict using a trained forecaster.
if you want to predict on a single node(which is common practice), please call
.to_local().predict(x, ...)
:param x: A numpy array with shape (num_samples, lookback, feature_dim).
:param batch_size: predict batch size. The value will not affect predict
result but will affect resources cost(e.g. memory and time).
        :return: A numpy array with shape (num_samples, horizon, target_dim).
"""
if self.distributed:
# map input to a xshard
x = XShards.partition(x)
def transform_to_dict(train_data):
return {"x": train_data}
x = x.transform_shard(transform_to_dict)
# predict with distributed fashion
yhat = self.internal.predict(x, batch_size=batch_size)
# collect result from xshard to numpy
yhat = yhat.collect()
yhat = np.concatenate([yhat[i]['prediction'] for i in range(len(yhat))], axis=0)
if yhat.ndim == 2:
yhat = np.expand_dims(yhat, axis=2)
return yhat
else:
if not self.internal.model_built:
raise RuntimeError("You must call fit or restore first before calling predict!")
return self.internal.predict(x, batch_size=batch_size)
def predict_with_onnx(self, x, batch_size=32, dirname=None):
"""
Predict using a trained forecaster with onnxruntime. The method can only be
used when forecaster is a non-distributed version.
:param x: A numpy array with shape (num_samples, lookback, feature_dim).
:param batch_size: predict batch size. The value will not affect predict
result but will affect resources cost(e.g. memory and time).
:param dirname: The directory to save onnx model file. This value defaults
to None for no saving file.
        :return: A numpy array with shape (num_samples, horizon, target_dim).
"""
if self.distributed:
raise NotImplementedError("ONNX inference has not been supported for distributed\
forecaster. You can call .to_local() to transform the\
forecaster to a non-distributed version.")
if not self.internal.model_built:
raise RuntimeError("You must call fit or restore first before calling predict!")
return self.internal.predict_with_onnx(x, batch_size=batch_size, dirname=dirname)
def evaluate(self, x, y, batch_size=32, multioutput="raw_values"):
"""
Evaluate using a trained forecaster.
        Please note that the evaluate result is calculated on scaled y and yhat. If you
        scaled your data (e.g. used .scale() on the TSDataset), use the following code
        snippet to evaluate your result on unscaled data.
if you want to evaluate on a single node(which is common practice), please call
.to_local().evaluate(x, y, ...)
>>> from zoo.automl.common.metrics import Evaluator
>>> y_hat = forecaster.predict(x)
>>> y_hat_unscaled = tsdata.unscale_numpy(y_hat) # or other customized unscale methods
>>> y_unscaled = tsdata.unscale_numpy(y) # or other customized unscale methods
>>> Evaluator.evaluate(metric=..., y_unscaled, y_hat_unscaled, multioutput=...)
:param x: A numpy array with shape (num_samples, lookback, feature_dim).
:param y: A numpy array with shape (num_samples, horizon, target_dim).
:param batch_size: evaluate batch size. The value will not affect evaluate
result but will affect resources cost(e.g. memory and time).
:param multioutput: Defines aggregating of multiple output values.
String in ['raw_values', 'uniform_average']. The value defaults to
'raw_values'.The param is only effective when the forecaster is a
non-distribtued version.
:return: A list of evaluation results. Each item represents a metric.
"""
if self.distributed:
return self.internal.evaluate(data=np_to_creator((x, y)),
batch_size=batch_size)
else:
if not self.internal.model_built:
raise RuntimeError("You must call fit or restore first before calling evaluate!")
return self.internal.evaluate(x, y, metrics=self.metrics,
multioutput=multioutput, batch_size=batch_size)
def evaluate_with_onnx(self, x, y,
batch_size=32,
dirname=None,
multioutput="raw_values"):
"""
Evaluate using a trained forecaster with onnxruntime. The method can only be
used when forecaster is a non-distributed version.
        Please note that the evaluate result is calculated on scaled y and yhat. If you
        scaled your data (e.g. used .scale() on the TSDataset), use the following code
        snippet to evaluate your result on unscaled data.
>>> from zoo.automl.common.metrics import Evaluator
>>> y_hat = forecaster.predict(x)
>>> y_hat_unscaled = tsdata.unscale_numpy(y_hat) # or other customized unscale methods
>>> y_unscaled = tsdata.unscale_numpy(y) # or other customized unscale methods
>>> Evaluator.evaluate(metric=..., y_unscaled, y_hat_unscaled, multioutput=...)
:param x: A numpy array with shape (num_samples, lookback, feature_dim).
:param y: A numpy array with shape (num_samples, horizon, target_dim).
:param batch_size: evaluate batch size. The value will not affect evaluate
result but will affect resources cost(e.g. memory and time).
:param dirname: The directory to save onnx model file. This value defaults
to None for no saving file.
:param multioutput: Defines aggregating of multiple output values.
String in ['raw_values', 'uniform_average']. The value defaults to
'raw_values'.
:return: A list of evaluation results. Each item represents a metric.
"""
if self.distributed:
raise NotImplementedError("ONNX inference has not been supported for distributed\
forecaster. You can call .to_local() to transform the\
forecaster to a non-distributed version.")
if not self.internal.model_built:
raise RuntimeError("You must call fit or restore first before calling evaluate!")
return self.internal.evaluate_with_onnx(x, y,
metrics=self.metrics,
dirname=dirname,
multioutput=multioutput,
batch_size=batch_size)
def save(self, checkpoint_file):
"""
Save the forecaster.
:param checkpoint_file: The location you want to save the forecaster.
"""
if self.distributed:
self.internal.save(checkpoint_file)
else:
if not self.internal.model_built:
raise RuntimeError("You must call fit or restore first before calling save!")
self.internal.save(checkpoint_file)
def restore(self, checkpoint_file):
"""
restore the forecaster.
:param checkpoint_file: The checkpoint file location you want to load the forecaster.
"""
if self.distributed:
self.internal.load(checkpoint_file)
else:
self.internal.restore(checkpoint_file)
def to_local(self):
"""
Transform a distributed forecaster to a local (non-distributed) one.
Common practice is to use distributed training (fit) and predict/
evaluate with onnx or other frameworks on a single node. To do so,
you need to call .to_local() and transform the forecaster to a non-
distributed one.
        The optimizer is refreshed, so incremental training after to_local
        may not behave as expected.
:return: a forecaster instance.
"""
# TODO: optimizer is refreshed, which is not reasonable
if not self.distributed:
raise RuntimeError("The forecaster has become local.")
model = self.internal.get_model()
state = {
"config": {**self.data_config, **self.config},
"model": model.state_dict(),
"optimizer": optimizer_creator(model, {"lr": self.config["lr"]}).state_dict(),
}
self.shutdown()
self.internal = TCNPytorch(check_optional_config=False)
self.internal.load_state_dict(state)
self.distributed = False
return self
def shutdown(self, force=False):
"""
        Only used when you want to shut down a distributed forecaster's
        workers and release resources.
:param force: bool, if force to shut down the resources.
"""
if not self.distributed:
raise RuntimeError("A local forecaster does not need shutdown.")
self.internal.shutdown(force)
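# ---------------------------------------------------------------------------
# Hedged shape sketch (not part of the original module): random arrays matching
# the (num_samples, past_seq_len, input_feature_num) /
# (num_samples, future_seq_len, output_feature_num) contract that _check_data()
# enforces, for a forecaster built with lookback 24 and horizon 5.
# x = np.random.randn(100, 24, 1).astype(np.float32)
# y = np.random.randn(100, 5, 1).astype(np.float32)
# forecaster = TCNForecaster(past_seq_len=24, future_seq_len=5,
#                            input_feature_num=1, output_feature_num=1)
# forecaster.fit(x, y, epochs=1, batch_size=32)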
|
StarcoderdataPython
|
3231468
|
<gh_stars>0
from abc import ABC, abstractmethod
ALLOWED_EXTENSIONS = ['html', 'csv', 'mp3', 'mp4', 'txt']
class AbstractRenderer(ABC):
@abstractmethod
def render(self):
pass
class HTMLRenderer(AbstractRenderer):
def render(self):
print("Render using HTMLRemderer.")
class Mp4Renderer(AbstractRenderer):
    def render(self):
        print("Render mp4 streamer.")
class Mp3Renderer(AbstractRenderer):
    def render(self):
        print("Render mp3 streamer.")
class FileHandler:
created_files = list()
def __init__(self, filename):
self.filename = filename
@property
def extension(self):
return self.filename.split('.')[-1]
@classmethod
def create(cls, filename):
if filename.split('.')[-1] not in ALLOWED_EXTENSIONS:
print(f"{filename}: Not accepted")
return False
print(f"{filename}: Accepted successfully.")
FileHandler.created_files.append(filename)
return cls(filename)
    def render(self):
        handler_dict = {
            "html": HTMLRenderer,
            "mp3": Mp3Renderer,
            "mp4": Mp4Renderer
        }
        handler = handler_dict.get(self.extension)
        if handler is None:
            # "csv" and "txt" are accepted extensions without a renderer yet.
            print(f"No renderer registered for .{self.extension} files.")
            return None
        return handler().render()
if __name__ == "__main__":
f1 = FileHandler.create('doc.pdf')
f2 = FileHandler.create('doc.html')
f3 = FileHandler.create('doc.mp2')
f4 = FileHandler.create('doc.mp4')
for file_name in FileHandler.created_files:
FileHandler(file_name).render()
|
StarcoderdataPython
|
4835441
|
from models import Fetcher
class ArtisanAndRecipe():
def getArtisan(self, server="eu", artisanSlug='blacksmith',
locale="en_US"):
self.route = '/d3/data/artisan/{}'.format(artisanSlug)
return Fetcher.fetchData(
server=server, locale=locale, route=self.route)
def getRecipe(self,
server="eu",
artisanSlug='blacksmith',
recipeSlug='apprentice-flamberge',
locale="en_US"):
self.route = '/d3/data/artisan/{}/recipe/{}'.format(
artisanSlug, recipeSlug)
return Fetcher.fetchData(
server=server, locale=locale, route=self.route)
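# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): the routes built above
# follow the Diablo III community API layout, so typical calls would look like
# api = ArtisanAndRecipe()
# artisan = api.getArtisan(server="eu", artisanSlug="blacksmith")
# recipe = api.getRecipe(artisanSlug="blacksmith", recipeSlug="apprentice-flamberge")
# with Fetcher.fetchData performing the actual HTTP request.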
|
StarcoderdataPython
|
4822366
|
<gh_stars>1-10
#Only for use in Python 2.6.0a2 and later
from __future__ import print_function
import sys
import os
dirpath = os.path.dirname(os.path.abspath(__file__))
sys.path.append( '.' )
sys.path.append( dirpath + '/../' )
from linkedList.linkedList import LinkedList
class ArrayStack:
def __init__(self, data=None):
self.stack = [None] * 10 if data is None else [data] + [None] * 9
self.size = 1 if data is not None else 0
self.top = 0 if data is not None else -1
def isEmpty(self):
if self.top == -1:
return True
return False
def peek(self):
if self.isEmpty():
print("Stack is empty")
return None
return self.stack[self.top]
def push(self, x):
self.top += 1
if self.top > len(self.stack)-1:
self.resize()
self.stack[self.top] = x
self.size += 1
def pop(self):
if self.isEmpty():
print("Stack is empty")
return None
self.top -= 1
self.size -= 1
return self.stack[self.top+1]
def printStack(self):
print("Stack is:", end=" ")
for i in range(self.size):
print(self.stack[i], end=" ")
print("")
def resize(self):
newArr = [None] * (len(self.stack)*2)
for i in range(0, len(self.stack)):
newArr[i] = self.stack[i]
self.stack = newArr
class LinkedListStack:
def __init__(self, data=None):
self.stack = LinkedList(data)
self.size = self.stack.size
def isEmpty(self):
if self.size <= 0:
return True
return False
def peek(self):
return self.stack.head.data
def push(self, x):
self.stack.insertHead(x)
self.size += 1
def pop(self):
x = self.stack.head.data
self.stack.deleteHead()
return x
def printStack(self):
print("Stack", end="")
self.stack.printList()
def checkBalancedParentheses(s):
stack = ArrayStack()
for i in s:
if i == "(" or i == "{" or i == "[":
stack.push(i)
elif i == ")" or i == "}" or i =="]":
if stack.isEmpty():
return False
x = stack.pop()
if i == ")" and x != "(":
return False
if i == "}" and x != "{":
return False
if i == "]" and x != "]":
return False
if stack.isEmpty():
return True
else:
return False
def evalPostfix(exp):
s = LinkedListStack()
for i in exp:
if i == "*" or i == "/" or i == "+" or i == "-":
op2 = s.pop()
op1 = s.pop()
            res = 0
if i == "*":
res = op1 * op2
elif i == "/":
res = op1 / op2
elif i == "+":
res = op1 + op2
elif i == "-":
res = op1 - op2
s.push(res)
else:
s.push(int(i))
return s.pop()
def evalPrefix(exp):
s = LinkedListStack()
for x in range(len(exp), 0, -1):
i = exp[x - 1]
if i == "*" or i == "/" or i == "+" or i == "-":
op1 = s.pop()
op2 = s.pop()
            res = 0
if i == "*":
res = op1 * op2
elif i == "/":
res = op1 / op2
elif i == "+":
res = op1 + op2
elif i == "-":
res = op1 - op2
s.push(res)
else:
s.push(int(i))
return s.pop()
def infixToPostfix(exp):
s = ArrayStack()
res = []
for i in exp:
print("i is: ", end="")
print(i)
if i == "*" or i == "/" or i == "+" or i == "-":
while s.isEmpty() == False and s.peek() != "(" and getPrecedence(s.peek(), i) == s.peek():
res.append(s.pop())
print("appended ", end=" ")
print(i)
s.push(i)
print("pushed: ", end="")
print(i)
elif i == "(":
s.push(i)
elif i == ")":
while s.isEmpty() == False and s.peek() != "(":
res.append(s.pop())
s.pop()
else:
res.append(i)
while s.isEmpty() == False:
res.append(s.pop())
print("Whileloop after case")
arr = "".join(res)
return arr
def getPrecedence(x, y):
    # Return the operator that should be applied first; the stacked operator x
    # wins ties so that operators of equal precedence stay left-associative.
    precedence = {"+": 1, "-": 1, "*": 2, "/": 2}
    return x if precedence.get(x, 0) >= precedence.get(y, 0) else y
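# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): quick checks of the
# helpers above, guarded so they only run when this file is executed directly.
if __name__ == "__main__":
    print(checkBalancedParentheses("({[]})"))   # True
    print(evalPostfix("23*4+"))                 # 2*3+4 -> 10
    print(evalPrefix("+*234"))                  # (2*3)+4 -> 10
    print(infixToPostfix("(2+3)*4"))            # "23+4*" (with debug prints)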
|
StarcoderdataPython
|
184682
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["setup", "update_list", "update_database", "write_tweet"]
import os
import json
import nltk
import tweepy
import string
import numpy as np
import cPickle as pickle
from collections import defaultdict
PROJECTNAME = "parrotization"
DATABASE_FILE = "{0}.pkl".format(PROJECTNAME)
SETTINGS_FILE = "{0}.json".format(PROJECTNAME)
START = "<S>"
STOP = "</S>"
def load_settings():
if os.path.exists(SETTINGS_FILE):
with open(SETTINGS_FILE, "r") as f:
settings = json.load(f)
else:
settings = {}
return settings
def save_settings(settings):
with open(SETTINGS_FILE, "w") as f:
json.dump(settings, f, indent=2)
def get_api():
settings = load_settings()
auth = tweepy.OAuthHandler(settings["consumer_key"],
settings["consumer_secret"])
auth.secure = True
auth.set_access_token(settings["user_key"], settings["user_secret"])
return tweepy.API(auth,
wait_on_rate_limit=True,
wait_on_rate_limit_notify=True)
def _default():
return defaultdict(int)
def load_db():
if not os.path.exists(DATABASE_FILE):
bigrams = defaultdict(_default)
trigrams = defaultdict(_default)
return (bigrams, trigrams)
with open(DATABASE_FILE, "r") as f:
return pickle.load(f)
def save_db(db):
with open(DATABASE_FILE, "wb") as f:
return pickle.dump(db, f, -1)
def setup(clobber=False):
settings = load_settings()
# Get the app information.
if (clobber or "consumer_key" not in settings or
"consumer_secret" not in settings):
print("Enter some info about your app")
settings["consumer_key"] = raw_input("Consumer key: ")
settings["consumer_secret"] = raw_input("Consumer secret: ")
# Authorize the user.
if clobber or "user_key" not in settings or "user_secret" not in settings:
auth = tweepy.OAuthHandler(settings["consumer_key"],
settings["consumer_secret"],
"oob")
url = auth.get_authorization_url()
print("Go to this URL:\n{0}".format(url))
pin = raw_input("Enter the PIN: ")
auth.get_access_token(pin)
settings["user_key"] = auth.access_token
settings["user_secret"] = auth.access_token_secret
save_settings(settings)
def update_list():
# Get the initial settings.
api = get_api()
settings = load_settings()
if "list_slug" not in settings:
settings["list_slug"] = api.create_list("cast").slug
save_settings(settings)
if "screen_name" not in settings:
settings["screen_name"] = api.me().screen_name
save_settings(settings)
# Add all the followers to the list.
owner, list_slug = settings["screen_name"], settings["list_slug"]
api.add_list_members(user_id=api.followers_ids(),
owner_screen_name=owner, slug=list_slug)
def update_database():
# Get all of the recent tweets in the timeline.
api = get_api()
settings = load_settings()
bigrams, trigrams = load_db()
owner, list_slug = api.me().screen_name, settings["list_slug"]
for tweet in tweepy.Cursor(api.list_timeline, owner_screen_name=owner,
since_id=settings.get("since_id", None),
include_rts=False,
slug=list_slug).items(1000):
# Tokenize the tweet.
text = tweet.text
a, b = "://", "URLURLURL"
text = text.replace(a, b)
tokens = [w.replace(b, a) for w in nltk.word_tokenize(text)]
tokens = [START, START]+tokens+[STOP, STOP]
# Update the id of the most recently seen tweet.
settings["since_id"] = max(tweet.id, settings.get("since_id", 0))
# Update the bigram and trigram dictionaries.
for i in range(2, len(tokens)):
bigrams[tokens[i-1]][tokens[i]] += 1
trigrams[tokens[i-2]+" "+tokens[i-1]][tokens[i]] += 1
# Save the database and the settings file.
save_db((bigrams, trigrams))
save_settings(settings)
def build_tweet(words, api, settings):
s = " "
for i, w in enumerate(words):
if i > 0 and words[i-1] == "@":
try:
f, _ = api.show_friendship(
source_screen_name=settings["screen_name"],
target_screen_name=w)
except tweepy.error.TweepError:
is_follower = False
else:
is_follower = f.followed_by
if is_follower:
s += w + " "
else:
s = s[:-1] + "." + w + " "
elif w.startswith("'") or w in ["n't"]:
s = s[:-1] + w + " "
elif not len(w.strip(string.punctuation)):
if w in ["(", "{", "@", "#", "&", "``"]:
s += w
else:
s = s[:-1] + w + " "
else:
s += w + " "
s = s.strip()
# Finally match any missing parens.
if "(" in s and ")" not in s:
s += ")"
if ")" in s and "(" not in s:
s = "(" + s
s = s.replace("``", "\"").replace("''", "\"")
return s
def write_tweet(alpha=0.6):
api = get_api()
settings = load_settings()
bigrams, trigrams = load_db()
tweet = [START, START]
while True:
b_prob = bigrams[tweet[-1]]
t_prob = trigrams[tweet[-2]+" "+tweet[-1]]
b_norm = sum(b_prob.values())
t_norm = sum(t_prob.values())
if b_norm < 1 or t_norm < 1:
continue
words, probs = [], []
for w in set(b_prob.keys()) | set(t_prob.keys()):
words.append(w)
probs.append(alpha * t_prob.get(w, 0.0)/t_norm
+ (1-alpha) * b_prob.get(w, 0.0)/b_norm)
word = np.random.choice(words, p=probs)
if word == STOP:
if len(tweet) > 6:
break
# Too short.
tweet = [START, START]
continue
tweet.append(word)
sent = build_tweet(tweet[2:], api, settings)
if len(sent) > 140:
# Too long.
tweet = [START, START]
return sent
if __name__ == "__main__":
import sys
if "setup" in sys.argv:
setup()
elif "update" in sys.argv:
update_list()
update_database()
elif "print" in sys.argv:
print(write_tweet())
elif "tweet" in sys.argv:
tweet = write_tweet()
print(tweet)
api = get_api()
api.update_status(tweet)
|
StarcoderdataPython
|
84540
|
import math
import torch
import torch.nn as nn
from torch.distributions import Normal
from torch.nn import init
FixedNormal = Normal
log_prob_normal = FixedNormal.log_prob
FixedNormal.log_probs = lambda self, actions: log_prob_normal(self, actions).sum(-1, keepdim=True)
entropy = FixedNormal.entropy
FixedNormal.entropy = lambda self: entropy(self).sum(-1)
FixedNormal.mode = lambda self: self.mean
class SiLU(nn.Module):
def __init__(self):
super().__init__()
    def silu(self, input):
return input * torch.sigmoid(input)
def forward(self, input):
return self.silu(input)
class GuaussianAction(nn.Module):
def __init__(self, size_in, size_out):
super().__init__()
self.fc_mean = nn.Linear(size_in, size_out)
# ====== INITIALIZATION ======
self.fc_mean.weight.data.mul_(0.1)
self.fc_mean.bias.data.mul_(0.0)
self.logstd = torch.zeros(1, size_out)
def forward(self, x):
action_mean = self.fc_mean(x)
# print(action_mean.shape, self.logstd.shape)
return FixedNormal(action_mean, self.logstd.exp())
class NoisyLinear(nn.Module):
"""Factorised Gaussian NoisyNet"""
def __init__(self, in_features, out_features, sigma0=0.5):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
self.bias = nn.Parameter(torch.Tensor(out_features))
self.noisy_weight = nn.Parameter(
torch.Tensor(out_features, in_features))
self.noisy_bias = nn.Parameter(torch.Tensor(out_features))
self.noise_std = sigma0 / math.sqrt(self.in_features)
self.reset_parameters()
self.register_noise()
def register_noise(self):
in_noise = torch.FloatTensor(self.in_features)
out_noise = torch.FloatTensor(self.out_features)
noise = torch.FloatTensor(self.out_features, self.in_features)
self.register_buffer('in_noise', in_noise)
self.register_buffer('out_noise', out_noise)
self.register_buffer('noise', noise)
def sample_noise(self):
self.in_noise.normal_(0, self.noise_std)
self.out_noise.normal_(0, self.noise_std)
self.noise = torch.mm(
self.out_noise.view(-1, 1), self.in_noise.view(1, -1))
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
self.noisy_weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
self.noisy_bias.data.uniform_(-stdv, stdv)
def forward(self, x):
"""
Note: noise will be updated if x is not volatile
"""
normal_y = nn.functional.linear(x, self.weight, self.bias)
if self.training:
# update the noise once per update
self.sample_noise()
noisy_weight = self.noisy_weight * self.noise
noisy_bias = self.noisy_bias * self.out_noise
noisy_y = nn.functional.linear(x, noisy_weight, noisy_bias)
return noisy_y + normal_y
def __repr__(self):
return self.__class__.__name__ + '(' \
+ 'in_features=' + str(self.in_features) \
+ ', out_features=' + str(self.out_features) + ')'
class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), -1)
class BaseActorCriticNetwork(nn.Module):
def __init__(self, input_size, output_size, use_noisy_net=False, use_continuous=False):
super(BaseActorCriticNetwork, self).__init__()
if use_noisy_net:
linear = NoisyLinear
else:
linear = nn.Linear
self.use_continuous = use_continuous
# self.feature = nn.Sequential(
# linear(input_size, 128),
# nn.ReLU(),
# linear(128, 128),
# nn.ReLU()
# )
self.actor = nn.Sequential(
linear(input_size, 128),
nn.ReLU(),
linear(128, 64),
nn.ReLU(),
GuaussianAction(64, output_size) if use_continuous else linear(64, output_size)
)
self.critic = nn.Sequential(
linear(input_size, 128),
nn.ReLU(),
linear(128, 64),
nn.ReLU(),
linear(64, 1)
)
for p in self.modules():
if isinstance(p, nn.Conv2d):
init.xavier_normal_(p.weight)
p.bias.data.zero_()
if isinstance(p, nn.Linear):
init.xavier_normal_(p.weight)
p.bias.data.zero_()
def forward(self, state):
# x = self.feature(state)
policy = self.actor(state)
value = self.critic(state)
return policy, value
class DeepCnnActorCriticNetwork(nn.Module):
def __init__(self, input_size, output_size, use_noisy_net=False):
super(DeepCnnActorCriticNetwork, self).__init__()
if use_noisy_net:
print('use NoisyNet')
linear = NoisyLinear
else:
linear = nn.Linear
self.feature = nn.Sequential(
nn.Conv2d(in_channels=4, out_channels=32, kernel_size=4, stride=1),
nn.ReLU(),
nn.Conv2d(
in_channels=32,
out_channels=64,
kernel_size=5,
stride=2),
nn.ReLU(),
nn.Conv2d(
in_channels=64,
out_channels=128,
kernel_size=4,
stride=1),
nn.ReLU(),
nn.Conv2d(
in_channels=128,
out_channels=256,
kernel_size=4,
stride=2),
nn.ReLU(),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=4),
nn.ReLU(),
Flatten(),
linear(50176, 512),
nn.ReLU()
)
self.actor = linear(512, output_size)
self.critic = linear(512, 1)
for p in self.modules():
if isinstance(p, nn.Conv2d):
init.kaiming_uniform_(p.weight)
p.bias.data.zero_()
if isinstance(p, nn.Linear):
init.kaiming_uniform_(p.weight, a=1.0)
p.bias.data.zero_()
def forward(self, state):
x = self.feature(state)
policy = self.actor(x)
value = self.critic(x)
return policy, value
class CnnActorCriticNetwork(nn.Module):
def __init__(self, input_size, output_size, use_noisy_net=False):
super(CnnActorCriticNetwork, self).__init__()
if use_noisy_net:
print('use NoisyNet')
linear = NoisyLinear
else:
linear = nn.Linear
self.feature = nn.Sequential(
nn.Conv2d(
in_channels=4,
out_channels=32,
kernel_size=8,
stride=4),
nn.LeakyReLU(),
nn.Conv2d(
in_channels=32,
out_channels=64,
kernel_size=4,
stride=2),
nn.LeakyReLU(),
nn.Conv2d(
in_channels=64,
out_channels=64,
kernel_size=3,
stride=1),
nn.LeakyReLU(),
Flatten(),
linear(
7 * 7 * 64,
512),
nn.LeakyReLU(),
)
self.actor = linear(512, output_size)
self.critic = linear(512, 1)
for p in self.modules():
if isinstance(p, nn.Conv2d):
init.kaiming_uniform_(p.weight)
p.bias.data.zero_()
if isinstance(p, nn.Linear):
init.kaiming_uniform_(p.weight, a=1.0)
p.bias.data.zero_()
def forward(self, state):
x = self.feature(state)
policy = self.actor(x)
value = self.critic(x)
return policy, value
class CuriosityModel(nn.Module):
def __init__(self, input_size, output_size):
super(CuriosityModel, self).__init__()
self.input_size = input_size
self.output_size = output_size
feature_output = 7 * 7 * 64
self.feature = nn.Sequential(
nn.Conv2d(
in_channels=4,
out_channels=32,
kernel_size=8,
stride=4),
nn.LeakyReLU(),
nn.Conv2d(
in_channels=32,
out_channels=64,
kernel_size=4,
stride=2),
nn.LeakyReLU(),
nn.Conv2d(
in_channels=64,
out_channels=64,
kernel_size=3,
stride=1),
nn.LeakyReLU(),
Flatten(),
)
self.inverse_net = nn.Sequential(
nn.Linear(feature_output * 2, 512),
nn.LeakyReLU(),
nn.Linear(512, output_size)
)
self.forward_net = nn.Sequential(
nn.Linear(output_size + feature_output, 512),
nn.LeakyReLU(),
nn.Linear(512, feature_output)
)
for p in self.modules():
if isinstance(p, nn.Conv2d):
init.kaiming_uniform_(p.weight)
p.bias.data.zero_()
if isinstance(p, nn.Linear):
init.kaiming_uniform_(p.weight, a=1.0)
p.bias.data.zero_()
def forward(self, inputs):
state, next_state, action = inputs
encode_state = self.feature(state)
# get pred action
pred_action = torch.cat((encode_state, self.feature(next_state)), 1)
pred_action = self.inverse_net(pred_action)
# ---------------------
# get pred next state
pred_next_state_feature = torch.cat((encode_state, action), 1)
pred_next_state_feature = self.forward_net(pred_next_state_feature)
real_next_state_feature = self.feature(next_state)
return real_next_state_feature, pred_next_state_feature, pred_action
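# Illustrative usage sketch (added, not part of the original module): pushes a dummy batch
# of 4-channel 84x84 observations through CnnActorCriticNetwork. The 84x84 resolution is an
# assumption; it is the input size for which the hard-coded 7 * 7 * 64 flatten dimension holds.
if __name__ == "__main__":
    dummy_obs = torch.zeros(2, 4, 84, 84)            # batch of 2 stacked-frame observations
    model = CnnActorCriticNetwork(input_size=84, output_size=6)
    policy_logits, state_value = model(dummy_obs)
    print(policy_logits.shape, state_value.shape)    # torch.Size([2, 6]) torch.Size([2, 1])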
|
StarcoderdataPython
|
1774883
|
#!/usr/bin/env python
#
# Copyright (C) 2013 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from nose.tools import eq_
from ycm.completers.all import identifier_completer
def GetCursorIdentifier_StartOfLine_test():
eq_( 'foo',
identifier_completer._GetCursorIdentifier(
{
'column_num': 0,
'line_value': 'foo'
} ) )
eq_( 'fooBar',
identifier_completer._GetCursorIdentifier(
{
'column_num': 0,
'line_value': 'fooBar'
} ) )
def GetCursorIdentifier_EndOfLine_test():
eq_( 'foo',
identifier_completer._GetCursorIdentifier(
{
'column_num': 2,
'line_value': 'foo'
} ) )
def GetCursorIdentifier_PastEndOfLine_test():
eq_( '',
identifier_completer._GetCursorIdentifier(
{
'column_num': 10,
'line_value': 'foo'
} ) )
def GetCursorIdentifier_NegativeColumn_test():
eq_( '',
identifier_completer._GetCursorIdentifier(
{
'column_num': -10,
'line_value': 'foo'
} ) )
def GetCursorIdentifier_StartOfLine_StopsAtNonIdentifierChar_test():
eq_( 'foo',
identifier_completer._GetCursorIdentifier(
{
'column_num': 0,
'line_value': 'foo(goo)'
} ) )
def GetCursorIdentifier_AtNonIdentifier_test():
eq_( 'goo',
identifier_completer._GetCursorIdentifier(
{
'column_num': 3,
'line_value': 'foo(goo)'
} ) )
def GetCursorIdentifier_WalksForwardForIdentifier_test():
eq_( 'foo',
identifier_completer._GetCursorIdentifier(
{
'column_num': 0,
'line_value': ' foo'
} ) )
def GetCursorIdentifier_FindsNothingForward_test():
eq_( '',
identifier_completer._GetCursorIdentifier(
{
'column_num': 4,
'line_value': 'foo ()***()'
} ) )
def GetCursorIdentifier_SingleCharIdentifier_test():
eq_( 'f',
identifier_completer._GetCursorIdentifier(
{
'column_num': 0,
'line_value': ' f '
} ) )
def GetCursorIdentifier_StartsInMiddleOfIdentifier_test():
eq_( 'foobar',
identifier_completer._GetCursorIdentifier(
{
'column_num': 3,
'line_value': 'foobar'
} ) )
def GetCursorIdentifier_LineEmpty_test():
eq_( '',
identifier_completer._GetCursorIdentifier(
{
'column_num': 11,
'line_value': ''
} ) )
|
StarcoderdataPython
|
59162
|
<gh_stars>0
from django.db import models
from django.utils.translation import pgettext_lazy
from saleor.core.permissions import MODELS_PERMISSIONS
# Add in the permissions specific to our models.
MODELS_PERMISSIONS += [
'collection_extensions.view',
'collection_extensions.edit'
]
class CollectionExtension(models.Model):
collection = models.OneToOneField(
'product.Collection', on_delete=models.CASCADE,
related_name='extension')
alternative_name = models.CharField(max_length=255, blank=True)
content = models.TextField(help_text=pgettext_lazy(
'Collection extension', 'CMS-able content.'), blank=True)
added = models.DateTimeField(auto_now_add=True)
class Meta:
app_label = 'collection_extensions'
permissions = (
('view', pgettext_lazy('Permission description',
'Can view collection extensions')
),
('edit', pgettext_lazy('Permission description',
'Can edit collection extensions')))
def __str__(self):
return self.collection.name
|
StarcoderdataPython
|
3251918
|
import os
import pandas
import math
import ntpath
from BirdRoostLocation.ReadData import Labels
import numpy as np
from BirdRoostLocation import utils
from BirdRoostLocation.PrepareData import NexradUtils
from BirdRoostLocation import LoadSettings as settings
from BirdRoostLocation.BuildModels.CNN import model as shallow_model
import tensorflow as tf
from keras.models import model_from_json
import keras
class Batch_Generator:
"""This class organized the machine learning labels and creates ML batches.
Class Variables:
self.root_dir: The directory where the radar images are stored
self.ml_sets: A dictionary containing a list of files that are part of
the given ml set
self.batch_size: the size of the minibatch learning batches
self.label_dict: A dictionary of the labels, the key is the filename,
and the value is a ML_Label object.
"""
def __init__(
self,
ml_label_csv,
ml_split_csv,
validate_k_index=3,
test_k_index=4,
default_batch_size=settings.DEFAULT_BATCH_SIZE,
root_dir=utils.RADAR_IMAGE_DIR,
):
self.label_dict = {}
self.root_dir = root_dir
self.no_roost_sets = {}
self.roost_sets = {}
self.no_roost_sets_V06 = {}
self.roost_sets_V06 = {}
self.batch_size = default_batch_size
print("ML LABEL CSV")
print(ml_label_csv)
self.__set_ml_sets(ml_split_csv, validate_k_index, test_k_index)
def __set_ml_sets(self, ml_split_csv, validate_k_index, test_k_index):
"""Create Train, test, and Validation set from k data folds.
The k data folds are saved out to ml_split_csv. The fold at the given
test and train indices as set to their corresponding set. The rest
of the data is put into train. This method will initialize the following
class variables: self.train, self.validation, and self.test. Each of
these contains a list of filenames that correspond with the set.
Args:
ml_split_csv: A path to a csv file, where the csv has three columns,
'AWS_file', 'Roost', and 'split_index'.
validate_k_index: The index of the validation set.
test_k_index: The index of the test set.
"""
print("ML SPLIT CSV")
print(ml_split_csv)
ml_split_pd = pandas.read_csv(ml_split_csv)
# Remove files that weren't found
all_files = utils.getListOfFilesInDirectory(self.root_dir + "data", ".png")
print("ROOT DIR")
print(self.root_dir + "data")
all_files_dict = {}
for i in range(len(all_files)):
all_files_dict[os.path.basename(all_files[i])[2:25]] = True
for index, row in ml_split_pd.iterrows():
if all_files_dict.get(row["AWS_file"]) is None:
ml_split_pd.drop(index, inplace=True)
print("LENGTHS OF NO ROOST/ROOST:")
print(len(ml_split_pd[ml_split_pd.Roost != True]))
print(len(ml_split_pd[ml_split_pd.Roost]))
print("BEFORE self.__set_ml_sets_helper - NO ROOST")
self.__set_ml_sets_helper(
self.no_roost_sets,
self.no_roost_sets_V06,
ml_split_pd[ml_split_pd.Roost != True],
validate_k_index,
test_k_index,
)
print("AFTER self.__set_ml_sets_helper - NO ROOST")
self.__set_ml_sets_helper(
self.roost_sets,
self.roost_sets_V06,
ml_split_pd[ml_split_pd.Roost],
validate_k_index,
test_k_index,
)
print("AFTER self.__set_ml_sets_helper - ROOST")
def __set_ml_sets_helper(self, ml_sets, ml_sets_V06, ml_split_pd, val_k, test_k):
no_val_pd = ml_split_pd[ml_split_pd.split_index != val_k]
ml_sets[utils.ML_Set.training] = list(
no_val_pd[no_val_pd.split_index != test_k]["AWS_file"]
)
ml_sets[utils.ML_Set.validation] = list(
ml_split_pd[ml_split_pd.split_index == val_k]["AWS_file"]
)
ml_sets[utils.ML_Set.testing] = list(
ml_split_pd[ml_split_pd.split_index == test_k]["AWS_file"]
)
print("ml_sets[utils.ML_Set....]")
for key in list(ml_sets.keys()):
ml_sets_V06[key] = []
for item in ml_sets[key]:
if int(item[-1]) >= 6:
ml_sets_V06[key].append(item)
np.random.shuffle(ml_sets[key])
np.random.shuffle(ml_sets_V06[key])
def get_batch_indices(self, ml_sets, ml_set, num_temporal_data=0):
indices = np.random.randint(
low=0, high=len(ml_sets[ml_set]), size=int(self.batch_size / 2)
)
return indices
def get_batch(self, ml_set, dualPol, radar_product=None):
ground_truths = []
train_data = []
filenames = []
roost_sets = self.roost_sets
no_roost_sets = self.no_roost_sets
if dualPol:
roost_sets = self.roost_sets_V06
no_roost_sets = self.no_roost_sets_V06
return train_data, ground_truths, filenames, roost_sets, no_roost_sets
def single_product_batch_param_helper(
self,
filename,
filenames,
radar_product,
problem,
model_type,
train_data,
ground_truths,
images,
):
is_roost = int(self.label_dict[filename][0].is_roost)
polar_radius = [
float(self.label_dict[filename][i].polar_radius)
for i in range(len(self.label_dict[filename]))
]
polar_theta = [
float(self.label_dict[filename][i].polar_theta)
for i in range(len(self.label_dict[filename]))
]
roost_size = [
float(self.label_dict[filename][i].radius)
for i in range(len(self.label_dict[filename]))
]
if images != []:
if problem == "detection":
if np.array(train_data).size == 0:
train_data = images
train_data = np.array(train_data)
else:
train_data = np.concatenate((train_data, np.array(images)), axis=0)
if np.array(ground_truths).size == 0:
ground_truths = [[is_roost, 1 - is_roost]] * np.array(images).shape[
0
]
else:
ground_truths = np.concatenate(
(
ground_truths,
[[is_roost, 1 - is_roost]] * np.array(images).shape[0],
),
axis=0,
)
print("ground truths shape")
print(np.array(ground_truths).shape)
else: # localization
all_radii = np.array([])
all_thetas = np.array([])
for k in range(len(polar_radius)):
radii = np.array([polar_radius[k]] * np.array(images).shape[0])
if not np.isnan(np.sum(radii)):
mask_radii = [(radius / 300) * (240 / 2) for radius in radii]
thetas = []
for i in range(len(images)):
thetas.append(
adjustTheta(
self,
polar_theta[k],
self.label_dict[filename][0].images[radar_product][
i
],
)
)
all_radii = np.append(all_radii, np.array(mask_radii))
all_thetas = np.append(all_thetas, np.array(thetas))
all_radii = np.reshape(
all_radii,
(
len(self.label_dict[filename]),
int(len(all_radii) / len(self.label_dict[filename])),
),
)
all_thetas = np.reshape(
all_thetas,
(
len(self.label_dict[filename]),
int(len(all_thetas) / len(self.label_dict[filename])),
),
)
masks = np.zeros((len(all_radii[0]), 240, 240))
if type(roost_size) != float or math.isnan(roost_size):
roost_size = 28.0
else:
roost_size = roost_size / 1000 # convert to km
mask_roost_size = (roost_size / 300) * (240 / 2)
vconvert_to_cart = np.vectorize(convert_to_cart)
try:
cart_x, cart_y = vconvert_to_cart(all_radii, all_thetas)
except ValueError as e:
return train_data, ground_truths
for k in range(cart_x.shape[0]):
for j in range(cart_x.shape[1]):
try:
masks[j][
120 - int(round(cart_y[k][j])),
120 + int(round(cart_x[k][j])),
] = 1.0
color_pts = points_in_circle_np(
mask_roost_size,
y0=120 - int(round(cart_y[k][j])),
x0=120 + int(round(cart_x[k][j])),
)
for pt in color_pts:
masks[j][pt[0], pt[1]] = 1.0
except IndexError as e:
pass
if np.array(train_data).size == 0:
train_data = images
train_data = np.array(train_data)
else:
train_data = np.concatenate((train_data, np.array(images)), axis=0)
if np.array(ground_truths).size == 0:
ground_truths = masks
else:
ground_truths = np.concatenate((ground_truths, masks), axis=0)
train_data = np.array(train_data)
return train_data, ground_truths
def single_product_batch_params(
self,
ground_truths,
train_data,
filenames,
roost_sets,
no_roost_sets,
ml_set,
radar_product,
model_type,
problem,
is_eval=False,
):
extended_filenames = np.array([])
print("FILENAMES")
print(filenames)
if filenames == []:
for ml_sets in [roost_sets, no_roost_sets]:
if ml_sets[ml_set]: # in case you only train on true or false labels
indices = Batch_Generator.get_batch_indices(self, ml_sets, ml_set)
for i, index in enumerate(indices):
filename = ml_sets[ml_set][index]
print(filename)
if filename not in extended_filenames:
images = self.label_dict[filename][0].get_image(
radar_product
)
if images != []:
train_data, ground_truths = Batch_Generator.single_product_batch_param_helper(
self,
filename,
filenames,
radar_product,
problem,
model_type,
train_data,
ground_truths,
images,
)
#### !!!!
if is_eval == False:
extended_filenames = np.append(
extended_filenames, filename
)
else:
extended_filenames = np.append(
extended_filenames,
[filename]
* (len(train_data) - len(extended_filenames)),
)
else:
for filename in filenames:
images = self.label_dict[filename][0].get_image(radar_product)
if images != []:
train_data, ground_truths = Batch_Generator.single_product_batch_param_helper(
self,
filename,
filenames,
radar_product,
problem,
model_type,
train_data,
ground_truths,
images,
)
### !!!!
if is_eval == False:
extended_filenames = np.append(extended_filenames, filename)
else:
extended_filenames = np.append(
extended_filenames,
[filename] * (len(train_data) - len(extended_filenames)),
)
truth_shape = np.array(ground_truths).shape
print("truth shape: ")
print(truth_shape)
try:
if problem == "detection":
ground_truths = np.array(ground_truths).reshape(
truth_shape[0], truth_shape[1]
)
train_data_np = np.array(train_data)
shape = train_data_np.shape
train_data_np = train_data_np.reshape(
shape[0], shape[1], shape[2], shape[3]
)
print("RETURN SHAPES")
print(train_data_np.shape)
print(ground_truths.shape)
print(extended_filenames.shape)
return train_data_np, np.array(ground_truths), np.array(extended_filenames)
except IndexError as e:
print(e)
return None, None, None
class Single_Product_Batch_Generator(Batch_Generator):
def __init__(
self,
ml_label_csv,
ml_split_csv,
validate_k_index=3,
test_k_index=4,
default_batch_size=settings.DEFAULT_BATCH_SIZE,
root_dir=utils.RADAR_IMAGE_DIR,
high_memory_mode=False,
):
Batch_Generator.__init__(
self,
ml_label_csv,
ml_split_csv,
validate_k_index,
test_k_index,
default_batch_size,
root_dir,
)
ml_label_pd = pandas.read_csv(ml_label_csv)
for _, row in ml_label_pd.iterrows():
if row["AWS_file"] not in self.label_dict:
self.label_dict[row["AWS_file"]] = []
self.label_dict[row["AWS_file"]].append(
Labels.ML_Label(row["AWS_file"], row, self.root_dir, high_memory_mode)
)
def get_batch(
self,
ml_set,
dualPol,
radar_product=None,
num_temporal_data=0,
model_type="cnn",
problem="detection",
filenames=[],
is_eval=False,
):
"""Get a batch of data for machine learning. As a default, a batch
contains data from a single radar product.
Args:
ml_set: ML_Set enum value, train, test, or validation.
radar_product: Radar_Product enum value, reflectivity, velocity,
zdr, or rho_hv.
Returns:
train_data, ground_truth, filenames:
The ground truth is an array of batch size, where each item
in the array contains a single ground truth label.
The train_data is an array of images, corresponding to the
ground truth values.
filenames is an array of filenames, corresponding to the
ground truth values.
"""
if len(filenames) == 0:
ground_truths, train_data, filenames, roost_sets, no_roost_sets = Batch_Generator.get_batch(
self, ml_set, dualPol, radar_product
)
else:
ground_truths, train_data, _, roost_sets, no_roost_sets = Batch_Generator.get_batch(
self, ml_set, dualPol, radar_product
)
return Batch_Generator.single_product_batch_params(
self,
ground_truths,
train_data,
filenames,
roost_sets,
no_roost_sets,
ml_set,
radar_product,
model_type,
problem,
is_eval,
)
class Multiple_Product_Batch_Generator(Batch_Generator):
def __init__(
self,
ml_label_csv,
ml_split_csv,
validate_k_index=3,
test_k_index=4,
default_batch_size=settings.DEFAULT_BATCH_SIZE,
root_dir=utils.RADAR_IMAGE_DIR,
high_memory_mode=False,
):
Batch_Generator.__init__(
self,
ml_label_csv,
ml_split_csv,
validate_k_index,
test_k_index,
default_batch_size,
root_dir,
)
print("after Batch_Generator.__init__")
print(ml_label_csv)
ml_label_pd = pandas.read_csv(ml_label_csv)
print(ml_label_pd.shape)
for _, row in ml_label_pd.iterrows():
if row["AWS_file"] not in self.label_dict:
self.label_dict[row["AWS_file"]] = [
Labels.ML_Label(
row["AWS_file"], row, self.root_dir, high_memory_mode
)
]
else:
self.label_dict[row["AWS_file"]].append(
Labels.ML_Label(
row["AWS_file"], row, self.root_dir, high_memory_mode
)
)
# channels will be RGB values, first dimension will be radar products
def get_batch(
self,
ml_set,
dualPol,
batch_size=settings.DEFAULT_BATCH_SIZE,
loaded_models=None,
num_temporal_data=0,
model_type="cnn",
problem="detection",
):
"""Get a batch of data for machine learning. This batch contains data
with four channels in it, one for each radar product. For dualPol data
this will be four radar products, and for legacy data this will be two
radar products.
Args:
ml_set: ML_Set enum value, train, test, or validation.
dualPol: Boolean, true if the data is dual pol, false if the radar
data is legacy.
Returns:
train_data, ground_truth, filenames:
The ground truth is an array of batch size, where each item
in the array contains a single ground truth label.
The train_data is an array of images, corresponding to the
ground truth values.
filenames is an array of filenames, corresponding to the
ground truth values.
"""
ground_truths, train_data, filenames, roost_sets, no_roost_sets = Batch_Generator.get_batch(
self, ml_set, dualPol, radar_product=None
)
train_list = []
truth_list = []
pred_list = []
file_list = []
radar_products = [
utils.Radar_Products.cc,
utils.Radar_Products.diff_reflectivity,
utils.Radar_Products.reflectivity,
utils.Radar_Products.velocity,
]
for k, product in enumerate(radar_products):
print(product)
print("BEFORE")
print(len(filenames))
train, truth, filenames = Batch_Generator.single_product_batch_params(
self,
ground_truths,
train_data,
filenames,
roost_sets,
no_roost_sets,
ml_set,
product,
model_type,
problem,
)
print("AFTER")
print(len(filenames))
print("train.shape")
print(np.array(train).shape)
print("truth.shape")
print(np.array(truth).shape)
print("filenames.shape")
print(np.array(filenames).shape)
predictions = np.array([])
for i in range(0, len(train), batch_size):
train_batch = []
for j in range(0, batch_size):
if (i + j) < len(train):
train_batch.append(train[i + j])
train_batch = np.array(train_batch)
if len(train_batch) > 0:
if problem == "detection":
pred = loaded_models[k].predict_proba(train_batch)
else:
pred = loaded_models[k].predict(train_batch)
predictions = np.append(predictions, np.array(pred))
predictions = np.reshape(predictions, (-1, 240, 240))
print("predictions.shape")
print(predictions.shape)
train_list.append(np.array(train))
truth_list.append(np.array(truth))
file_list.append(np.array(filenames))
print("train_list.shape")
print(np.array(train_list).shape)
print("truth_list.shape")
print(np.array(truth_list).shape)
print("file_list.shape")
print(np.array(file_list).shape)
print("predictions.shape")
print(np.array(predictions).shape)
# try:
if problem == "detection":
predictions = np.reshape(
predictions, (np.array(truth_list).shape[1], 2)
)
else:
predictions = np.reshape(predictions, (-1, 240, 240))
print(np.array(truth_list).shape)
pred_list.append(predictions)
print("train_list, truth_list, pred_list, file_list")
print(np.array(train_list).shape)
print(np.array(truth_list).shape)
print(np.array(pred_list).shape)
print(np.array(file_list).shape)
return (
np.array(train_list),
np.array(truth_list),
np.array(pred_list),
np.array(file_list),
)
def normalize(self, x, maxi, mini):
if type(x) is list:
return [(y - mini) / (maxi - mini) for y in x]
else:
return (x - mini) / (maxi - mini)
def adjustTheta(self, theta, path):
filename = os.path.splitext(ntpath.basename(path))[0]
parts = filename.split("_")
if "flip" in parts:
if theta > 180.0:
theta = 540 - theta
else:
theta = 180 - theta
# rotation
try:
if "noise" in parts:
degree_offset = int(parts[-2])
else:
degree_offset = int(parts[-1])
theta += degree_offset
except ValueError:
return theta
return theta
def convert_to_cart(radius, theta):
return radius * math.cos(theta), radius * math.sin(theta)
def points_in_circle_np(radius, y0=0, x0=0):
x_ = np.arange(x0 - radius - 1, x0 + radius + 1, dtype=int)
y_ = np.arange(y0 - radius - 1, y0 + radius + 1, dtype=int)
y, x = np.where((y_[:, np.newaxis] - y0) ** 2 + (x_ - x0) ** 2 <= radius ** 2)
for y, x in zip(y_[y], x_[x]):
yield y, x
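# Illustrative usage sketch (added, not part of the original module): shows the two small
# geometry helpers above on toy values. Theta is treated as radians here because
# convert_to_cart calls math.cos/math.sin directly.
if __name__ == "__main__":
    x, y = convert_to_cart(10.0, math.pi / 4)
    print(round(x, 2), round(y, 2))                  # 7.07 7.07
    disc = list(points_in_circle_np(2, y0=5, x0=5))
    print(len(disc), disc[:3])                       # pixel coordinates inside a radius-2 disc at (5, 5)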
|
StarcoderdataPython
|
3391849
|
#!/usr/bin/env python3
"""PyTest tests for the get_datatype.py module.
"""
import os
import sys
sys.path.append(os.path.join(os.path.dirname(sys.path[0]),'amoebaelib')) # Customize.
from data.datatype_test_seqs import testseq1
from get_datatype import \
get_datatype_for_sequence_string, \
get_dbtype
def test_get_datatype_for_sequence_string():
"""Test the get_datatype_for_sequence_string function in the get_datatype.py file.
"""
##########################
# Arrange.
x1 = 'ATGC'*3 + 'Y'
x2 = 'atgc'*3 + 'y'
x3 = 'ATGCV'
x4 = 'atgcv'
x5 = 'tccaaaaaatcgaaTTATYttattccccaccttcttttctcattttttga'
x6 = testseq1
##########################
# Act.
dbtype_1 = get_datatype_for_sequence_string(x1)
dbtype_2 = get_datatype_for_sequence_string(x2)
dbtype_3 = get_datatype_for_sequence_string(x3)
dbtype_4 = get_datatype_for_sequence_string(x4)
dbtype_5 = get_datatype_for_sequence_string(x5)
dbtype_6 = get_datatype_for_sequence_string(x6)
##########################
# Assert.
assert dbtype_1 == 'nucl'
assert dbtype_2 == 'nucl'
assert dbtype_3 == 'prot'
assert dbtype_4 == 'prot'
assert dbtype_5 == 'nucl'
assert dbtype_6 == 'nucl'
def test_get_dbtype(): # ***Incomplete test
"""Test the get_dbtype function in the get_datatype.py file.
"""
##########################
# Arrange.
f = "f"
##########################
# Act.
#x = get_dbtype(f)
##########################
# Assert.
assert True == True # ***Temporary.
|
StarcoderdataPython
|
188799
|
'''
URL: https://leetcode.com/problems/minimum-distance-between-bst-nodes/description/
Time complexity: O(n)
Space complexity: O(n)
'''
class Solution(object):
def minDiffInBST(self, root):
"""
:type root: TreeNode
:rtype: int
"""
sorted_lst = []
self.sort_vals(root, sorted_lst)
min_diff = -1
for i in range(len(sorted_lst)-1):
curr_diff = abs(sorted_lst[i] - sorted_lst[i+1])
if min_diff == -1:
min_diff = curr_diff
else:
min_diff = min(min_diff, curr_diff)
return min_diff
def sort_vals(self, node, sorted_lst):
if node is None:
return None
self.sort_vals(node.left, sorted_lst)
sorted_lst.append(node.val)
self.sort_vals(node.right, sorted_lst)
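# Illustrative usage sketch (added, not part of the original solution): LeetCode provides
# its own TreeNode class, so a minimal stand-in is defined here just to run the solution
# on the BST [4, 2, 6, 1, 3], whose minimum difference between node values is 1.
class TreeNode(object):
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

if __name__ == "__main__":
    root = TreeNode(4, TreeNode(2, TreeNode(1), TreeNode(3)), TreeNode(6))
    print(Solution().minDiffInBST(root))  # 1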
|
StarcoderdataPython
|
186478
|
<reponame>sifrovacky-cz/kachna
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
#User profile model, one to one relation to User model
#Note: User = team, participants = team members
class UserProfile (models.Model):
user = models.OneToOneField(User, on_delete = models.CASCADE)
#participants
participant_one = models.CharField(max_length=100)
participant_two = models.CharField(max_length=100,blank=True)
participant_three = models.CharField(max_length=100,blank=True)
participant_four = models.CharField(max_length=100,blank=True)
def __str__(self):
return self.user.username
|
StarcoderdataPython
|
176354
|
<filename>Crawler/filecrawler.py<gh_stars>0
from apiproxy import ApiProxy, RestApiResponse
from model import AmbarFileMeta,AmbarCrawlerSettings
from logger import AmbarLogger
from abc import *
from hashlib import sha256
import hashlib
import re
class FileCrawler:
def __init__(self, ApiProxy, CrawlerSettings):
"""Initializing file crawler
CrawlerSettings - AmbarCrawlerSettings object from model
        ApiProxy - initialized ApiProxy object
"""
self.settings = CrawlerSettings
self.apiProxy = ApiProxy
self.regex = re.compile(self.settings.file_regex, re.I)
self.logger = AmbarLogger(self.apiProxy, self.settings.uid, self.settings.verbose)
        self.logger.LogMessage('info', 'filecrawler {0} initialized'.format(self.settings.uid))
@abstractmethod
def Crawl(self):
"""Crawling method
Should call ProcessFileCallback() on every file that should be processed
"""
pass
def ProcessFileCallback(self, FileDataStream, FileSize, CreateTime, UpdateTime, ShortName, FullName):
"""Callback method for file crawler
FileDataStream - ByteIO object
"""
fileMeta = AmbarFileMeta.Init(CreateTime, UpdateTime, ShortName, FullName, self.settings.id)
sha = sha256(FileDataStream.getvalue()).hexdigest()
## checking content existance
apiResp = self.apiProxy.CheckIfAmbarFileParsedContentExists(sha)
if not apiResp.Success:
self.logger.LogMessage('error', 'error checking content existance {0} {1}'.format(fileMeta.full_name, apiResp.message))
return
if not (apiResp.Found or apiResp.NotFound):
self.logger.LogMessage('error', 'unexpected response on checking content existance {0} {1} {2}'.format(fileMeta.full_name, apiResp.code, apiResp.message))
return
if apiResp.NotFound:
self.logger.LogMessage('verbose', 'content not found {0}'.format(fileMeta.full_name))
## creating content
createContentApiResp = self.apiProxy.CreateAmbarFileContent(FileDataStream, sha)
if not createContentApiResp.Success:
self.logger.LogMessage('error', 'error creating content {0} {1}'.format(fileMeta.full_name, createContentApiResp.message))
return
if not (createContentApiResp.Found or createContentApiResp.Created):
self.logger.LogMessage('error', 'unexpected response on create content {0} {1} {2}'.format(fileMeta.full_name, createContentApiResp.code, createContentApiResp.message))
return
if createContentApiResp.Found:
self.logger.LogMessage('verbose', 'content found {0}'.format(fileMeta.full_name))
if createContentApiResp.Created:
self.logger.LogMessage('verbose', 'content created {0}'.format(fileMeta.full_name))
if apiResp.Found:
self.logger.LogMessage('verbose', 'content found {0}'.format(fileMeta.full_name))
## sending meta to pipeline
apiResp = self.apiProxy.AddAmbarFileMeta(fileMeta, sha, FileSize, self.settings.index_name, self.settings.uid)
if not apiResp.Success:
self.logger.LogMessage('error', 'error adding meta {0} {1}'.format(fileMeta.full_name, apiResp.message))
return
if apiResp.BadRequest:
self.logger.LogMessage('verbose', 'bad meta, ignoring... {0}'.format(fileMeta.full_name))
return
if apiResp.InsufficientStorage:
raise Exception('insufficient storage')
return
if not apiResp.Ok:
self.logger.LogMessage('error', 'unexpected response on adding meta {0} {1} {2}'.format(fileMeta.full_name, apiResp.code, apiResp.message))
return
self.logger.LogMessage('verbose', 'meta added {0}'.format(fileMeta.full_name))
def TurboCheckMetaExistanceCallback(self, CreateTime, UpdateTime, ShortName, FullName):
"""Callback method for turbo checking meta existance
"""
amFileMeta = AmbarFileMeta.Init(CreateTime, UpdateTime, ShortName, FullName, self.settings.id)
apiResp = self.apiProxy.CheckIfAmbarFileMetaExists(amFileMeta, self.settings.index_name)
if not apiResp.Success:
self.logger.LogMessage('error', 'error checking meta existance {0} {1}'.format(FullName, apiResp.message))
return None
if apiResp.Ok:
return True
if apiResp.NotFound:
return False
self.logger.LogMessage('error', 'unexpected response on turbo check meta existance {0} {1} {2}'.format(FullName, apiResp.code, apiResp.message))
return None
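# Illustrative sketch (added, not part of the original module): a minimal local-disk crawler
# showing how a concrete subclass is expected to drive ProcessFileCallback from Crawl().
# The os.walk traversal, io.BytesIO wrapping, root path and ISO timestamp format are
# assumptions about intended usage, not Ambar's real connector code.
import io
import os
from datetime import datetime

class LocalDiskFileCrawler(FileCrawler):
    def Crawl(self):
        for root, _, files in os.walk('/data/to/index'):  # hypothetical root directory
            for name in files:
                if not self.regex.match(name):
                    continue
                full_name = os.path.join(root, name)
                stat = os.stat(full_name)
                with open(full_name, 'rb') as f:
                    stream = io.BytesIO(f.read())
                self.ProcessFileCallback(
                    stream,
                    stat.st_size,
                    datetime.fromtimestamp(stat.st_ctime).isoformat(),
                    datetime.fromtimestamp(stat.st_mtime).isoformat(),
                    name,
                    full_name)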
|
StarcoderdataPython
|
45320
|
<filename>elationmagic.py
"""
@copyright: 2013 Single D Software - All Rights Reserved
@summary: Elation Magic 260 MIDI interface for Light Maestro.
"""
# Standard library imports
import logging
# Additional library imports
import rtmidi
import rtmidi.midiconstants
# Application imports
import console
# Named logger for this module
_logger = logging.getLogger(__name__)
class ElationMagic(console.Console):
"""The console class that communicates with the Elation Magic 260."""
def _sendmidi(self, channel, note):
try:
self._midi.send_message((rtmidi.midiconstants.NOTE_ON | channel, note, 127))
_logger.debug('Sent note {0} to channel {1}'.format(note, channel))
except RuntimeError:
raise console.CommunicationError
def getstatus(self):
"""
Provide status information for the connection to the console.
@return: Dictionary containing status information
"""
status = super().getstatus()
status['condition'] = 'operational' if self._midi else 'nonoperational'
return status
def getchannels(self):
raise console.NotSupportedError
def loadchannels(self, data, sceneid=None):
raise console.NotSupportedError
def getscenes(self):
raise console.NotSupportedError
def getscene(self, sceneid):
raise console.NotSupportedError
def loadscene(self, sceneid):
try:
channel, note = divmod(int(sceneid) - 1, 72)
self._sendmidi(channel, note)
except ValueError:
_logger.warning('Non-numeric scenes are not supported.')
def savescene(self, sceneid, fade=5, scene=None):
raise console.NotSupportedError
def deletescene(self, sceneid):
raise console.NotSupportedError
def __init__(self, parameter='USB'):
self._midi = rtmidi.MidiOut()
for p, portname in enumerate(self._midi.get_ports()):
if parameter in portname:
self._midi.open_port(p)
_logger.info('Connected to MIDI device "{0}"'.format(self._midi.get_port_name(p)))
super().__init__()
return
_logger.warning('No USB MIDI device found')
|
StarcoderdataPython
|
3389691
|
#!/usr/bin/env python3
import asyncio
import logging
from random import randint
import aiohttp
from asyncpraw import Reddit
from asyncpraw.models import Comment
from asyncprawcore.exceptions import ServerError
from dynaconf import Dynaconf
from discord_logging import DiscordWebhookHandler
config = Dynaconf(settings_files=["settings.toml", ".secrets.toml"])
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter(fmt="[{levelname}] {message}", style="{"))
stream_handler.setLevel(logging.DEBUG)
logger.addHandler(stream_handler)
if config.webhook:
discord_handler = DiscordWebhookHandler(config.webhook, min_emit_interval=0.0)
discord_handler.setFormatter(
logging.Formatter(
fmt="[{levelname} | {asctime}] {message}",
datefmt="%Y-%m-%d %H:%M:%S %Z",
style="{",
)
)
discord_handler.setLevel(logging.INFO)
logger.addHandler(discord_handler)
url = f"https://commentanalyzer.googleapis.com/v1alpha1/comments:analyze?key={config.credentials.perspective.api_key}"
params = {
"languages": ["en"],
"requestedAttributes": {
"TOXICITY": {},
"SEVERE_TOXICITY": {},
"IDENTITY_ATTACK": {},
"INSULT": {},
"THREAT": {},
},
"communityId": f"reddit.com/r/{config.subreddit}",
}
async def authenticate_reddit(username: str) -> Reddit:
reddit_instance = Reddit(
username=username,
user_agent=f"web:mod.{config.subreddit}.{username}.Perspective:v{config.version} by {config.author})",
**config.credentials[username],
)
logger.info(f"Authenticated as {await reddit_instance.user.me()}!")
return reddit_instance
async def main():
mod_reddit = await authenticate_reddit(config.mod_username)
stream_reddit = await authenticate_reddit(config.stream_username)
subreddit = await stream_reddit.subreddit(config.subreddit)
while True:
try:
async for comment in subreddit.stream.comments(skip_existing=False):
await process_comment(comment, mod_reddit)
except ServerError as e:
sleep_duration = randint(25, 35)
logger.warning(f"Server error, retrying in {sleep_duration}s", exc_info=e)
await asyncio.sleep(sleep_duration)
except Exception as e:
logger.error(f"Encountered exception:", exc_info=e)
raise e
async def process_comment(comment: Comment, mod_reddit: Reddit) -> None:
results = await evaluate_comment(comment)
cleaned_permalink = comment.permalink.replace(comment.permalink.split("/")[5], "_")
log_content = (
f"New comment {comment.id} by {comment.author}\n"
f"https://www.reddit.com{cleaned_permalink}\n"
f"{comment.body[:1500]}\n\n"
)
log_func = logger.debug
for attribute, score in results.items():
log_content += f"{attribute:16s}: {score:6.2%}\n"
if score >= config.threshold[attribute]:
log_func = logger.info
# handoff to mod account to enable free-form report
comment = await mod_reddit.comment(comment.id, lazy=True)
await comment.report(
f"{attribute}: {score:.2%} | threshold: {config.threshold[attribute]:.2%}"
)
log_func(log_content)
async def evaluate_comment(comment: Comment) -> dict[str, float]:
params["comment"] = {"text": comment.body}
async with aiohttp.ClientSession() as session:
# sleep to avoid hitting rate limit
await asyncio.sleep(1)
async with session.post(url, json=params) as response:
response_dict = await response.json()
return {
attribute.lower(): val["summaryScore"]["value"]
for attribute, val in response_dict["attributeScores"].items()
}
if __name__ == "__main__":
try:
asyncio.run(main())
except KeyboardInterrupt:
pass
|
StarcoderdataPython
|
1617335
|
import pickle
import numpy as np
from tqdm.auto import tqdm
import moses
from moses import CharVocab
class NGram:
def __init__(self, max_context_len=10, verbose=False):
self.max_context_len = max_context_len
self._dict = dict()
self.vocab = None
self.default_probs = None
self.zero_probs = None
self.verbose = verbose
def fit(self, data):
self.vocab = CharVocab.from_data(data)
self.default_probs = np.hstack([np.ones(len(self.vocab)-4),
np.array([0., 1., 0., 0.])])
self.zero_probs = np.zeros(len(self.vocab))
if self.verbose:
print('fitting...')
data = tqdm(data, total=len(data))
for line in data:
t_line = tuple(self.vocab.string2ids(line, True, True))
for i in range(len(t_line)):
for shift in range(self.max_context_len):
if i + shift + 1 >= len(t_line):
break
context = t_line[i:i+shift+1]
cid = t_line[i+shift+1]
probs = self._dict.get(context, self.zero_probs.copy())
probs[cid] += 1.
self._dict[context] = probs
def fit_update(self, data):
if self.verbose:
print('fitting...')
data = tqdm(data, total=len(data))
for line in data:
t_line = tuple(self.vocab.string2ids(line, True, True))
for i in range(len(t_line)):
for shift in range(self.max_context_len):
if i + shift + 1 >= len(t_line):
break
context = t_line[i:i+shift+1]
cid = t_line[i+shift+1]
probs = self._dict.get(context, self.zero_probs.copy())
probs[cid] += 1.
self._dict[context] = probs
def generate_one(self, l_smooth=0.01, context_len=None, max_len=100):
if self.vocab is None:
raise RuntimeError('Error: Fit the model before generating')
if context_len is None:
context_len = self.max_context_len
elif context_len <= 0 or context_len > self.max_context_len:
context_len = self.max_context_len
res = [self.vocab.bos]
while res[-1] != self.vocab.eos and len(res) < max_len:
begin_index = max(len(res)-context_len, 0)
context = tuple(res[begin_index:])
while context not in self._dict:
context = context[1:]
probs = self._dict[context]
smoothed = probs + self.default_probs*l_smooth
normed = smoothed / smoothed.sum()
next_symbol = np.random.choice(len(self.vocab), p=normed)
res.append(next_symbol)
return self.vocab.ids2string(res)
def nll(self, smiles, l_smooth=0.01, context_len=None):
if self.vocab is None:
raise RuntimeError('Error: model is not trained')
if context_len is None:
context_len = self.max_context_len
elif context_len <= 0 or context_len > self.max_context_len:
context_len = self.max_context_len
tokens = tuple(self.vocab.string2ids(smiles, True, True))
likelihood = 0.
for i in range(1, len(tokens)):
begin_index = max(i-context_len, 0)
context = tokens[begin_index:i]
while context not in self._dict:
context = context[1:]
probs = self._dict[context] + self.default_probs
normed = probs / probs.sum()
prob = normed[tokens[i]]
if prob == 0.:
return np.inf
likelihood -= np.log(prob)
return likelihood
def generate(self, n, l_smooth=0.01, context_len=None, max_len=100):
generator = (self.generate_one(l_smooth,
context_len,
max_len) for i in range(n))
if self.verbose:
print('generating...')
generator = tqdm(generator, total=n)
return list(generator)
def save(self, path):
"""
Saves a model using pickle
Arguments:
path: path to .pkl file for saving
"""
if self.vocab is None:
raise RuntimeError("Can't save empty model."
" Fit the model first")
data = {
'_dict': self._dict,
'vocab': self.vocab,
'default_probs': self.default_probs,
'zero_probs': self.zero_probs,
'max_context_len': self.max_context_len
}
with open(path, 'wb') as f:
pickle.dump(data, f)
@classmethod
def load(cls, path):
"""
Loads saved model
Arguments:
path: path to saved .pkl file
Returns:
Loaded NGramGenerator
"""
with open(path, "rb") as f:
data = pickle.load(f)
model = cls()
model._dict = data['_dict']
model.vocab = data['vocab']
model.default_probs = data['default_probs']
model.zero_probs = data['zero_probs']
model.max_context_len = data['max_context_len']
return model
def reproduce(seed, samples_path=None, metrics_path=None,
n_jobs=1, device='cpu', verbose=False,
samples=30000):
data = moses.get_dataset('train')
model = NGram(10, verbose=verbose)
model.fit(data)
np.random.seed(seed)
smiles = model.generate(samples, l_smooth=0.01)
metrics = moses.get_all_metrics(smiles, n_jobs=n_jobs, device=device)
if samples_path is not None:
with open(samples_path, 'w') as out:
out.write('SMILES\n')
for s in smiles:
out.write(s+'\n')
if metrics_path is not None:
with open(metrics_path, 'w') as out:
for key, value in metrics.items():
out.write("%s,%f\n" % (key, value))
return smiles, metrics
|
StarcoderdataPython
|
3332552
|
<filename>src/server/TCGA/TCGACaller.py
__author__ = 'guorongxu'
import os
import subprocess
import itertools
from datetime import datetime
tumor_types = ["PRAD", "STES"]
#tumor_types = ["ACC", "BLCA", "BRCA", "CESC", "CHOL", "COAD", "COADREAD", "DLBC",
# "ESCA", "GBM", "GBMLGG", "HNSC", "KICH", "KIPAN", "KIRC", "KIRP",
# "LAML", "LGG", "LUAD", "LUSC", "MESO", "OV", "PAAD", "PCPG",
# "PRAD", "READ", "SARC", "SKCM", "STAD", "STES", "TGCT", "THCA",
# "THYM", "UCEC", "UCS", "UVM"]
## To download raw data from TCGA website.
def download(workspace, data_set, release_year, release_month, release_day):
root_raw_dir = workspace + "/" + data_set + "/raw_data"
for tumor_type in tumor_types:
subprocess.call(["qsub", "-o", "search_engine.log", "-e", "search_engine.log",
workspace + "/codes/" + data_set + "/tcga.sh", "download", root_raw_dir,
tumor_type, release_year, release_month, release_day])
## To parse the raw data and output the expression tables
def parse(workspace, data_set, release_year, release_month, release_day):
root_raw_dir = workspace + "/" + data_set + "/raw_data"
root_expression_dir = workspace + "/" + data_set + "/expression_files"
for tumor_type in tumor_types:
print datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ": System is parsing " + tumor_type + " miRNA data."
subprocess.call(["qsub", "-o", "search_engine.log", "-e", "search_engine.log",
workspace + "/codes/" + data_set + "/tcga.sh", "parse_mirna", root_raw_dir,
root_expression_dir, tumor_type, release_year, release_month, release_day])
print datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ": System is parsing " + tumor_type + " RNASeq data."
subprocess.call(["qsub", "-o", "search_engine.log", "-e", "search_engine.log",
workspace + "/codes/" + data_set + "/tcga.sh", "parse_rnaseq", root_raw_dir,
root_expression_dir, tumor_type, release_year, release_month, release_day])
print datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ": System is parsing " + tumor_type + " mutation data."
subprocess.call(["qsub", "-o", "search_engine.log", "-e", "search_engine.log",
workspace + "/codes/" + data_set + "/tcga.sh", "parse_mutation", root_raw_dir,
root_expression_dir, tumor_type, release_year, release_month, release_day])
## To calculate the correlations.
def calculate(workspace, data_set):
## Iterate all combinations
root_expression_dir = workspace + "/" + data_set + "/expression_files"
root_correlation_dir = workspace + "/" + data_set + "/correlation_files"
data_types = ["mirna", "rnaseq", "mutation"]
combinations = list(itertools.combinations_with_replacement(data_types, 2))
for tumor_type in tumor_types:
for combination in combinations:
input_file_0 = root_expression_dir + "/" + tumor_type + "/" + combination[0] + "_matrix.txt"
input_file_1 = root_expression_dir + "/" + tumor_type + "/" + combination[1] + "_matrix.txt"
output_file = root_correlation_dir + "/" + tumor_type + "/" + combination[0] + "_vs_" + combination[1] + ".cor"
if not os.path.exists(os.path.dirname(output_file)):
os.makedirs(os.path.dirname(output_file))
print datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ": System is calculating correlation with " \
+ tumor_type + ": " + combination[0] + "_vs_" + combination[1] + "."
threshold = "0.0"
if combination[0] == "rnaseq" or combination[0] == "mutation":
subprocess.call(["qsub", "-o", "search_engine.log", "-e", "search_engine.log",
workspace + "/codes/" + data_set + "/tcga.sh", "calculate",
output_file, input_file_0, input_file_1, threshold])
else:
subprocess.call(["qsub", "-pe", "smp", "1", "-o", "search_engine.log", "-e", "search_engine.log",
workspace + "/codes/" + data_set + "/tcga.sh", "calculate",
output_file, input_file_0, input_file_1, threshold])
## To filter the correlation edges with less than cutoff.
def filter(workspace, data_set, cut_off=0.5):
root_correlation_dir = workspace + "/" + data_set + "/correlation_files"
## Iterate all combinations
data_types = ["mirna", "rnaseq", "mutation"]
combinations = list(itertools.combinations_with_replacement(data_types, 2))
for tumor_type in tumor_types:
for combination in combinations:
input_file = root_correlation_dir + "/" + tumor_type + "/" + combination[0] + "_vs_" + combination[1] + ".cor"
output_file = root_correlation_dir + "/" + tumor_type + "/" \
+ combination[0] + "_vs_" + combination[1] + "_" + str(cut_off) + ".cor"
if not os.path.exists(os.path.dirname(output_file)):
os.makedirs(os.path.dirname(output_file))
print datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ": System is filtering " + tumor_type \
+ ": " + combination[0] + "_vs_" + combination[1]
subprocess.call(["qsub", "-pe", "smp", "8", "-o", "search_engine.log", "-e", "search_engine.log",
workspace + "/codes/" + data_set + "/tcga.sh", "filter", input_file, output_file, str(cut_off)])
## To cluster the input correlation file and output to the cluster directory.
def louvain_cluster(workspace, data_set):
root_correlation_dir = workspace + "/" + data_set + "/correlation_files"
root_cluster_dir = workspace + "/" + data_set + "/louvain_cluster_files/"
## Iterate all combinations
data_types = ["mirna", "rnaseq", "mutation"]
combinations = list(itertools.combinations_with_replacement(data_types, 2))
for tumor_type in tumor_types:
for combination in combinations:
input_file = root_correlation_dir + "/" + tumor_type + "/" + combination[0] + "_vs_" + combination[1] + "_0.5.cor"
for gamma in [1, 4, 7, 11, 14, 17, 20]:
output_folder = root_cluster_dir + "/" + tumor_type + "/" + combination[0] + "_vs_" + combination[1] + "_gamma_" + str(gamma)
output_file = output_folder + "/" + combination[0] + "_vs_" \
+ combination[1] + "_gamma_" + str(gamma) + ".louvain.tsv"
if not os.path.exists(os.path.dirname(output_file)):
os.makedirs(os.path.dirname(output_file))
print datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ": System is clustering " + input_file
subprocess.call(["qsub", "-pe", "smp", "2", "-o", "search_engine.log", "-e", "search_engine.log",
workspace + "/codes/" + data_set + "/tcga.sh", "louvain_cluster", input_file, output_file, str(gamma)])
## To cluster the input correlation file and output to the cluster directory.
def dedup_louvain_cluster(workspace, data_set):
root_cluster_dir = workspace + "/" + data_set + "/louvain_cluster_files"
root_unique_cluster_dir = workspace + "/" + data_set + "/louvain_unique_cluster_files"
## Iterate all combinations
data_types = ["mirna", "rnaseq", "mutation"]
combinations = list(itertools.combinations_with_replacement(data_types, 2))
for tumor_type in tumor_types:
for combination in combinations:
input_folder = root_cluster_dir + "/" + tumor_type
output_folder = root_unique_cluster_dir + "/" + tumor_type
if not os.path.exists(os.path.dirname(output_folder)):
os.makedirs(os.path.dirname(output_folder))
print datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ": System is clustering " + input_folder
subprocess.call(["qsub", "-pe", "smp", "1", "-o", "search_engine.log", "-e", "search_engine.log",
workspace + "/codes/" + data_set + "/tcga.sh", "dedup_louvain_cluster", input_folder,
output_folder, combination[0] + "_vs_" + combination[1]])
## To cluster the input correlation file and output to the cluster directory.
def oslom_undirected_cluster(workspace, data_set):
root_correlation_dir = workspace + "/" + data_set + "/correlation_files"
root_cluster_dir = workspace + "/" + data_set + "/oslom_undirected_cluster_files/"
## Iterate all combinations
data_types = ["mirna", "rnaseq", "mutation"]
combinations = list(itertools.combinations_with_replacement(data_types, 2))
for tumor_type in tumor_types:
for combination in combinations:
input_file = root_correlation_dir + "/" + tumor_type + "/" + combination[0] + "_vs_" + combination[1] + "_0.5.cor"
for gamma in [1]:
output_folder = root_cluster_dir + "/" + tumor_type + "/" + combination[0] + "_vs_" + combination[1] + "_gamma_" + str(gamma)
output_file = output_folder + "/" + combination[0] + "_vs_" \
+ combination[1] + "_gamma_" + str(gamma) + ".oslom_undirected.tsv"
if not os.path.exists(os.path.dirname(output_file)):
os.makedirs(os.path.dirname(output_file))
if not os.path.exists(output_file):
print datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ": System is clustering " + input_file
subprocess.call(["qsub", "-pe", "smp", "8", "-o", "search_engine.log", "-e", "search_engine.log",
workspace + "/codes/" + data_set + "/tcga.sh", "oslom_undirected_cluster", input_file, output_file, str(gamma)])
## To cluster the input correlation file and output to the cluster directory.
def ivanovska_cluster(workspace, data_set):
root_correlation_dir = workspace + "/" + data_set + "/correlation_files"
root_cluster_dir = workspace + "/" + data_set + "/cluster_files"
## Iterate all combinations
data_types = ["mirna", "rnaseq"]
for tumor_type in tumor_types:
input_file = root_correlation_dir + "/" + tumor_type + "/" + data_types[0] + "_vs_" + data_types[1] + ".cor"
for gamma in [1]:
output_file = root_cluster_dir + "/" + tumor_type + "/" + data_types[0] + "_vs_" \
+ data_types[1] + "_gamma_" + str(gamma) + ".ivanovska.tsv"
if not os.path.exists(os.path.dirname(output_file)):
os.makedirs(os.path.dirname(output_file))
print datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ": System is clustering " + input_file
subprocess.call(["qsub", "-pe", "smp", "8", "-o", "search_engine.log", "-e", "search_engine.log",
workspace + "/codes/" + data_set + "/tcga.sh", "ivanovska_cluster", input_file, output_file, str(gamma)])
## To replace the correlation values in the cluster RNAseq_vs_RNAseq.
def replace_cluster(workspace, data_set):
root_correlation_dir = workspace + "/" + data_set + "/correlation_files"
root_cluster_dir = workspace + "/" + data_set + "/oslom_undirected_cluster_files"
## Iterate all combinations
data_types = ["rnaseq","rnaseq"]
for tumor_type in tumor_types:
input_file = root_correlation_dir + "/" + tumor_type + "/" + data_types[0] + "_vs_" + data_types[1] + "_0.5.cor"
for gamma in [1]:
cluster_folder = root_cluster_dir + "/" + tumor_type + "/" + data_types[0] + "_vs_" + data_types[1] + "_gamma_" + str(gamma)
cluster_file = cluster_folder + "/" + data_types[0] + "_vs_" \
+ data_types[1] + "_gamma_" + str(gamma) + ".oslom_undirected.tsv"
print datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ": System is clustering " + input_file
subprocess.call(["qsub", "-pe", "smp", "1", "-o", "search_engine.log", "-e", "search_engine.log",
workspace + "/codes/" + data_set + "/tcga.sh", "replace_cluster", input_file, cluster_file, str(gamma)])
## To parse the cluster file and then print json file.
def print_louvain_json(workspace, data_set):
prefix = "clusters_tcga_louvain"
root_cluster_dir = workspace + "/" + data_set + "/louvain_unique_cluster_files"
root_louvain_json_dir = workspace + "/" + data_set + "/louvain_json_files"
## Iterate all combinations
data_types = ["mirna", "rnaseq", "mutation"]
combinations = list(itertools.combinations_with_replacement(data_types, 2))
print "printing louvain json..."
for tumor_type in tumor_types:
## Print cluster json files
for combination in combinations:
for gamma in [1, 4, 7, 11, 14, 17, 20]:
input_folder = root_cluster_dir + "/" + tumor_type + "/" + combination[0] + "_vs_" + combination[1] + "_gamma_" + str(gamma)
input_file = input_folder + "/" + combination[0] + "_vs_" + combination[1] + "_gamma_" + str(gamma) + ".louvain.unique.tsv"
output_folder = root_louvain_json_dir + "/" + tumor_type + "/" + combination[0] + "_vs_" + combination[1] + "_gamma_" + str(gamma)
output_file = output_folder + "/" + combination[0] + "_vs_" + combination[1] \
+ "_gamma_" + str(gamma) + ".json"
if os.path.exists(os.path.dirname(input_file)):
if not os.path.exists(os.path.dirname(output_file)):
os.makedirs(os.path.dirname(output_file))
print datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ": System is printing " + input_file
subprocess.call(["qsub", "-pe", "smp", "1", "-o", "search_engine.log", "-e", "search_engine.log",
workspace + "/codes/" + data_set + "/tcga.sh", "print_louvain_json", workspace, input_file,
output_file, tumor_type, combination[0] + "_vs_" + combination[1], prefix])
## Print star json files
#print datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ": System is printing " + tumor_type
#subprocess.call(["qsub", "-pe", "smp", "4", "-o", "search_engine.log", "-e", "search_engine.log",
# workspace + "/codes/" + data_set + "/tcga.sh", "print_star_json", workspace, data_set, tumor_type])
## To parse the cluster file and then print json file.
def print_oslom_undirected_json(workspace, data_set):
prefix = "clusters_tcga_oslom"
root_cluster_dir = workspace + "/" + data_set + "/oslom_undirected_cluster_files"
root_louvain_json_dir = workspace + "/" + data_set + "/oslom_undirected_json_files"
## Iterate all combinations
data_types = ["mirna", "rnaseq", "mutation"]
combinations = list(itertools.combinations_with_replacement(data_types, 2))
for tumor_type in tumor_types:
## Print cluster json files
for combination in combinations:
for gamma in [1]:
input_folder = root_cluster_dir + "/" + tumor_type + "/" + combination[0] + "_vs_" + combination[1] + "_gamma_" + str(gamma)
input_file = input_folder + "/" + combination[0] + "_vs_" + combination[1] + "_gamma_" + str(gamma) + ".oslom_undirected.tsv"
output_file = root_louvain_json_dir + "/" + tumor_type + "/" + combination[0] + "_vs_" + combination[1] \
+ "_gamma_" + str(gamma) + ".json"
if not os.path.exists(os.path.dirname(output_file)):
os.makedirs(os.path.dirname(output_file))
print datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ": System is printing " + input_file
subprocess.call(["qsub", "-pe", "smp", "1", "-o", "search_engine.log", "-e", "search_engine.log",
workspace + "/codes/" + data_set + "/tcga.sh", "print_oslom_json", workspace, input_file,
output_file, tumor_type, combination[0] + "_vs_" + combination[1], prefix])
## Print star json files
#print datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ": System is printing " + tumor_type
#subprocess.call(["qsub", "-pe", "smp", "4", "-o", "search_engine.log", "-e", "search_engine.log",
# workspace + "/codes/" + data_set + "/tcga.sh", "print_star_oslom_json", workspace, data_set, tumor_type])
## To parse the cluster file and then print json file.
def print_ivanovska_json(workspace, data_set):
prefix = "clusters_tcga_ivanovska"
root_cluster_dir = workspace + "/" + data_set + "/cluster_files"
root_ivanovska_json_dir = workspace + "/" + data_set + "/ivanovska_json_files"
## Iterate all combinations
data_types = ["mirna", "rnaseq"]
for tumor_type in tumor_types:
## Print cluster json files
for gamma in [1]:
input_file = root_cluster_dir + "/" + tumor_type + "/" + data_types[0] + "_vs_" \
+ data_types[1] + "_gamma_" + str(gamma) + ".tsv"
output_file = root_ivanovska_json_dir + "/" + tumor_type + "/" + data_types[0] + "_vs_" + data_types[1] \
+ "_gamma_" + str(gamma) + ".ivanovska.json"
if os.path.exists(os.path.dirname(input_file)):
if not os.path.exists(os.path.dirname(output_file)):
os.makedirs(os.path.dirname(output_file))
print datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ": System is printing " + input_file
subprocess.call(["qsub", "-pe", "smp", "1", "-o", "search_engine.log", "-e", "search_engine.log",
workspace + "/codes/" + data_set + "/tcga.sh", "print_ivanovska_json", workspace, input_file,
output_file, tumor_type, data_types[0] + "_vs_" + data_types[1], prefix])
## To print labels
def print_label(workspace, data_set):
## Iterate all combinations
data_types = ["mirna", "rnaseq", "mutation"]
for tumor_type in tumor_types:
for data_type in data_types:
## Print label
print datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ": System is printing labels of " + tumor_type
subprocess.call(["qsub", "-pe", "smp", "1", "-o", "search_engine.log", "-e", "search_engine.log",
workspace + "/codes/" + data_set + "/tcga.sh", "print_label", workspace, data_set, tumor_type, data_type])
## To print schema
def print_schema(workspace, data_set):
print datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ": System is printing schema of TCGA"
subprocess.call(["qsub", "-pe", "smp", "1", "-o", "search_engine.log", "-e", "search_engine.log",
workspace + "/codes/" + data_set + "/tcga.sh", "print_schema", workspace, data_set])
## To append id into json files
def append_id(workspace, data_set):
print datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ": System is appending id into TCGA json files"
subprocess.call(["qsub", "-pe", "smp", "1", "-o", "search_engine.log", "-e", "search_engine.log",
workspace + "/codes/" + data_set + "/tcga.sh", "append_id", workspace, data_set])
|
StarcoderdataPython
|
3354663
|
"""BleBox climate entities tests."""
import json
from blebox_uniapi.box_types import get_latest_api_level
from .conftest import CommonEntity, DefaultBoxTest, future_date, jmerge
# TODO: remove
SUPPORT_TARGET_TEMPERATURE = 1
HVAC_MODE_OFF = "hvac mode off"
HVAC_MODE_HEAT = "hvac mode heat"
CURRENT_HVAC_OFF = "current hvac mode off"
CURRENT_HVAC_HEAT = "current hvac mode heat"
CURRENT_HVAC_IDLE = "current hvac mode idle"
ATTR_TEMPERATURE = "temperature"
TEMP_CELSIUS = "celsius"
class ClimateDevice:
def __init__(self):
self._state = None
@property
def state(self):
if self._feature.is_on is None:
return None
if not self._feature.is_on:
return HVAC_MODE_OFF
return HVAC_MODE_HEAT
@property
def device_class(self):
return None
class BleBoxClimateEntity(CommonEntity, ClimateDevice):
    """Representation of a BleBox climate feature."""
    def __init__(self, feature):
        super().__init__(feature)
        ClimateDevice.__init__(self)
@property
def supported_features(self):
"""Return the supported climate features."""
return SUPPORT_TARGET_TEMPERATURE
@property
def hvac_mode(self):
"""Return the desired HVAC mode."""
return {None: None, False: HVAC_MODE_OFF, True: HVAC_MODE_HEAT}[
self._feature.is_on
]
@property
def hvac_action(self):
"""Return the actual current HVAC action."""
on = self._feature.is_on
if not on:
return None if on is None else CURRENT_HVAC_OFF
states = {None: None, False: CURRENT_HVAC_IDLE, True: CURRENT_HVAC_HEAT}
heating = self._feature.is_heating
return states[heating]
@property
def hvac_modes(self):
"""Return a list of possible HVAC modes."""
return (HVAC_MODE_OFF, HVAC_MODE_HEAT)
@property
def temperature_unit(self):
"""Return the temperature unit."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self._feature.current
@property
def max_temp(self):
"""Return the maximum thermostat setting."""
return self._feature.max_temp
@property
def min_temp(self):
"""Return the minimum thermostat setting."""
return self._feature.min_temp
@property
def target_temperature(self):
"""Return the desired thermostat temperature."""
return self._feature.desired
async def async_set_hvac_mode(self, hvac_mode):
"""Set the climate entity mode."""
modemap = {HVAC_MODE_OFF: "async_off", HVAC_MODE_HEAT: "async_on"}
await getattr(self._feature, modemap[hvac_mode])()
async def async_set_temperature(self, **kwargs):
"""Set the thermostat temperature."""
value = kwargs[ATTR_TEMPERATURE]
await self._feature.async_set_temperature(value)
class TestSauna(DefaultBoxTest):
"""Tests for entities representing a BleBox saunaBox."""
DEVCLASS = "climates"
ENTITY_CLASS = BleBoxClimateEntity
DEV_INFO_PATH = "api/heat/extended/state"
DEVICE_INFO = json.loads(
"""
{
"device": {
"deviceName": "My SaunaBox",
"type": "saunaBox",
"fv": "0.176",
"hv": "0.6",
"apiLevel": "20180604",
"id": "1afe34db9437",
"ip": "192.168.1.11"
}
}
"""
)
def patch_version(apiLevel):
"""Generate a patch for a JSON state fixture."""
return f"""
{{ "device": {{ "apiLevel": {apiLevel} }} }}
"""
DEVICE_INFO_FUTURE = jmerge(DEVICE_INFO, patch_version(future_date()))
DEVICE_INFO_LATEST = jmerge(
DEVICE_INFO, patch_version(get_latest_api_level("saunaBox"))
)
DEVICE_INFO_UNSUPPORTED = jmerge(DEVICE_INFO, patch_version(20180603))
DEVICE_INFO_UNSPECIFIED_API = json.loads(
"""
{
"device": {
"deviceName": "My SaunaBox",
"type": "saunaBox",
"fv": "0.176",
"hv": "0.6",
"id": "1afe34db9437",
"ip": "192.168.1.11"
}
}
"""
)
def patch_state(state, current, desired):
"""Generate a patch for a JSON state fixture."""
return f"""
{{
"heat": {{
"state": {state},
"desiredTemp": {desired},
"sensors": [ {{ "value": {current} }} ]
}}
}}
"""
STATE_DEFAULT = json.loads(
"""
{
"heat": {
"state": 0,
"desiredTemp": 6428,
"maximumTemp": 12166,
"minimumTemp": -5166,
"sensors": [
{
"type": "temperature",
"id": 0,
"value": 3996,
"trend": 0,
"state": 2,
"elapsedTimeS": 0
}
]
}
}
"""
)
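    # Note: the fixture appears to encode temperatures in hundredths of a degree Celsius
    # (e.g. desiredTemp 6428 is asserted as a 64.3 degC target in test_update below).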
STATE_OFF_BELOW = STATE_DEFAULT
STATE_NEEDS_HEATING = jmerge(STATE_DEFAULT, patch_state(1, 2320, 3871))
STATE_OFF_ABOVE = jmerge(STATE_DEFAULT, patch_state(0, 3871, 2876))
STATE_NEEDS_COOLING = jmerge(STATE_DEFAULT, patch_state(1, 3871, 2876))
STATE_REACHED = jmerge(STATE_DEFAULT, patch_state(1, 2320, 2320))
STATE_THERMO_SET = jmerge(STATE_DEFAULT, patch_state(1, 2320, 4320))
async def test_init(self, aioclient_mock):
"""Test default state."""
await self.allow_get_info(aioclient_mock)
entity = (await self.async_entities(aioclient_mock))[0]
assert entity.name == "My SaunaBox (saunaBox#thermostat)"
assert entity.unique_id == "BleBox-saunaBox-1afe34db9437-thermostat"
assert entity.device_class is None
assert entity.supported_features & SUPPORT_TARGET_TEMPERATURE
assert entity.hvac_modes == (HVAC_MODE_OFF, HVAC_MODE_HEAT)
assert entity.hvac_mode is None
assert entity.hvac_action is None
assert entity.target_temperature is None
assert entity.temperature_unit == TEMP_CELSIUS
assert entity.state is None
assert entity.max_temp is None
assert entity.min_temp is None
async def test_device_info(self, aioclient_mock):
await self.allow_get_info(aioclient_mock, self.DEVICE_INFO)
entity = (await self.async_entities(aioclient_mock))[0]
assert entity.device_info["name"] == "My SaunaBox"
assert entity.device_info["mac"] == "1afe34db9437"
assert entity.device_info["manufacturer"] == "BleBox"
assert entity.device_info["model"] == "saunaBox"
assert entity.device_info["sw_version"] == "0.176"
async def test_update(self, aioclient_mock):
"""Test updating."""
entity = await self.updated(aioclient_mock, self.STATE_DEFAULT)
assert entity.hvac_mode == HVAC_MODE_OFF
assert entity.hvac_action == CURRENT_HVAC_OFF
assert entity.target_temperature == 64.3
assert entity.current_temperature == 40.0
assert entity.temperature_unit == TEMP_CELSIUS
assert entity.max_temp == 121.7
assert entity.min_temp == -51.7
async def test_on_when_below_target(self, aioclient_mock):
"""Test when temperature is below desired."""
entity = await self.updated(aioclient_mock, self.STATE_OFF_BELOW)
assert entity.state == entity.hvac_mode == HVAC_MODE_OFF
assert entity.hvac_action == CURRENT_HVAC_OFF
self.allow_get(aioclient_mock, "/s/1", self.STATE_NEEDS_HEATING)
await entity.async_set_hvac_mode(HVAC_MODE_HEAT)
assert entity.target_temperature == 38.7
assert entity.current_temperature == 23.2
assert entity.state == entity.hvac_mode == HVAC_MODE_HEAT
assert entity.hvac_action == CURRENT_HVAC_HEAT
async def test_on_when_above_target(self, aioclient_mock):
"""Test when temperature is below desired."""
entity = await self.updated(aioclient_mock, self.STATE_OFF_ABOVE)
assert entity.state == entity.hvac_mode == HVAC_MODE_OFF
assert entity.hvac_action == CURRENT_HVAC_OFF
self.allow_get(aioclient_mock, "/s/1", self.STATE_NEEDS_COOLING)
await entity.async_set_hvac_mode(HVAC_MODE_HEAT)
assert entity.target_temperature == 28.8
assert entity.current_temperature == 38.7
assert entity.state == entity.hvac_mode == HVAC_MODE_HEAT
assert entity.hvac_action == CURRENT_HVAC_IDLE
async def test_on_when_at_target(self, aioclient_mock):
"""Test when temperature is below desired."""
entity = await self.updated(aioclient_mock, self.STATE_OFF_ABOVE)
assert entity.state == entity.hvac_mode == HVAC_MODE_OFF
assert entity.hvac_action == CURRENT_HVAC_OFF
self.allow_get(aioclient_mock, "/s/1", self.STATE_REACHED)
await entity.async_set_hvac_mode(HVAC_MODE_HEAT)
assert entity.target_temperature == 23.2
assert entity.current_temperature == 23.2
assert entity.state == entity.hvac_mode == HVAC_MODE_HEAT
assert entity.hvac_action == CURRENT_HVAC_IDLE
async def test_off(self, aioclient_mock):
"""Test turning off."""
entity = await self.updated(aioclient_mock, self.STATE_REACHED)
self.allow_get(aioclient_mock, "/s/0", self.STATE_OFF_BELOW)
await entity.async_set_hvac_mode(HVAC_MODE_OFF)
assert entity.target_temperature == 64.3
assert entity.current_temperature == 40.0
assert entity.state == entity.hvac_mode == HVAC_MODE_OFF
assert entity.hvac_action == CURRENT_HVAC_OFF
async def test_set_thermo(self, aioclient_mock):
"""Test setting thermostat."""
entity = await self.updated(aioclient_mock, self.STATE_REACHED)
self.allow_get(aioclient_mock, "/s/t/4321", self.STATE_THERMO_SET)
await entity.async_set_temperature(**{ATTR_TEMPERATURE: 43.21})
assert entity.current_temperature == 23.2 # no change yet
assert entity.target_temperature == 43.2
assert entity.state == entity.hvac_mode == HVAC_MODE_HEAT
assert entity.hvac_action == CURRENT_HVAC_HEAT
|
StarcoderdataPython
|
3212640
|
<gh_stars>0
from unittest import TestCase
from unittest.mock import patch, MagicMock, PropertyMock
from bson import ObjectId
from django_mock_queries.query import MockSet
from mlplaygrounds.datasets.tests.mocks.managers import MockDatasetManager
from ..models import User, CustomUserManager
class TestUserModel(TestCase):
def setUp(self):
mock_manager = MockDatasetManager()
mock_manager.collection.insert_many([
{'_id': 1, 'user_id': 'john', 'name': 'jdata', 'data': {}},
{'_id': 2, 'user_id': 'test', 'name': 'test', 'data': {}}
])
self.manager_patcher = patch(
'mlplaygrounds.users.models.Dataset.objects',
new_callable=PropertyMock(return_value=mock_manager)
)
self.manager_patcher.start()
def test_datasets(self):
expected_dataset = {'uid': 2, 'name': 'test'}
user = User()
user.username = 'test'
user_datasets = [
{'uid': dataset.uid, 'name': dataset.name}
for dataset in user.datasets
]
self.assertEqual(user_datasets, [expected_dataset])
def tearDown(self):
self.manager_patcher.stop()
class TestUserManager(TestCase):
def setUp(self):
self.save_patcher = patch(
'mlplaygrounds.users.models.User.save',
return_value=MagicMock()
)
self.save_patcher.start()
def tearDown(self):
self.save_patcher.stop()
@patch('mlplaygrounds.users.models.CustomUserManager._credentials_already_in_use')
def test_create_user(self, mock_credentials_in_use):
mock_credentials_in_use.return_value = False
manager = CustomUserManager()
user = manager.create_user('usr', 'pass', 'email', 'name', 'sname')
self.assertEqual(user.username, 'usr')
@patch('mlplaygrounds.users.models.CustomUserManager._credentials_already_in_use')
def test_create_invalid_user(self, mock_credentials_in_use):
mock_credentials_in_use.return_value = True
manager = CustomUserManager()
with self.assertRaises(ValueError):
manager.create_user('usr', 'pass', 'email', 'name', 'sname')
@patch('mlplaygrounds.users.models.CustomUserManager._credentials_already_in_use')
def test_create_superuser(self, mock_credentials_in_use):
mock_credentials_in_use.return_value = False
manager = CustomUserManager()
user = manager.create_superuser('susr', 'pass', 'semail', 'name', 'sname')
self.assertEqual(user.is_superuser, True)
@patch('mlplaygrounds.users.models.CustomUserManager._credentials_already_in_use')
def test_create_invalid_superuser(self, mock_credentials_in_use):
mock_credentials_in_use.return_value = True
manager = CustomUserManager()
with self.assertRaises(ValueError):
manager.create_superuser('susr', 'pass', 'semail', 'name', 'sname')
@patch('mlplaygrounds.users.models.User.objects')
def test_check_credentials_in_use(self, mock_objects):
mock_objects.filter.return_value = mock_objects
mock_objects.exists.return_value = True
manager = CustomUserManager()
in_use = manager._credentials_already_in_use('mary', '<EMAIL>')
self.assertEqual(in_use, True)
@patch('mlplaygrounds.users.models.User.objects')
def test_check_credentials_not_in_use(self, mock_objects):
mock_objects.filter.return_value = mock_objects
mock_objects.exists.return_value = False
manager = CustomUserManager()
in_use = manager._credentials_already_in_use('patrick', '<EMAIL>')
self.assertEqual(in_use, False)
|
StarcoderdataPython
|
68580
|
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import datetime
import seaborn as sns
from matplotlib.colors import ListedColormap
import numpy as np
df = pd.read_excel('data/R1_5_results_only.xlsx')
rc = {'figure.figsize': (10, 5),
'axes.facecolor': 'white',
'axes.grid': True,
'grid.color': '.8',
'font.family': 'Arial Narrow',
'font.size': 15}
plt.rcParams.update(rc)
colors = ["#0F6FC6", "#009DD9", '#0BD0D9', '#10CF9B', '#7CCA62', '#A5C249']
# Set your custom color palette
sns.set_palette(sns.color_palette(colors))
f, axes = plt.subplots(1, 3, figsize=(20, 8))
f.suptitle('Performance Analysis Quintile Portfolios')
sub1 = sns.stripplot(x='Quintile', y="Mean_Annual", data=df, s=10, ax=axes[0])
sub1.set(xlabel="Quintile", ylabel="Annualized Return")
axes[0].set_title('Total Return Analysis')
sub2 = sns.stripplot(x='Quintile', y="Std_Annualized", data=df, s=10, ax=axes[1])
sub2.set(xlabel="Quintile", ylabel="Std. Dev.")
axes[1].set_title('Volatility Analysis')
# axes[1].get_legend().remove()
sub3 = sns.stripplot(x='Quintile', y="Sharpe_Ratio", data=df, s=10, ax=axes[2])
sub3.set(xlabel="Quintile", ylabel="Sharpe Ratio")
axes[2].set_title('Risk adjusted Performance')
# axes[2].get_legend().remove()
sns.despine(top=False, right=False, left=False, bottom=False, offset=None, trim=False)
plt.subplots_adjust(left=0.05, right=0.95)
#
# todo uncomment to save to respective file_types
plt.savefig('graphs_presentation_arial_narrow/R1_5_sharpe_ratio_v2.eps', dpi=300)
# plt.savefig('graphs/R1_5_sharpe_ratio_v2.png', dpi=300)
# plt.savefig('graphs/R1_5_sharpe_ratio_v2.pdf', dpi=300)
plt.show()
|
StarcoderdataPython
|
3316949
|
# -*- coding: utf-8 -*-
import localhostrunner
import os.path
from django.conf import settings
from django.test.runner import DiscoverRunner
class LocalhostTestRunner(DiscoverRunner):
def run_suite(self, suite, **kwargs):
return localhostrunner.LocalhostTestRunner(**kwargs).run(suite)
|
StarcoderdataPython
|
103696
|
<gh_stars>1-10
df.groupby('Pclass')['Fare'].hist(alpha=0.4);
|
StarcoderdataPython
|
1765275
|
import re
from . import Mod
def remove_comments(output):
output = re.sub(r'(\/\*[\w\'\s\n\*]*\*\/)', r'', output) # multi-line comments
    output = re.sub(r'((?:[\s;]+)|^)(\/\/.*$)', r'\1', output, flags=re.MULTILINE)  # single-line comments
return output
mod_remove_comments = Mod(remove_comments)
|
StarcoderdataPython
|
3339449
|
<reponame>caosenqi/Edward1
import numpy as np
import tensorflow as tf
from edward.data import Data
from edward.util import logit, get_session
def evaluate(metrics, model, variational, data):
"""
Evaluate fitted model using a set of metrics.
Parameters
----------
    metrics : list or str
List of metrics or a single metric.
Returns
-------
list or float
A list of evaluations or a single evaluation.
"""
sess = get_session()
# Monte Carlo estimate the mean of the posterior predictive:
# 1. Sample a batch of latent variables from posterior
xs = data.data
n_minibatch = 100
zs, samples = variational.sample(xs, size=n_minibatch)
feed_dict = variational.np_sample(samples, n_minibatch)
# 2. Make predictions, averaging over each sample of latent variables
y_pred, y_true = model.predict(xs, zs)
# Evaluate y_pred according to y_true for all metrics.
evaluations = []
if isinstance(metrics, str):
metrics = [metrics]
for metric in metrics:
if metric == 'accuracy' or metric == 'crossentropy':
# automate binary or sparse cat depending on max(y_true)
support = tf.reduce_max(y_true).eval()
if support <= 1:
metric = 'binary_' + metric
else:
metric = 'sparse_categorical_' + metric
if metric == 'binary_accuracy':
evaluations += [sess.run(binary_accuracy(y_true, y_pred), feed_dict)]
elif metric == 'categorical_accuracy':
evaluations += [sess.run(categorical_accuracy(y_true, y_pred), feed_dict)]
elif metric == 'sparse_categorical_accuracy':
evaluations += [sess.run(sparse_categorical_accuracy(y_true, y_pred), feed_dict)]
elif metric == 'log_loss' or metric == 'binary_crossentropy':
evaluations += [sess.run(binary_crossentropy(y_true, y_pred), feed_dict)]
elif metric == 'categorical_crossentropy':
evaluations += [sess.run(categorical_crossentropy(y_true, y_pred), feed_dict)]
elif metric == 'sparse_categorical_crossentropy':
evaluations += [sess.run(sparse_categorical_crossentropy(y_true, y_pred), feed_dict)]
elif metric == 'hinge':
evaluations += [sess.run(hinge(y_true, y_pred), feed_dict)]
elif metric == 'squared_hinge':
evaluations += [sess.run(squared_hinge(y_true, y_pred), feed_dict)]
elif metric == 'mse' or metric == 'MSE' or \
metric == 'mean_squared_error':
evaluations += [sess.run(mean_squared_error(y_true, y_pred), feed_dict)]
elif metric == 'mae' or metric == 'MAE' or \
metric == 'mean_absolute_error':
evaluations += [sess.run(mean_absolute_error(y_true, y_pred), feed_dict)]
elif metric == 'mape' or metric == 'MAPE' or \
metric == 'mean_absolute_percentage_error':
evaluations += [sess.run(mean_absolute_percentage_error(y_true, y_pred), feed_dict)]
elif metric == 'msle' or metric == 'MSLE' or \
metric == 'mean_squared_logarithmic_error':
evaluations += [sess.run(mean_squared_logarithmic_error(y_true, y_pred), feed_dict)]
elif metric == 'poisson':
evaluations += [sess.run(poisson(y_true, y_pred), feed_dict)]
elif metric == 'cosine' or metric == 'cosine_proximity':
evaluations += [sess.run(cosine_proximity(y_true, y_pred), feed_dict)]
elif metric == 'log_lik' or metric == 'log_likelihood':
evaluations += [sess.run(y_pred, feed_dict)]
else:
raise NotImplementedError()
if len(evaluations) == 1:
return evaluations[0]
else:
return evaluations
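# Illustrative usage (a sketch; assumes a fitted `model`, a `variational` approximation
# and a `data` object with the interfaces used above):
#
#     mse = evaluate('mse', model, variational, data)
#     acc, ll = evaluate(['accuracy', 'log_lik'], model, variational, data)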
def ppc(model, variational=None, data=Data(), T=None, size=100):
"""
Posterior predictive check.
(Rubin, 1984; Meng, 1994; <NAME>, and Stern, 1996)
If variational is not specified, it defaults to a prior predictive
check (Box, 1980).
PPC's form an empirical distribution for the predictive discrepancy,
p(T) = \int p(T(yrep) | z) p(z | y) dz
by drawing replicated data sets yrep and calculating T(yrep) for
each data set. Then it compares it to T(y).
Parameters
----------
model : Model
class object with a 'sample_likelihood' method
variational : Variational, optional
latent variable distribution q(z) to sample from. It is an
approximation to the posterior, e.g., a variational
approximation or an empirical distribution from MCMC samples.
If not specified, samples will be obtained from model
with a 'sample_prior' method.
data : Data, optional
Observed data to compare to. If not specified, will return
only the reference distribution with an assumed replicated
data set size of 1.
T : function, optional
Discrepancy function written in TensorFlow. Default is
identity. It is a function taking in a data set
y and optionally a set of latent variables z as input.
size : int, optional
number of replicated data sets
Returns
-------
list
List containing the reference distribution, which is a Numpy
vector of size elements,
(T(yrep^{1}, z^{1}), ..., T(yrep^{size}, z^{size}));
and the realized discrepancy, which is a NumPy vector of size
elements,
(T(y, z^{1}), ..., T(y, z^{size})).
"""
sess = get_session()
y = data.data
if y == None:
N = 1
else:
N = data.N
if T == None:
T = lambda y, z=None: y
# 1. Sample from posterior (or prior).
# We must fetch zs out of the session because sample_likelihood()
# may require a SciPy-based sampler.
if variational != None:
zs, samples = variational.sample(y, size=size)
feed_dict = variational.np_sample(samples, size)
zs = sess.run(zs, feed_dict)
else:
zs = model.sample_prior(size=size)
zs = zs.eval()
# 2. Sample from likelihood.
yreps = model.sample_likelihood(zs, size=N)
# 3. Calculate discrepancy.
Tyreps = []
Tys = []
for yrep, z in zip(yreps, tf.unpack(zs)):
Tyreps += [T(yrep, z)]
if y != None:
Tys += [T(y, z)]
if y == None:
return sess.run(tf.pack(Tyreps), feed_dict)
else:
return sess.run([tf.pack(Tyreps), tf.pack(Tys)], feed_dict)
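# Illustrative usage (a sketch; `model`, `variational` and `data` as above, with an
# optional discrepancy function T taking (y, z)):
#
#     Tyreps, Tys = ppc(model, variational=variational, data=data,
#                       T=lambda y, z=None: tf.reduce_mean(y), size=100)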
# Classification metrics
def binary_accuracy(y_true, y_pred):
"""
Binary prediction accuracy, also known as 0/1-loss.
Parameters
----------
y_true : tf.Tensor
Tensor of 0s and 1s.
y_pred : tf.Tensor
Tensor of probabilities.
"""
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.cast(tf.round(y_pred), tf.float32)
return tf.reduce_mean(tf.cast(tf.equal(y_true, y_pred), tf.float32))
def categorical_accuracy(y_true, y_pred):
"""
Multi-class prediction accuracy. One-hot representation for
y_true.
Parameters
----------
y_true : tf.Tensor
Tensor of 0s and 1s, where the outermost dimension of size K
has only one 1 per row.
y_pred : tf.Tensor
Tensor of probabilities, with same shape as y_true.
The outermost dimension denote the categorical probabilities for
that data point per row.
"""
y_true = tf.cast(tf.argmax(y_true, len(y_true.get_shape()) - 1), tf.float32)
y_pred = tf.cast(tf.argmax(y_pred, len(y_pred.get_shape()) - 1), tf.float32)
return tf.reduce_mean(tf.cast(tf.equal(y_true, y_pred), tf.float32))
def sparse_categorical_accuracy(y_true, y_pred):
"""
Multi-class prediction accuracy. Label {0, 1, .., K-1}
representation for y_true.
Parameters
----------
y_true : tf.Tensor
Tensor of integers {0, 1, ..., K-1}.
y_pred : tf.Tensor
Tensor of probabilities, with shape (y_true.get_shape(), K).
The outermost dimension are the categorical probabilities for
that data point.
"""
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.cast(tf.argmax(y_pred, len(y_pred.get_shape()) - 1), tf.float32)
return tf.reduce_mean(tf.cast(tf.equal(y_true, y_pred), tf.float32))
def binary_crossentropy(y_true, y_pred):
"""
Parameters
----------
y_true : tf.Tensor
Tensor of 0s and 1s.
y_pred : tf.Tensor
Tensor of probabilities.
"""
y_true = tf.cast(y_true, tf.float32)
y_pred = logit(tf.cast(y_pred, tf.float32))
return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(y_pred, y_true))
def categorical_crossentropy(y_true, y_pred):
"""
Multi-class cross entropy. One-hot representation for y_true.
Parameters
----------
y_true : tf.Tensor
Tensor of 0s and 1s, where the outermost dimension of size K
has only one 1 per row.
y_pred : tf.Tensor
Tensor of probabilities, with same shape as y_true.
The outermost dimension denote the categorical probabilities for
that data point per row.
"""
y_true = tf.cast(y_true, tf.float32)
y_pred = logit(tf.cast(y_pred, tf.float32))
return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_pred, y_true))
def sparse_categorical_crossentropy(y_true, y_pred):
"""
Multi-class cross entropy. Label {0, 1, .., K-1} representation
for y_true.
Parameters
----------
y_true : tf.Tensor
Tensor of integers {0, 1, ..., K-1}.
y_pred : tf.Tensor
Tensor of probabilities, with shape (y_true.get_shape(), K).
The outermost dimension are the categorical probabilities for
that data point.
"""
y_true = tf.cast(y_true, tf.int64)
y_pred = logit(tf.cast(y_pred, tf.float32))
return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(y_pred, y_true))
def hinge(y_true, y_pred):
"""
Parameters
----------
y_true : tf.Tensor
Tensor of 0s and 1s.
y_pred : tf.Tensor
Tensor of real value.
"""
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.cast(y_pred, tf.float32)
return tf.reduce_mean(tf.maximum(1.0 - y_true * y_pred, 0.0))
def squared_hinge(y_true, y_pred):
"""
Parameters
----------
y_true : tf.Tensor
Tensor of 0s and 1s.
y_pred : tf.Tensor
Tensor of real value.
"""
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.cast(y_pred, tf.float32)
return tf.reduce_mean(tf.square(tf.maximum(1.0 - y_true * y_pred, 0.0)))
# Regression metrics
def mean_squared_error(y_true, y_pred):
"""
Parameters
----------
y_true : tf.Tensor
y_pred : tf.Tensor
Tensors of same shape and type.
"""
return tf.reduce_mean(tf.square(y_pred - y_true))
def mean_absolute_error(y_true, y_pred):
"""
Parameters
----------
y_true : tf.Tensor
y_pred : tf.Tensor
Tensors of same shape and type.
"""
return tf.reduce_mean(tf.abs(y_pred - y_true))
def mean_absolute_percentage_error(y_true, y_pred):
"""
Parameters
----------
y_true : tf.Tensor
y_pred : tf.Tensor
Tensors of same shape and type.
"""
diff = tf.abs((y_true - y_pred) / tf.clip_by_value(tf.abs(y_true), 1e-8, np.inf))
return 100.0 * tf.reduce_mean(diff)
def mean_squared_logarithmic_error(y_true, y_pred):
"""
Parameters
----------
y_true : tf.Tensor
y_pred : tf.Tensor
Tensors of same shape and type.
"""
first_log = tf.log(tf.clip_by_value(y_pred, 1e-8, np.inf) + 1.0)
second_log = tf.log(tf.clip_by_value(y_true, 1e-8, np.inf) + 1.0)
return tf.reduce_mean(tf.square(first_log - second_log))
def poisson(y_true, y_pred):
"""
Negative Poisson log-likelihood of data y_true given predictions
y_pred (up to proportion).
Parameters
----------
y_true : tf.Tensor
y_pred : tf.Tensor
Tensors of same shape and type.
"""
return tf.reduce_sum(y_pred - y_true * tf.log(y_pred + 1e-8))
def cosine_proximity(y_true, y_pred):
"""
Cosine similarity of two vectors.
Parameters
----------
y_true : tf.Tensor
y_pred : tf.Tensor
Tensors of same shape and type.
"""
y_true = tf.nn.l2_normalize(y_true, len(y_true.get_shape()) - 1)
y_pred = tf.nn.l2_normalize(y_pred, len(y_pred.get_shape()) - 1)
return tf.reduce_sum(y_true * y_pred)
|
StarcoderdataPython
|
3227742
|
<filename>src/train.py
from tensorflow import keras
import tensorflow as tf
import archs
from utils import data_utils, train_utils, augment, argmanager
from utils.loss import multinomial_nll
import numpy as np
import random
import string
import math
import os
import json
def subsample_nonpeak_data(nonpeak_seqs, nonpeak_cts, peak_data_size, negative_sampling_ratio):
#Randomly samples a portion of the non-peak data to use in training
num_nonpeak_samples = int(negative_sampling_ratio * peak_data_size)
nonpeak_indices_to_keep = np.random.choice(len(nonpeak_seqs), size=num_nonpeak_samples, replace=False)
nonpeak_seqs = nonpeak_seqs[nonpeak_indices_to_keep]
nonpeak_cts = nonpeak_cts[nonpeak_indices_to_keep]
return nonpeak_seqs, nonpeak_cts
class BatchGenerator(keras.utils.Sequence):
"""
This generator randomly crops (=jitter) and revcomps training examples for
every epoch
"""
def __init__(self, peak_seqs, nonpeak_seqs, peak_cts, nonpeak_cts, negative_sampling, negative_sampling_ratio, inputlen, outputlen, batch_size):
"""
seqs: B x L' x 4
cts: B x M'
inputlen: int (L <= L'), L' is greater to allow for cropping (= jittering)
outputlen: int (M <= M'), M' is greater to allow for cropping (= jittering)
batch_size: int (B)
"""
self.peak_seqs, self.nonpeak_seqs = peak_seqs, nonpeak_seqs
self.peak_cts, self.nonpeak_cts = peak_cts, nonpeak_cts
self.negative_sampling = negative_sampling
self.negative_sampling_ratio = negative_sampling_ratio
self.inputlen = inputlen
self.outputlen = outputlen
self.batch_size = batch_size
# random crop training data to the desired sizes, revcomp augmentation
self.crop_revcomp_data()
def __len__(self):
return math.ceil(self.seqs.shape[0]/self.batch_size)
def crop_revcomp_data(self):
# random crop training data to inputlen and outputlen (with corresponding offsets), revcomp augmentation
# shuffle required since otherwise peaks and nonpeaks will be together
#Sample a fraction of the negative samples according to the specified ratio
if self.negative_sampling:
self.sampled_nonpeak_seqs, self.sampled_nonpeak_cts = subsample_nonpeak_data(self.nonpeak_seqs, self.nonpeak_cts, len(self.peak_seqs), self.negative_sampling_ratio)
self.seqs = np.vstack([self.peak_seqs, self.sampled_nonpeak_seqs])
self.cts = np.vstack([self.peak_cts, self.sampled_nonpeak_cts])
else:
self.seqs = np.vstack([self.peak_seqs, self.nonpeak_seqs])
self.cts = np.vstack([self.peak_cts, self.nonpeak_cts])
self.cur_seqs, self.cur_cts = augment.crop_revcomp_augment(
self.seqs, self.cts, self.inputlen, self.outputlen,
shuffle=True
)
def __getitem__(self, idx):
batch_seq = self.cur_seqs[idx*self.batch_size:(idx+1)*self.batch_size]
batch_cts = self.cur_cts[idx*self.batch_size:(idx+1)*self.batch_size]
return batch_seq, [batch_cts, np.log(1+batch_cts.sum(-1, keepdims=True))]
def on_epoch_end(self):
self.crop_revcomp_data()
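# Illustrative construction (a sketch; the arrays are the ones produced by
# data_utils.load_train_val_data in main() below, and the lengths/batch size shown here
# are placeholder values, not prescribed ones):
#
#     gen = BatchGenerator(train_peaks_seqs, train_nonpeaks_seqs,
#                          train_peaks_cts, train_nonpeaks_cts,
#                          negative_sampling=True, negative_sampling_ratio=0.1,
#                          inputlen=2114, outputlen=1000, batch_size=64)
#     batch_seqs, (profile_cts, log_total_cts) = gen[0]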
def train_loop(model, inputlen, outputlen, train_peak_seqs, train_nonpeak_seqs, train_peak_cts, train_nonpeak_cts,
val_peak_seqs, val_nonpeak_seqs, val_peak_cts, val_nonpeak_cts, negative_sampling, negative_sampling_ratio, batch_size, epochs, early_stop, output_prefix):
if negative_sampling:
np.random.seed(1248)
val_nonpeak_seqs, val_nonpeak_cts = subsample_nonpeak_data(val_nonpeak_seqs, val_nonpeak_cts, len(val_peak_seqs), negative_sampling_ratio)
val_seqs = np.vstack([val_peak_seqs, val_nonpeak_seqs])
val_cts = np.vstack([val_peak_cts, val_nonpeak_cts])
# need generator to crop and revcomp aug training examples, but not for
# validation.
train_generator = BatchGenerator(train_peak_seqs, train_nonpeak_seqs,
train_peak_cts, train_nonpeak_cts, negative_sampling, negative_sampling_ratio, inputlen, outputlen, batch_size)
callbacks = train_utils.get_callbacks(early_stop, output_prefix)
history = model.fit(train_generator,
epochs=epochs,
validation_data=(val_seqs,
[val_cts,
np.log(1+val_cts.sum(-1, keepdims=True))]),
callbacks=callbacks)
return history
def main():
args = argmanager.fetch_train_args()
print(args)
if os.path.exists("{}.h5".format(args.output_prefix)):
raise OSError('File {}.h5 already exists'.format(args.output_prefix))
# load data
train_peaks_seqs, train_peaks_cts, train_nonpeaks_seqs, train_nonpeaks_cts,\
val_peaks_seqs, val_peaks_cts, val_nonpeaks_seqs, val_nonpeaks_cts = \
data_utils.load_train_val_data(
args.peaks, args.nonpeaks, args.genome, args.bigwig,
args.val_chr, args.test_chr, args.inputlen, args.outputlen, args.max_jitter,
outlier=0.9999
)
# compute loss weight factor for counts loss
counts_loss_weight = train_utils.get_counts_stat(train_peaks_cts,
args.outputlen) * args.counts_weight
print("\nCounts loss weight : {:.2f}\n".format(counts_loss_weight))
# prepare model
model = archs.bpnet_seq(args.inputlen, args.outputlen, args.filters, args.ndil)
opt = keras.optimizers.Adam(learning_rate=args.learning_rate)
model.compile(
optimizer=opt,
loss=[multinomial_nll, 'mse'],
loss_weights = [1, counts_loss_weight]
)
history = train_loop(model, args.inputlen, args.outputlen,
train_peaks_seqs, train_nonpeaks_seqs,
train_peaks_cts, train_nonpeaks_cts,
val_peaks_seqs, val_nonpeaks_seqs,
val_peaks_cts, val_nonpeaks_cts, args.negative_sampling, args.negative_sampling_ratio,
args.batch_size, args.epochs,
args.early_stop, args.output_prefix)
with open("{}.history.json".format(args.output_prefix), "w") as f:
json.dump(history.history, f, ensure_ascii=False, indent=4)
if __name__=="__main__":
main()
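# Example invocation (a sketch; the exact flag spellings live in utils/argmanager.py,
# which is not shown here, so the names below are assumptions based on the attributes
# used above):
#
#     python train.py --peaks peaks.bed --nonpeaks nonpeaks.bed \
#         --genome genome.fa --bigwig signal.bw --val_chr chr8 --test_chr chr1 \
#         --inputlen 2114 --outputlen 1000 --max_jitter 500 --output_prefix out/model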
|
StarcoderdataPython
|
3347505
|
from threading import Lock
from typing import Any
DRIVER_CACHE_LOCK = Lock()
NOT_SET_MARKER = object()
def singleton_setup(obj: object,
key: str,
factory,
*args,
**kwargs) -> Any:
"""
Does:
obj.key = factory(*args, **kwargs) # but only once and in a thread safe manner
return obj.key
"""
v = getattr(obj, key, NOT_SET_MARKER)
if v is not NOT_SET_MARKER:
return v
with DRIVER_CACHE_LOCK:
v = getattr(obj, key, NOT_SET_MARKER)
if v is not NOT_SET_MARKER:
# very rare multi-thread only event.
# Other thread did it first but after this thread's initial check above.
# Disable test cover
return v # pragma: no cover
v = factory(*args, **kwargs)
setattr(obj, key, v)
return v
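# Illustrative usage (a sketch; `Holder` and `make_driver` are hypothetical names):
#
#     class Holder:
#         pass
#
#     holder = Holder()
#     driver = singleton_setup(holder, "_driver", make_driver, "http://localhost:1234")
#     # Subsequent calls return the cached instance without calling the factory again.
#     assert singleton_setup(holder, "_driver", make_driver, "http://localhost:1234") is driver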
|
StarcoderdataPython
|
1629702
|
<reponame>RobertCraigie/prisma-client-py
import json
from typing import Any
import httpx
from ._types import Method
from .http_abstract import AbstractResponse, AbstractHTTP
__all__ = ('HTTP', 'Response', 'client')
class HTTP(AbstractHTTP[httpx.AsyncClient, httpx.Response]):
# pylint: disable=invalid-overridden-method,attribute-defined-outside-init
session: httpx.AsyncClient
async def download(self, url: str, dest: str) -> None:
# pyright thinks that stream() doesn't return a context manager
        # due to how httpx handles compatibility imports
# https://github.com/encode/httpx/discussions/1829
async with self.session.stream(
'GET', url, timeout=None
) as resp: # pyright: reportGeneralTypeIssues=false
resp.raise_for_status()
with open(dest, 'wb') as fd:
async for chunk in resp.aiter_bytes():
fd.write(chunk)
async def request(self, method: Method, url: str, **kwargs: Any) -> 'Response':
return Response(await self.session.request(method, url, **kwargs))
def open(self) -> None:
self.session = httpx.AsyncClient(**self.session_kwargs)
async def close(self) -> None:
if not self.closed:
await self.session.aclose()
# mypy doesn't like us assigning None as the type of
# session is not optional, however the argument that
# the setter takes is optional, so this is fine
self.session = None # type: ignore[assignment]
client: HTTP = HTTP()
class Response(AbstractResponse[httpx.Response]):
# pylint: disable=invalid-overridden-method
@property
def status(self) -> int:
return self.original.status_code
async def json(self, **kwargs: Any) -> Any:
return json.loads(await self.original.aread(), **kwargs)
async def text(self, **kwargs: Any) -> str:
return ''.join([part async for part in self.original.aiter_text(**kwargs)])
|
StarcoderdataPython
|
1670585
|
from server import app, db
# Import model definitions and then create the database
db.create_all()
db.session.commit()
if __name__ == "__main__":
app.run(debug=True)
|
StarcoderdataPython
|
3229124
|
DEBUG = False
BCRYPT_LOG_ROUNDS = 12
|
StarcoderdataPython
|
4818752
|
from datetime import datetime, timedelta
from django.conf import settings
from rest_framework import generics
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework_jwt.utils import jwt_encode_handler, jwt_payload_handler
from .models import StoreUser
class UserView(generics.CreateAPIView):
queryset = StoreUser.objects.all()
# serializer_class = StoreSerializer
permission_classes = [AllowAny, ]
def post(self, request):
if not self.request.data.get('phone_number'):
return Response({"message": "Invalid Request"})
if StoreUser.objects.filter(phone_number=request.data.get('phone_number')):
return Response({"message": "phone number already registered"})
user_obj = StoreUser.objects.create(
phone_number=self.request.data.get('phone_number'))
user_obj.set_password(request.data.get('password'))
user_obj.is_active = True
user_obj.save()
        create_success_message = 'Your registration is completed successfully.'
return Response({'message': create_success_message})
@api_view(['POST'])
@permission_classes((AllowAny, ))
def login(request):
phone_number = request.data.get('phone_number')
password = request.data.get('password')
if not (phone_number and password):
return Response({"message": "Invalid Request"})
user_obj_query = StoreUser.objects.filter(
phone_number=phone_number)
if not user_obj_query.exists():
return Response({"message": "user not found"})
user_obj = user_obj_query.first()
if not user_obj.is_active:
return Response({"message": "user not active"})
payload = jwt_payload_handler(user_obj)
token = jwt_encode_handler(payload)
expiration = datetime.utcnow(
) + settings.JWT_AUTH['JWT_EXPIRATION_DELTA']
expiration_epoch = expiration.timestamp()
data = {
"token": token,
"token_expiration": expiration_epoch
}
return Response({'data': data})
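# Illustrative request/response shapes (a sketch based on the views above; the URL paths
# are assumptions, since the project's urlconf is not shown here):
#
#     POST /register/   {"phone_number": "5551234567", "password": "secret"}
#         -> {"message": "Your registration is completed successfully."}
#     POST /login/      {"phone_number": "5551234567", "password": "secret"}
#         -> {"data": {"token": "<jwt>", "token_expiration": 1700000000.0}}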
|
StarcoderdataPython
|
3389004
|
<filename>codegen.py
from ifelse_stmt import *
from stmt import *
from while_stmt import *
from block import *
from program import *
from declare import *
from assign_stmt import *
from call_stmt import *
from read_stmt import *
from write_stmt import *
from pl0yacc import *
from op import *
def getop(ast):
return ast[0]
code = []
currlevel = 0
currM = {}
names = {}
def codegen(ast):
global currlevel,currM,code
op = getop(ast)
if op == 'program':
codegen(program_block(ast))
code.append(('OPR', 0, 'OPR_RET'))
elif op == 'block':
if currlevel not in currM:
currM[currlevel] = 0
currM[currlevel] += 3
code.append(('INC', 0, 3))
codegen(const_dec(ast))
codegen(var_dec(ast))
codegen(proc_dec(ast))
codegen(block_stmts(ast))
elif op == 'const':
if not const_is_empty(ast):
for con in const_list(ast):
names[con[0]] = ['const',con[1]]
elif op == 'var':
if not var_is_empty(ast):
for var in var_list(ast):
if not currlevel in currM:
currM[currlevel] = 0
names[var] = ['var',currlevel,currM[currlevel]]
currM[currlevel] += 1
code.append(('INC',0,len(var_list(ast))))
elif op == 'proc':
if not proc_is_empty(ast):
for proc in proc_list(ast):
names[proc] = ['proc', currlevel, len(code) + 1]
currlevel += 1
tmpBegin = len(code)
code.append(('JMP',0,0))
codegen(procs[proc])
code.append(('OPR',0,'OPR_RET'))
currlevel -= 1
code[tmpBegin] = ('JMP',0,len(code))
elif op == ':=':
codegen(assign_right(ast))
lexeme = assign_left(ast)
code.append(('STO',abs(names[lexeme][1]-currlevel),names[lexeme][2]))
elif op == 'call':
lexeme = call_proc(ast)
code.append(('CAL',abs(names[lexeme][1]-currlevel),names[lexeme][2]))
elif op == 'stmt':
stmt = get_stmt(ast)
if stmt is not None:
codegen(stmt)
next_stmt = get_next_stmt(ast)
if next_stmt is not None:
codegen(next_stmt)
elif op == 'if':
codegen(get_if_cond(ast))
tmpBeginOne = len(code)
stmt = get_if_then(ast)
code.append(('JPC', 0, 0))
codegen(stmt)
code.append(('JMP',0,0))
code[tmpBeginOne] = ('JPC',0,len(code))
elif op == 'if-else':
codegen(get_if_cond(ast))
tmpBeginOne = len(code)
code.append(('JPC', 0, 0))
codegen(get_if_then(ast))
tmpBeginTwo = len(code)
code.append(('JMP', 0, 0))
code[tmpBeginOne] = ('JPC', 0, len(code))
codegen(get_if_else(ast))
code[tmpBeginTwo] = ('JMP',0,len(code))
elif op == 'while':
tmpBeginOne = len(code)
codegen(get_while_cond(ast))
tmpBeginTwo = len(code)
code.append(('JPC',0,0))
codegen(get_while_stmt(ast))
code.append(('JMP',0,tmpBeginOne))
code[tmpBeginTwo] = ('JPC',0,len(code))
elif op == 'read':
lexeme = read_ident(ast)
code.append(('SIO_IN',0,2))
code.append(('STO',abs(names[lexeme][1]-currlevel),names[lexeme][2]))
elif op == 'write':
expr = write_expr(ast)
codegen(expr)
code.append(('SIO_OUT',0,1))
elif op == 'odd':
pass
elif op == '=':
codegen(eql_left(ast))
codegen(eql_right(ast))
code.append(('OPR',0,'OPR_EQL'))
elif op == '<>':
codegen(neq_left(ast))
codegen(neq_right(ast))
code.append(('OPR',0,'OPR_NEQ'))
elif op == '<':
codegen(less_left(ast))
codegen(less_right(ast))
code.append(('OPR',0,'OPR_LSS'))
elif op == '<=':
codegen(leq_left(ast))
codegen(leq_right(ast))
code.append(('OPR',0,'OPR_LEQ'))
elif op == '>':
codegen(grt_left(ast))
codegen(grt_right(ast))
code.append(('OPR',0,'OPR_GTR'))
elif op == '>=':
codegen(grq_left(ast))
codegen(grt_right(ast))
code.append(('OPR',0,'OPR_GEQ'))
elif op == '*':
codegen(mul_left(ast))
codegen(mul_right(ast))
code.append(('OPR',0,'OPR_MUL'))
elif op == '/':
codegen(div_left(ast))
codegen(div_right(ast))
code.append(('OPR',0,'OPR_DIV'))
elif op == '-':
if len(ast) == 2:
codegen(ast[1])
code.append(('OPR',0,'OPR_NEG'))
else:
codegen(minus_left(ast))
codegen(minus_right(ast))
code.append(('OPR',0,'OPR_SUB'))
elif op == '+':
codegen(plus_left(ast))
codegen(plus_right(ast))
code.append(('OPR',0,'OPR_ADD'))
elif op == 'ident':
lexeme = ast[1]
if names[lexeme][0] == 'var':
code.append(('LOD',abs(names[lexeme][1]-currlevel),names[lexeme][2]))
elif names[lexeme][0] == 'const':
code.append(('LIT',0,names[lexeme][1]))
else:
pass
elif op == 'number':
lexeme = ast[1]
code.append(('LIT', 0, lexeme))
else:
pass
return code
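# Illustrative fragment (a sketch): a bare number node compiles to a single literal load,
# e.g. codegen(('number', 5)) appends ('LIT', 0, 5) to `code` and returns the code list.
# Whole programs enter through the ('program', ...) case, whose child accessors come from
# the helper modules imported above.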
|
StarcoderdataPython
|
35156
|
<reponame>jykntr/rest-cli-client
import argparse
from profile import Profile
PROXY = 'proxy'
VERIFY = 'verify'
DEBUG = 'verbose'
class CliParser():
def __init__(self, requests, profiles, options):
self.requests = requests
self.profiles = profiles
self.options = options
self.args = None
# Use a pre-parser to get options that aren't data driven by the config file.
# Pre-parser checks global options and specified profile
preparser = argparse.ArgumentParser(add_help=False)
preparser = self._add_global_options(preparser)
known_args, _ = preparser.parse_known_args()
# Now build real parser
self.parser = argparse.ArgumentParser()
# Add options that can be specified with or without a request sub-command
self._add_global_options(self.parser)
# Get specified profile (or empty profile if none specified)
profile = self._get_profile(known_args.profile)
# Add saved requests as sub commands
subparsers = self.parser.add_subparsers(
title='Requests',
help='The request to execute'
)
for request in self.requests:
# Add sub-parser for request
request_parser = subparsers.add_parser(
request.name,
description=request.__str__(),
formatter_class=argparse.RawDescriptionHelpFormatter
)
            # Set the request variable to the request name so we can tell which request was specified
request_parser.set_defaults(request=request.name)
# Add options common to all sub-commands
self._add_global_options(request_parser)
# Add HTTP request options like proxy and SSL verification
options_group = request_parser.add_argument_group(
title='Options',
description='Options to use when making HTTP requests'
)
options_group.add_argument(
'--' + PROXY,
default=[],
action='append',
metavar='host:port',
help='Maps a protocol to a proxy. For example: "http://user:[email protected]:8080". ' +
'Multiple proxies can be defined for different protocols.'
)
no_verify_mx_group = options_group.add_mutually_exclusive_group()
no_verify_mx_group.add_argument(
'--' + VERIFY,
dest=VERIFY,
action='store_true',
help='Verify SSL certificates.'
)
no_verify_mx_group.add_argument(
'--no-' + VERIFY,
action='store_false',
dest=VERIFY,
help='Do not verify SSL certificates.'
)
# Get default verify setting from options
no_verify_mx_group.set_defaults(verify=self.options.get_verify())
# Setup optional and required variables for each request. Optional variables have a name-value pair
# in the user specified profile and required variables don't.
optional_group = None # Only create the group if it is needed
required_group = None
for variable in request.get_variable_list():
if variable in profile.properties:
# Variable exists in profile, so it should be optional
if not optional_group:
# Create optional group if it doesn't exist
optional_group = request_parser.add_argument_group(
title='Optional variable arguments',
description='Variables that have a default value in the active profile ' +
'(' + profile.name + ')'
)
optional_group.add_argument(
'--'+variable,
help='Default value from profile: ' + profile.properties.get(variable),
default=profile.properties.get(variable)
)
else:
# Variable does not exist in the profile so it is required
if not required_group:
# Create required group if it doesn't exist
required_group = request_parser.add_argument_group(
title='Required variable arguments',
description='Variables that have no default value in the active profile ' +
'(' + profile.name + ')'
)
required_group.add_argument(variable)
def parse_args(self):
dictionary_args = vars(self.parser.parse_args())
# The proxy key will contain a list of proxies in the format:
# ['http://proxy.com:8080', 'https://proxy.com:8081']
# Remove the list of proxies from the cli args and put an
# empty dictionary in its place.
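        # For example, ['http://user:[email protected]:8080', 'https://proxy.com:8081']
        # becomes {'http': 'http://user:[email protected]:8080',
        #          'https': 'https://proxy.com:8081'}.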
proxy_list = dictionary_args.pop(PROXY, [])
dictionary_args[PROXY] = {}
for proxy in proxy_list:
# Split the proxy into protocol and hostname
split_proxy = proxy.split(':', 1)
dictionary_args[PROXY][split_proxy[0]] = proxy
self.args = dictionary_args
return self.args
def get_profile(self, default):
if self.args.no_profile:
return None
if self.args.profile is not None:
return self.args.profile
else:
return default
def _get_profile_names(self):
profile_names = list()
for profile in self.profiles:
profile_names.append(str(profile.name))
return profile_names
def _get_profile(self, name):
empty_profile = Profile({'name': 'none'})
for profile in self.profiles:
if name == profile.name:
return profile
return empty_profile
def _add_global_options(self, parser):
profiles_group = parser.add_argument_group(
title='Profiles',
description='Indicates which profile to use, if any, for variable substitution'
)
profiles_mx_group = profiles_group.add_mutually_exclusive_group()
profiles_mx_group.add_argument(
'--profile',
'-p',
choices=self._get_profile_names(),
help='The name of the profile to use for variable substitution'
)
profiles_mx_group.add_argument(
'--no-profile',
action="store_true",
default=False,
help='No profile will be used for variable substitution'
)
parser.add_argument(
'--' + DEBUG,
'-d',
action='store_true',
help=argparse.SUPPRESS
)
return parser
|
StarcoderdataPython
|
3312891
|
# We use word2vec instead of glove embedding in this file
# This word2vec is a self-trained one
import argparse
import json
import os
import pickle
from itertools import chain
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas
import seaborn as sns
from gensim.models import Word2Vec
from tensorflow.python.keras import Sequential
from tensorflow.python.keras.layers import Bidirectional, Embedding, Dropout, SpatialDropout1D, Dense, LSTM, \
BatchNormalization
from tensorflow.python.keras.optimizer_v2.adam import Adam
from tensorflow.python.keras.utils.vis_utils import plot_model
from tensorflow.python.ops.init_ops import Constant
from tqdm import trange
# Set up an argument parser
parser = argparse.ArgumentParser()
parser.add_argument("--epoch", type=int, default=300, required=False)
parser.add_argument("--bs", type=int, default=64, required=False)
parser.add_argument("--lr", type=float, default=0.001, required=False)
parser.add_argument("--model", type=str, choices=["lstm_bilstm", "bilstm", "bilstm_bilstm"], default="bilstm_bilstm",
required=False, help="The model to train the NER")
parser.add_argument("--layers", type=int, default=2, required=False, help="The number of BiLSTM layers you want to try")
args = parser.parse_args()
print(args)
# Set up some parameter we can use
epochs = args.epoch
BS = args.bs
LR = args.lr
# Load the data for three splits
train_dict = pickle.load(open("./data/train.pkl", 'rb'))
val_dict = pickle.load(open("./data/val.pkl", 'rb'))
test_dict = pickle.load(open("./data/test.pkl", 'rb'))
total_list = train_dict["word_seq"] + val_dict["word_seq"] + test_dict["word_seq"]
model = Word2Vec(total_list, size=300, window=5, min_count=1, workers=4)
# model.train(val_dict["word_seq"], total_examples=2950, epochs=5)
# model.train(test_dict["word_seq"], total_examples=2950, epochs=5)
print("Train word2vec model down.")
# Give every word that appears in our corpus its word2vec embedding; randomly initialize the ones that are missing
encoded_dict = {}
count = 0
total = 0
word2vec_keys = model.wv.vocab.keys()
dimension = 300
for i in [train_dict, val_dict, test_dict]:
for j in trange(len(i['word_seq'])):
for word in i['word_seq'][j]:
if word not in word2vec_keys:
encoded_dict[word] = np.random.rand(1, dimension)[0]
count += 1
total += 1
else:
encoded_dict[word] = model.wv[word]
total += 1
# Report how many words were found in the word2vec vocabulary and how many were randomly initialized
print("words not found {}".format(count))
print("words total {}".format(total))
print(len(encoded_dict))
if not os.path.exists('./word2vec'):
os.mkdir('./word2vec/')
np.save("./word2vec/encoded_dict_{}d.npy".format(dimension), encoded_dict)
# Build a dict that records the word to a single unique integer, and our encoded matrix for word embedding
encoded_word2id = {}
encoded_matrix = np.zeros((len(encoded_dict.keys()), dimension), dtype=float)
for i, word in enumerate(encoded_dict.keys()):
encoded_word2id[word] = i
encoded_matrix[i] = encoded_dict[word]
print(encoded_matrix.shape)
np.save("./word2vec/encoded_matrix_{}d.npy".format(dimension), encoded_matrix)
# Build the tag <--> index dictionaries
tag_list = list(set(chain(*train_dict["tag_seq"])))
tag_to_index_dict = {t: i for i, t in enumerate(tag_list)}
index_to_tag_dict = {i: t for i, t in enumerate(tag_list)}
# save out dictionary for generation
if not os.path.exists('./lstm_model'):
os.mkdir('./lstm_model/')
if not os.path.exists('./lstm_results'):
os.mkdir('./lstm_results')
np.save("./lstm_model/model_tag2id_e{}_bs{}.npy".format(epochs, BS), tag_to_index_dict)
np.save("./lstm_model/model_id2tag_e{}_bs{}.npy".format(epochs, BS), index_to_tag_dict)
# Load some parameters for deep learning
embedding_dim = dimension
num_words = len(encoded_dict)
input_length = 128
n_tags = len(tag_to_index_dict)
print(embedding_dim, num_words, input_length, n_tags)
# Set our model
def get_bi_lstm_model():
model = Sequential()
model.add(
Embedding(num_words, embedding_dim, embeddings_initializer=Constant(encoded_matrix), input_length=input_length,
trainable=True))
model.add(SpatialDropout1D(0.2))
model.add(BatchNormalization())
if args.model == "lstm_bilstm":
model.add(LSTM(128, return_sequences=True))
elif args.model == "bilstm_bilstm":
for _ in range(args.layers):
model.add(Bidirectional(LSTM(128, return_sequences=True)))
model.add(BatchNormalization())
model.add(Dropout(0.15))
adam = Adam(lr=LR, beta_1=0.9, beta_2=0.999)
model.add(Dense(units=n_tags, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
model.summary()
return model
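# Note: with input_length=128 the model maps an integer tensor of shape (batch, 128) to
# per-token tag probabilities of shape (batch, 128, n_tags), which is why it is compiled
# with sparse_categorical_crossentropy over integer tag ids.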
# Define the function to train our model
def train_model(X, y, val_X, val_y, model):
hist = model.fit(X, y, batch_size=BS, verbose=1, epochs=epochs, validation_data=(val_X, val_y), shuffle=True)
return hist
# build our model and print the summary
model_bi_lstm_lstm = get_bi_lstm_model()
try:
plot_model(model_bi_lstm_lstm, show_shapes=True)
except ImportError:
pass
# Use the dict we've prepared before to do the embedding and transformation
train_input = np.array(
[[encoded_word2id[word] for word in train_dict['word_seq'][i]] for i in range(len(train_dict['word_seq']))])
val_input = np.array(
[[encoded_word2id[word] for word in val_dict['word_seq'][i]] for i in range(len(val_dict['word_seq']))])
test_input = np.array(
[[encoded_word2id[word] for word in test_dict['word_seq'][i]] for i in range(len(test_dict['word_seq']))])
train_output = np.array(
[[tag_to_index_dict[tag] for tag in train_dict['tag_seq'][i]] for i in range(len(train_dict['tag_seq']))])
val_output = np.array(
[[tag_to_index_dict[tag] for tag in val_dict['tag_seq'][i]] for i in range(len(val_dict['tag_seq']))])
# Check the shape of our input, their first dimension must be the same
print(train_input.shape, val_input.shape, test_input.shape)
print(train_output.shape, val_output.shape)
# Train our model and save the loss recording
history = train_model(train_input, train_output, val_input, val_output, model_bi_lstm_lstm)
# Do some visualization
sns.set_style(style="darkgrid")
sns.set(font_scale=1.75)
plt.rcParams["figure.figsize"] = (30, 15)
mpl.use('Agg')
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
# plt.show()
plt.savefig(
'./lstm_results/accuracy_BS{}E{}LR{}_{}d_{}layer.png'.format(BS, epochs, LR, dimension,
args.layers))
plt.clf()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
# plt.show()
plt.savefig(
'./lstm_results/model_loss_BS{}E{}LR{}_{}d_{}layers.png'.format(BS, epochs, LR,
dimension, args.layers))
print("save images down.")
# Save the validation accuracy for us to find the best model trained
np.save(
'./lstm_results/model_results_val_BS{}E{}LR{}_{}d_{}layers.npy'.format(BS, epochs, LR,
dimension, args.layers),
history.history['val_accuracy'])
print("save history validation data down.")
# Save our trained model and open up an answer csv, initializing the id column
# try:
# model_bi_lstm_lstm.save(
# './lstm_model/model_BS{}E{}LR{}_{}d_{}layers.pkl'.format(BS, epochs, LR, dimension, args.layers))
# except:
# pass
answer = pandas.DataFrame(columns=['id', 'labels'])
answer['id'] = test_dict['id']
# Predict on the test dict and save the results to the answer csv file
predict = model_bi_lstm_lstm.predict(test_input)
for i in range(len(answer)):
sentence_tag = []
for j in range(128):
tag = index_to_tag_dict[np.argmax(predict[i][j])]
sentence_tag.append(tag)
answer.loc[i, 'labels'] = json.dumps(sentence_tag)
answer.to_csv(
'./lstm_results/answer_BS{}E{}LR{}_{}d_{}layers.csv'.format(BS, epochs, LR, dimension,
args.layers), index=True)
print("save results csv down.")
|
StarcoderdataPython
|
4819546
|
from mmdet.models.losses import FocalLoss, SmoothL1Loss, binary_cross_entropy
from .chamfer_distance import ChamferDistance, chamfer_distance
__all__ = [
'FocalLoss', 'SmoothL1Loss', 'binary_cross_entropy', 'ChamferDistance',
'chamfer_distance'
]
|
StarcoderdataPython
|
1790093
|
from torch.utils.data import Dataset
import pymongo
import json
from collections import OrderedDict
import logging
logger = logging.getLogger(__name__)
class MongoWrapper:
"""
Load single turn Q,A data
"""
def __init__(self, config_path, filter_func=None):
"""
        1. MongoDB collections can be accessed through a single unified index
        2. The 'idx' field of each collection must guarantee count, order and uniqueness
        :param config_path: path to the db config file
"""
with open(config_path) as fp:
db_config = json.load(fp)
self.db_config = db_config
self.filter_func = filter_func
conn_str = db_config['MONGO_CONNECTION_STRING']
con_db = db_config['MONGO_CONNECTION_DB']
collection_list = db_config['COLLECTIONS']
self.connection = pymongo.MongoClient(conn_str)
self.db = self.connection.get_database(con_db)
self.collections = self._load_collections(collection_list)
self.meta_info = self._load_metainfo(collection_list)
self.ndoc = None
logging.info("[Mongo]: Loaded %s" % self.meta_info)
def __len__(self):
if not self.ndoc:
ndoc = 0
for value in self.meta_info.values():
ndoc += value['num_docs']
self.ndoc = ndoc
return self.ndoc
def __getitem__(self, idx):
docs = []
if isinstance(idx, slice):
for nidx in range(idx.start, idx.stop):
                collection_name, local_idx = self._convert_idx(nidx)
                data = self.collections[collection_name].find({'idx': local_idx})[0]
if self.filter_func:
data = self.filter_func(data)
doc = {'data': data, 'collection_name': collection_name}
docs.append(doc)
return docs
else:
collection_name, idx = self._convert_idx(idx)
data = self.collections[collection_name].find({'idx': idx})[0]
if self.filter_func:
data = self.filter_func(data)
doc = {'data': data, 'collection_name': collection_name}
docs.append(doc)
return docs
def _load_collections(self, collection_list):
if not isinstance(collection_list, list):
collection_list = [collection_list]
collections = dict()
for col in collection_list:
collections[col] = self.db[col]
logger.info("[Mongo]: %s is loaded" % col)
return collections
def _load_metainfo(self, collection_list):
meta_info_conn = self.db['meta_info']
meta_info = OrderedDict()
for item in list(meta_info_conn.find({})):
if item['collection_name'] not in collection_list:
continue
collection_name = item['collection_name']
sub_dict = {'num_docs': item['num_docs']}
meta_info.update({collection_name: sub_dict})
prev = 0
for name, info in meta_info.items():
sub_info = {'sidx': prev, 'eidx': prev + info['num_docs']}
prev = prev + info['num_docs']
info.update(sub_info)
return meta_info
def _convert_idx(self, idx):
"""
        Convert a global idx into (collection_name, local idx) according to the collection boundaries
:param idx:
:return:
"""
collection_name = None
for name, info in self.meta_info.items():
if idx >= info['sidx'] and idx < info['eidx']:
idx = idx - info['sidx']
collection_name = name
break
return collection_name, idx
def _get_update_op(self, doc, fields):
if not isinstance(fields, list):
fields = [fields]
set_dict = dict()
for f in fields:
set_dict[f] = doc[f]
return pymongo.UpdateOne({'_id': doc['_id']}, {"$set": set_dict}, upsert=True)
def _get_insert_op(self, doc):
return pymongo.InsertOne(doc)
def update_docs(self, docs, fields):
if not isinstance(docs, list):
docs = [docs]
ops = []
for doc in docs:
op = self._get_update_op(doc, fields)
ops.append(op)
return ops
def insert_docs(self, docs, collection_name):
if collection_name not in self.collections:
raise KeyError
if not isinstance(docs, list):
docs = [docs]
ops = []
for doc in docs:
op = self._get_insert_op(doc)
ops.append(op)
# logging.info(ops[:10])
self.collections[collection_name].bulk_write(ops, ordered=False)
def update_meta_info(self, collection_name):
is_update = False
if collection_name in self.meta_info:
is_update = True
total_docs = self.collections[collection_name].count_documents({})
logging.info("[Update]: collection - %s " % collection_name)
logging.info("[Update]: total docs - %s " % total_docs)
logging.info("[Update]: meta info - %s " % is_update)
if is_update:
self.db['meta_info'].update_one({'collection_name': collection_name},
{'$set':{'num_docs': total_docs}})
else:
self.db['meta_info'].insert_one({'collection_name': collection_name,
'num_docs': total_docs})
collection_list = self.db_config['COLLECTIONS']
self.meta_info = self._load_metainfo(collection_list)
def export_to_file(self, fpath, collection_name):
logging.info("[Export]: %s" % fpath)
info = self.meta_info[collection_name]
info = dict(info)
num_docs = int(info['num_docs'])
with open(fpath, 'w') as fp:
text_lines = []
for idx in range(num_docs):
doc = self.__getitem__(idx)[0]
text = doc['data']['filt_text']
text += '\n'
text_lines.append(text)
if idx % 10000 == 0:
fp.writelines(text_lines)
text_lines = []
logging.info("[Write]: %d" % idx)
def create_single_index(self, collection_name, index_name, order=1):
self.collections[collection_name].create_index([(index_name, order)])
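# A minimal usage sketch (hypothetical config path, not part of the original
# project): the JSON config is assumed to provide MONGO_CONNECTION_STRING,
# MONGO_CONNECTION_DB and COLLECTIONS, and a 'meta_info' collection is assumed
# to describe the document count of every collection.
if __name__ == "__main__":
    wrapper = MongoWrapper("db_config.json")  # hypothetical config file
    print("total documents:", len(wrapper))
    first = wrapper[0][0]  # __getitem__ always returns a list of docs
    print(first["collection_name"], sorted(first["data"].keys()))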
|
StarcoderdataPython
|
1619897
|
# -*- coding: utf-8 -*-
# @Author : LG
"""
Runtime: 48 ms, faster than 94.04% of Python3 submissions
Memory usage: 15.3 MB, better than 26.11% of Python3 submissions
Approach:
    Recursion.
    See the code comments for the implementation details.
"""
class Solution:
def hasPathSum(self, root: TreeNode, sum: int) -> bool:
def find(root, current):
if root:
current += root.val
if root.left:
                    if find(root.left, current):  # recurse into the left subtree
return True
if root.right:
                    if find(root.right, current):  # recurse into the right subtree
return True
                if not (root.left or root.right):  # leaf node: check whether the accumulated sum equals the target
if current == sum:
return True
if find(root, 0):
return True
else:
return False
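# Usage sketch on the tree [5, 4, 8] with target sum 9 (path 5 -> 4):
if __name__ == "__main__":
    root = TreeNode(5, TreeNode(4), TreeNode(8))
    print(Solution().hasPathSum(root, 9))  # expected: True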
|
StarcoderdataPython
|
1658865
|
<reponame>tarvitz/face-check<gh_stars>0
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils.translation import ugettext_lazy as _
class User(AbstractUser):
"""
standard django user
"""
is_verified = models.BooleanField(
_('is verified'),
default=False,
help_text=_("flag that shows if user is verified and can "
"obtain main functionality")
)
class Secret(models.Model):
content = models.TextField(
_("content"),
help_text=_("secret content, i.e. password or something you should "
"not tell without using secure channels")
)
    created_at = models.DateField(auto_now_add=True)
    updated_at = models.DateField(auto_now=True)
def __str__(self):
return self.content
class Meta:
verbose_name = _("Secret")
verbose_name_plural = _("Secrets")
ordering = ['-created_at']
|
StarcoderdataPython
|
1638900
|
<reponame>arrdem/source
import os
import re
import sys
def main(opts, args):
"""Usage: python rollback.py date
Parse /var/log/pacman.log, enumerating package transactions since the
specified date and building a plan for restoring the state of your system to
what it was at the specified date.
Assumes:
- /var/log/pacman.log has not been truncated
- /var/cache/pacman/pkg has not been flushed and still contains all required
packages
- The above paths are Arch default and have not been customized
- That it is not necessary to remove any "installed" packages
Note: no attempt is made to inspect the dependency graph of packages to be
downgraded to detect when a package is already transitively listed for
downgrading. This can create some annoying errors where eg. systemd will be
downgraded, meaning libsystemd will also be downgraded, but pacman considers
explicitly listing the downgrade of libsystemd when it will already be
transitively downgraded an error.
"""
date, = args
print("Attempting to roll back package state to that of {0}...\n"
.format(date),
file=sys.stderr)
# These patterns can't be collapsed because we want to select different
# version identifying strings depending on which case we're in. Not ideal,
# but it works.
# Ex. [2017-04-01 09:51] [ALPM] upgraded filesystem (2016.12-2 -> 2017.03-2)
upgraded_pattern = re.compile(
".*? upgraded (?P<name>\w+) \((?P<from>[^ ]+) -> (?P<to>[^\)]+)\)")
# Ex: [2018-02-23 21:18] [ALPM] downgraded emacs (25.3-3 -> 25.3-2)
downgraded_pattern = re.compile(
".*? downgraded (?P<name>\w+) \((?P<to>[^ ]+) -> (?P<from>[^\)]+)\)")
# Ex: [2017-03-31 07:05] [ALPM] removed gdm (3.22.3-1)
removed_pattern = re.compile(
".*? removed (?P<name>\w+) \((?P<from>[^ ]+)\)")
checkpoint = {}
flag = False
with open("/var/log/pacman.log") as logfile:
for line in logfile:
if date in line:
flag = True
elif not flag:
continue
match = re.match(upgraded_pattern, line)\
or re.match(downgraded_pattern, line)\
or re.match(removed_pattern, line)
if match:
package = match.group("name")
from_rev = match.group("from")
if package not in checkpoint:
checkpoint[package] = from_rev
continue
print("Checkpoint state:")
for k in checkpoint.keys():
print("{0} -> {1}".format(k, checkpoint[k]), file=sys.stderr)
pkgcache = "/var/cache/pacman/pkg"
pkgs = os.listdir(pkgcache)
pkgnames = ["{0}-{1}".format(k, v) for k, v in checkpoint.items()]
selected_pkgs = set([os.path.join(pkgcache, p)
for n in pkgnames
for p in pkgs
if n in p])
print("Suggested incantation:\n", file=sys.stderr)
print("sudo pacman --noconfirm -U {}"
.format("\\\n ".join(selected_pkgs)))
if __name__ == "__main__":
main(None, sys.argv[1:])
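# For reference, upgraded_pattern applied to the sample log line
# "[2017-04-01 09:51] [ALPM] upgraded filesystem (2016.12-2 -> 2017.03-2)"
# yields name="filesystem", from="2016.12-2", to="2017.03-2", so the checkpoint
# would map "filesystem" back to "2016.12-2".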
|
StarcoderdataPython
|
1616411
|
import sys
import argparse
import os
from video_classification.generator.attention_cnn_lstm_classifer import BidirectionalLSTMVideoClassifier
def check_args(args):
if not os.path.exists(args.model_path):
        print('Model path {} does not exist, please check.'.format(args.model_path))
exit(1)
if not os.path.exists(args.video_path):
        print('Video path {} does not exist, please check.'.format(args.video_path))
exit(1)
if not os.path.exists(args.config_path):
        print('Config file {} does not exist, please check.'.format(args.config_path))
exit(1)
return args
def parse_args(args):
parser = argparse.ArgumentParser(description='Simple script for attention integrated CNN + LSTM video classification')
    parser.add_argument('cnn_model', help='Specify which CNN model is used (VGG16/VGG19/InceptionV3/Resnet50/Xception)')
parser.add_argument('model_path', help='Specify the model path')
parser.add_argument('video_path', help='Specify the input video path')
parser.add_argument('config_path', help='Specify the config file path')
return parser.parse_args(args)
def main(args=None):
if args is None:
args = sys.argv[1:]
args = parse_args(args)
classifier = BidirectionalLSTMVideoClassifier(args.cnn_model, args.model_path)
predicted_label = classifier.predict(args.video_path, args.config_path)
print('{} belongs to {}'.format(args.video_path, predicted_label))
if __name__ == '__main__':
main()
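# Example invocation (hypothetical script name and file paths):
#   python predict_video.py VGG16 ./models/bilstm_vgg16.h5 ./videos/sample.mp4 ./config.json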
|
StarcoderdataPython
|
3349317
|
import csv
class IngAutCSV(csv.Dialect):
delimiter = ";"
quotechar = '"'
quoting = csv.QUOTE_MINIMAL
lineterminator = "\r\n"
def do_import(filename, store):
ing_file = open(filename, newline="", encoding="latin1")
# Convert the actual data
for record in csv.DictReader(ing_file, dialect=IngAutCSV):
t = store.new_transaction()
t.Date = record["ValutaDatum"].replace(".", "/")
t.Memo = record["Text"]
t.Inflow = record["Haben"]
t.Outflow = record["Soll"]
store.record_transaction(t)
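# do_import expects a 'store' object exposing new_transaction() and
# record_transaction(); the real store lives elsewhere in this project, so the
# class below is only a minimal in-memory sketch of that assumed interface.
class _DemoStore:
    class _Tx(object):
        pass

    def __init__(self):
        self.transactions = []

    def new_transaction(self):
        return self._Tx()

    def record_transaction(self, t):
        self.transactions.append(t)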
|
StarcoderdataPython
|
193309
|
import pickle
from collections import defaultdict, namedtuple
import numpy as np
import argparse
import os
import model.config as config
import preprocessing.util as util
from termcolor import colored
import tensorflow as tf
class VocabularyCounter(object):
"""counts the frequency of each word and each character in the corpus. With each
file that it processes it increases the counters. So one frequency vocab for all the files
that it processes."""
def __init__(self, lowercase_emb=False):
import gensim
self.model = gensim.models.KeyedVectors.load_word2vec_format(
config.base_folder+"data/basic_data/wordEmbeddings/Word2Vec/GoogleNews-vectors-negative300.bin", binary=True)
"""lowercase_emb=False if True then we lowercase the word for counting of
frequencies and hence for finding the pretrained embedding."""
self.word_freq = defaultdict(int)
self.char_freq = defaultdict(int) # how many times each character is encountered
self.lowercase_emb = lowercase_emb
self.not_in_word2vec_cnt = 0
self.all_words_cnt = 0
def add(self, filepath):
"""the file must be in the new dataset format."""
with open(filepath) as fin:
for line in fin:
if line.startswith("DOCSTART_") or line.startswith("DOCEND") or\
line.startswith("MMSTART_") or line.startswith("MMEND") or \
line.startswith("*NL*"):
continue
line = line.rstrip() # omit the '\n' character
word = line.lower() if self.lowercase_emb else line
self.all_words_cnt += 1
if word not in self.model:
self.not_in_word2vec_cnt += 1
else:
self.word_freq[word] += 1
for c in line:
self.char_freq[c] += 1
def print_statistics(self, word_edges=None,
char_edges=None):
"""Print some statistics about word and char frequency."""
if word_edges is None:
word_edges = [1, 2, 3, 6, 11, 21, 31, 51, 76, 101, 201, np.inf]
if char_edges is None:
char_edges = [1, 6, 11, 21, 51, 101, 201, 501, 1001, 2001, np.inf]
print("not_in_word2vec_cnt = ", self.not_in_word2vec_cnt)
print("all_words_cnt = ", self.all_words_cnt)
print("some frequency statistics. The bins are [...) ")
for d, name, edges in zip([self.word_freq, self.char_freq], ["word", "character"], [word_edges, char_edges]):
hist_values, _ = np.histogram(list(d.values()), edges)
cum_sum = np.cumsum(hist_values[::-1])
print(name, " frequency histogram, edges: ", edges)
print("absolute values: ", hist_values)
print("absolute cumulative (right to left): ", cum_sum[::-1])
print("probabilites cumulative (right to left):", (cum_sum / np.sum(hist_values))[::-1])
def serialize(self, folder=None, name="vocab_freq.pickle"):
if folder is None:
folder = config.base_folder+"data/vocabulary/"
if not os.path.exists(folder):
os.makedirs(folder)
with open(folder+name, 'wb') as handle:
pickle.dump((self.word_freq, self.char_freq), handle)
def count_datasets_vocabulary(self):
# **YD** change the directory location
new_dataset_folder = config.base_folder+"data/new_datasets/"
# new_dataset_folder = config.base_folder + "data/yd_datasets/"
"""
datasets = ['aida_train.txt', 'aida_dev.txt', 'aida_test.txt', 'ace2004.txt',
'aquaint.txt', 'clueweb.txt', 'msnbc.txt', 'wikipedia.txt']
"""
for dataset in util.get_immediate_files(new_dataset_folder):
dataset = os.path.basename(os.path.normpath(dataset))
print("Processing dataset: ", dataset)
self.add(new_dataset_folder+dataset)
self.print_statistics()
self.serialize(folder=config.base_folder+"data/vocabulary/",
name="vocab_freq.pickle")
def build_word_char_maps():
output_folder = config.base_folder+"data/tfrecords/"+args.experiment_name+"/"
if not os.path.exists(output_folder):
os.makedirs(output_folder)
with open(config.base_folder+"data/vocabulary/vocab_freq.pickle", 'rb') as handle:
word_freq, char_freq = pickle.load(handle)
word2id = dict()
id2word = dict()
char2id = dict()
id2char = dict()
wcnt = 0 # unknown word
word2id["<wunk>"] = wcnt
id2word[wcnt] = "<wunk>"
wcnt += 1
ccnt = 0 # unknown character
char2id["<u>"] = ccnt
id2char[ccnt] = "<u>"
ccnt += 1
# for every word in the corpus (we have already filtered out the words that are not in word2vec)
for word in word_freq:
if word_freq[word] >= args.word_freq_thr:
word2id[word] = wcnt
id2word[wcnt] = word
wcnt += 1
for c in char_freq:
if char_freq[c] >= args.char_freq_thr:
char2id[c] = ccnt
id2char[ccnt] = c
ccnt += 1
assert(len(word2id) == wcnt)
assert(len(char2id) == ccnt)
print("words in vocabulary: ", wcnt)
print("characters in vocabulary: ", ccnt)
with open(output_folder+"word_char_maps.pickle", 'wb') as handle:
pickle.dump((word2id, id2word, char2id, id2char, args.word_freq_thr,
args.char_freq_thr), handle)
import gensim
model = gensim.models.KeyedVectors.load_word2vec_format(
config.base_folder+"data/basic_data/wordEmbeddings/Word2Vec/GoogleNews-vectors-negative300.bin", binary=True)
embedding_dim = len(model['queen'])
embeddings_array = np.empty((wcnt, embedding_dim)) # id2emb
embeddings_array[0] = np.zeros(embedding_dim)
for i in range(1, wcnt):
embeddings_array[i] = model[id2word[i]]
np.save(output_folder+'embeddings_array.npy', embeddings_array)
return word2id, char2id
def build_word_char_maps_restore():
output_folder = config.base_folder+"data/tfrecords/"+args.experiment_name+"/"
with open(output_folder+"word_char_maps.pickle", 'rb') as handle:
word2id, _, char2id, _, _, _ = pickle.load(handle)
return word2id, char2id
class Chunker(object):
def __init__(self):
self.separator = args.chunking
self.chunk_ending = {'DOCEND'}
if self.separator == "per_paragraph":
self.chunk_ending.add('*NL*')
if self.separator == "per_sentence":
self.chunk_ending.add('.')
self.chunk_ending.add('*NL*')
self.parsing_errors = 0
def new_chunk(self):
self.chunk_words = []
self.begin_gm = [] # the starting positions of gold mentions
self.end_gm = [] # the end positions of gold mentions
self.ground_truth = [] # list with the correct entity ids
def compute_result(self, docid):
chunk_id = docid
if self.separator == "per_paragraph":
chunk_id = chunk_id + "&*" + str(self.par_cnt)
if self.separator == "per_sentence":
chunk_id = chunk_id + "&*" + str(self.par_cnt) + "&*" + str(self.sent_cnt)
result = (chunk_id, self.chunk_words, self.begin_gm, self.end_gm, self.ground_truth)
# correctness checks. not necessary
no_errors_flag = True
if len(self.begin_gm) != len(self.end_gm) or \
len(self.begin_gm) != len(self.ground_truth):
no_errors_flag = False
for b, e in zip(self.begin_gm, self.end_gm):
if e <= b or b >= len(self.chunk_words) or e > len(self.chunk_words):
no_errors_flag = False
self.new_chunk()
if no_errors_flag == False:
self.parsing_errors += 1
print("chunker parse error: ", result)
return None
else:
return result
def process(self, filepath):
with open(filepath) as fin:
self.new_chunk()
docid = ""
# paragraph and sentence counter are not actually useful. only for debugging purposes.
self.par_cnt = 0 # paragraph counter (useful if we work per paragraph)
self.sent_cnt = 0 # sentence counter (useful if we work per sentence)
for line in fin:
line = line.rstrip() # omit the '\n' character
if line in self.chunk_ending:
                    if len(self.chunk_words) > 0:  # if we have consecutive *NL* *NL* do not return empty chunks
temp = self.compute_result(docid)
if temp is not None:
yield temp
# do not add the chunk separator, no use
if line == '*NL*':
self.par_cnt += 1
self.sent_cnt = 0
if line == '.':
self.sent_cnt += 1
elif line == '*NL*':
self.par_cnt += 1
self.sent_cnt = 0
# do not add this in our words list
elif line == '.':
self.sent_cnt += 1
self.chunk_words.append(line)
elif line.startswith('MMSTART_'):
ent_id = line[8:] # assert that ent_id in wiki_name_id_map
self.ground_truth.append(ent_id)
self.begin_gm.append(len(self.chunk_words))
elif line == 'MMEND':
self.end_gm.append(len(self.chunk_words))
elif line.startswith('DOCSTART_'):
docid = line[9:]
self.par_cnt = 0
self.sent_cnt = 0
else:
self.chunk_words.append(line)
print(filepath, " chunker parsing errors: ", self.parsing_errors)
self.parsing_errors = 0
GmonlySample = namedtuple("GmonlySample",
["chunk_id", "chunk_words", 'begin_gm', "end_gm",
"ground_truth", "cand_entities", "cand_entities_scores"])
AllspansSample = namedtuple("AllspansSample",
["chunk_id", "chunk_words", "begin_spans", "end_spans",
"ground_truth", "cand_entities", "cand_entities_scores",
"begin_gm", "end_gm"])
class SamplesGenerator(object):
def __init__(self, mode="allspans"):
self.mode = mode
self._generator = Chunker()
self.fetchFilteredCoreferencedCandEntities = util.FetchFilteredCoreferencedCandEntities(args)
self.all_gm_misses = 0
self.all_gt_misses = 0
self.all_gm = 0 # all the gm encountered in all the datasets
def set_gmonly_mode(self):
self.mode = "gmonly"
def set_allspans_mode(self):
self.mode = "allspans"
def is_gmonly_mode(self):
return True if self.mode == "gmonly" else False
def is_allspans_mode(self):
return True if self.mode == "allspans" else False
def process(self, filepath):
if self.is_allspans_mode():
return self._process_allspans(filepath)
else:
return self._process_gmonly(filepath)
def _process_allspans(self, filepath):
gt_misses = 0
gm_misses = 0
gm_this_file = 0 # how many gold mentions are in this document - dataset. so we can find percentage for misses
max_mention_width_violations = 0
for chunk in self._generator.process(filepath):
self.fetchFilteredCoreferencedCandEntities.init_coref(el_mode=True)
begin_spans = []
end_spans = []
cand_entities = [] # list of lists candidate entities
cand_entities_scores = []
chunk_id, chunk_words, begin_gm, end_gm, ground_truth = chunk
gm_this_file += len(begin_gm)
for left, right in self.all_spans(chunk_words):
cand_ent, scores = self.fetchFilteredCoreferencedCandEntities.process(left, right, chunk_words)
if cand_ent is not None:
begin_spans.append(left)
end_spans.append(right)
cand_entities.append(cand_ent)
cand_entities_scores.append(scores)
if args.calculate_stats:
# check if gold mentions are inside the candidate spans and if yes check if ground truth is in cand ent.
gm_spans = list(zip(begin_gm, end_gm)) # [(3, 5), (10, 11), (15, 18)]
all_spans = list(zip(begin_spans, end_spans))
for i, gm_span in enumerate(gm_spans):
if gm_span not in all_spans:
gm_misses += 1
#print("gm not in spans\t\t\t", colored(' '.join(chunk_words[gm_span[0]:gm_span[1]]), 'red'))
elif ground_truth[i] not in cand_entities[all_spans.index(gm_span)]:
gt_misses += 1
#print("gt not in cand ent", colored(' '.join(chunk_words[gm_span[0]:gm_span[1]]), 'green'))
#print("gt: ", ground_truth[i], "cand_ent: ", cand_entities[all_spans.index(gm_span)])
for b, e in zip(begin_gm, end_gm):
if e - b > args.max_mention_width:
max_mention_width_violations += 1
if begin_spans: # there are candidate spans in the processed text
yield AllspansSample(chunk_id, chunk_words, begin_spans, end_spans,
ground_truth, cand_entities, cand_entities_scores,
begin_gm, end_gm)
if args.calculate_stats:
print("max_mention_width_violations :", max_mention_width_violations)
print("gt_misses", gt_misses)
print("gm_misses", gm_misses)
print("gm_this_file: ", gm_this_file)
print("recall % : ", (1 - (gm_misses+gt_misses)/gm_this_file)*100, " %")
self.all_gt_misses += gt_misses
self.all_gm_misses += gm_misses
self.all_gm += gm_this_file
@staticmethod
def all_spans(chunk_words):
# this function produces all possible text spans that do not include spans separators (fullstops).
# divide the list of words to lists of lists based on spans_separator.
# e.g. if chunk_words is for the whole document divide it to sentences (a list of
# sentences) since no span extend above a fullstop.
separation_indexes = []
spans_separator = set(config.spans_separators)
for idx, word in enumerate(chunk_words):
if word in spans_separator:
separation_indexes.append(idx)
separation_indexes.append(len(chunk_words))
def all_spans_aux(begin_idx, end_idx):
for left_idx in range(begin_idx, end_idx):
for length in range(1, args.max_mention_width + 1):
if left_idx + length > end_idx:
break
yield left_idx, left_idx + length
begin_idx = 0
for end_idx in separation_indexes:
for left, right in all_spans_aux(begin_idx, end_idx):
# print(left, right, chunk_words[left:right])
# print(left, right, ' '.join(chunk_words[left:right])
yield left, right
begin_idx = end_idx + 1
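    # For illustration (assuming '.' is among config.spans_separators and
    # args.max_mention_width == 3): with chunk_words = ['The', 'cat', '.', 'It', 'ran'],
    # all_spans yields (0,1), (0,2), (1,2) for the first sentence and
    # (3,4), (3,5), (4,5) for the second; no span crosses the fullstop at index 2.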
def _process_gmonly(self, filepath):
gt_misses = 0
gm_misses = 0
gm_this_file = 0
max_mention_width_violations = 0
for chunk in self._generator.process(filepath):
self.fetchFilteredCoreferencedCandEntities.init_coref(el_mode=False)
cand_entities = [] # list of lists candidate entities
cand_entities_scores = []
chunk_id, chunk_words, begin_gm, end_gm, ground_truth = chunk
gm_this_file += len(begin_gm)
for left, right, gt in zip(begin_gm, end_gm, ground_truth):
cand_ent, scores = self.fetchFilteredCoreferencedCandEntities.process(left, right, chunk_words)
if cand_ent is None:
gm_misses += 1
cand_ent, scores = [], []
#print("gm not in p_e_m\t\t\t", colored(' '.join(chunk_words[left:right]), 'red'))
elif args.calculate_stats and gt not in cand_ent:
gt_misses += 1
#print("gt not in cand ent", colored(' '.join(chunk_words[left:right]), 'green'))
#print("gt: ", gt, "cand_ent: ", cand_ent)
if right - left > args.max_mention_width:
max_mention_width_violations += 1
#print(' '.join(chunk_words[left:right])
#print(cand_ent, scores)
cand_entities.append(cand_ent)
cand_entities_scores.append(scores)
            if begin_gm:  # not empty
yield GmonlySample(chunk_id, chunk_words, begin_gm, end_gm, ground_truth,
cand_entities, cand_entities_scores)
if args.calculate_stats:
print("max_mention_width_violations :", max_mention_width_violations)
print("gt_misses", gt_misses)
print("gm_misses", gm_misses)
print("gm_this_file", gm_this_file)
print("recall % : ", (1 - (gm_misses+gt_misses)/gm_this_file)*100, " %")
self.all_gt_misses += gt_misses
self.all_gm_misses += gm_misses
self.all_gm += gm_this_file
SampleEncoded = namedtuple("SampleEncoded",
["chunk_id",
"words", 'words_len', # list, scalar
'chars', 'chars_len', # list of lists, list
'begin_spans', "end_spans", 'spans_len', # the first 2 are lists, last is scalar
"cand_entities", "cand_entities_scores", 'cand_entities_labels', # lists of lists
'cand_entities_len', # list
"ground_truth", "ground_truth_len",
'begin_gm', 'end_gm']) # list
class EncoderGenerator(object):
"""receives samples Train or Test samples and encodes everything to numbers ready to
be transformed to tfrecords. Also filters out candidate entities that are not in the
entity universe."""
def __init__(self):
self._generator = SamplesGenerator()
self._word2id, self._char2id = build_word_char_maps()
#self._word2id, self._char2id = build_word_char_maps_restore() # alternative
self._wikiid2nnid = util.load_wikiid2nnid(args.entity_extension)
def set_gmonly_mode(self):
self._generator.set_gmonly_mode()
def set_allspans_mode(self):
self._generator.set_allspans_mode()
def is_gmonly_mode(self):
return self._generator.is_gmonly_mode()
def is_allspans_mode(self):
return self._generator.is_allspans_mode()
def process(self, filepath):
ground_truth_errors_cnt = 0
cand_entities_not_in_universe_cnt = 0
samples_with_errors = 0
for sample in self._generator.process(filepath):
words = []
chars = []
for word in sample.chunk_words:
words.append(self._word2id[word] if word in self._word2id
else self._word2id["<wunk>"])
chars.append([self._char2id[c] if c in self._char2id else self._char2id["<u>"]
for c in word])
chars_len = [len(word) for word in chars]
ground_truth_enc = [self._wikiid2nnid[gt] if gt in self._wikiid2nnid else self._wikiid2nnid["<u>"]
for gt in sample.ground_truth]
ground_truth_errors_cnt += ground_truth_enc.count(self._wikiid2nnid["<u>"]) # it is always zero
#print(colored("New sample", 'red'))
#print(sample)
if len(sample.begin_gm) != len(sample.end_gm) or \
len(sample.begin_gm) != len(ground_truth_enc):
samples_with_errors += 1
continue
if isinstance(sample, GmonlySample):
cand_entities, cand_entities_scores, cand_entities_labels, not_in_universe_cnt = \
self._encode_cand_entities_and_labels(
sample.cand_entities, sample.cand_entities_scores, sample.ground_truth)
yield SampleEncoded(chunk_id=sample.chunk_id,
words=words, words_len=len(words),
chars=chars, chars_len=chars_len,
begin_spans=sample.begin_gm, end_spans=sample.end_gm, spans_len=len(sample.begin_gm),
cand_entities=cand_entities, cand_entities_scores=cand_entities_scores,
cand_entities_labels=cand_entities_labels,
cand_entities_len=[len(t) for t in cand_entities],
ground_truth=ground_truth_enc, ground_truth_len=len(sample.ground_truth),
begin_gm=[], end_gm=[])
elif isinstance(sample, AllspansSample):
if len(sample.begin_spans) != len(sample.end_spans):
samples_with_errors += 1
continue
# for each span i have the gt or the value -1 if this span is not a gm
# and then i work in the same way as above
span_ground_truth = []
gm_spans = list(zip(sample.begin_gm, sample.end_gm)) # [(3, 5), (10, 11), (15, 18)]
for left, right in zip(sample.begin_spans, sample.end_spans):
if (left, right) in gm_spans:
span_ground_truth.append(sample.ground_truth[gm_spans.index((left, right))])
else:
span_ground_truth.append(-1) # this span is not a gm
cand_entities, cand_entities_scores, cand_entities_labels, not_in_universe_cnt = \
self._encode_cand_entities_and_labels(
sample.cand_entities, sample.cand_entities_scores, span_ground_truth)
yield SampleEncoded(chunk_id=sample.chunk_id,
words=words, words_len=len(words),
chars=chars, chars_len=chars_len,
begin_spans=sample.begin_spans, end_spans=sample.end_spans, spans_len=len(sample.begin_spans),
cand_entities=cand_entities, cand_entities_scores=cand_entities_scores,
cand_entities_labels=cand_entities_labels,
cand_entities_len=[len(t) for t in cand_entities],
ground_truth=ground_truth_enc, ground_truth_len=len(sample.ground_truth),
begin_gm=sample.begin_gm, end_gm=sample.end_gm)
cand_entities_not_in_universe_cnt += not_in_universe_cnt
print("ground_truth_errors_cnt =", ground_truth_errors_cnt)
print("cand_entities_not_in_universe_cnt =", cand_entities_not_in_universe_cnt)
print("encoder samples_with_errors =", samples_with_errors)
def _encode_cand_entities_and_labels(self, cand_entities_p, cand_entities_scores_p,
ground_truth_p):
"""receives cand_entities (list of lists), and ground_truth (list) and does the following:
1) removes cand ent that are not in our universe
2) creates a label 0, 1 if this candidate is correct or not (i.e. if the span is indeed a
gold mention (row of candidate entities array) and this specific candidate entity (column
        of candidate entities array) is correct). Returns the filtered cand_entities
and the corresponding label (they have the same shape)"""
cand_entities = []
cand_entities_scores = []
cand_entities_labels = []
not_in_universe_cnt = 0
for cand_ent_l, cand_scores_l, gt in zip(cand_entities_p, cand_entities_scores_p, ground_truth_p):
ent_l = []
score_l = []
label_l = []
for cand_ent, score in zip(cand_ent_l, cand_scores_l):
if cand_ent in self._wikiid2nnid: # else continue, this entity not in our universe
ent_l.append(self._wikiid2nnid[cand_ent])
score_l.append(score)
label_l.append(1 if cand_ent == gt else 0)
else:
not_in_universe_cnt += 1
cand_entities.append(ent_l)
cand_entities_scores.append(score_l)
cand_entities_labels.append(label_l)
return cand_entities, cand_entities_scores, cand_entities_labels, not_in_universe_cnt
class TFRecordsGenerator(object):
def __init__(self):
self._generator = EncoderGenerator()
def set_gmonly_mode(self):
self._generator.set_gmonly_mode()
def set_allspans_mode(self):
self._generator.set_allspans_mode()
def is_gmonly_mode(self):
return self._generator.is_gmonly_mode()
def is_allspans_mode(self):
return self._generator.is_allspans_mode()
@staticmethod
def _to_sequence_example(sample):
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
        # These two helpers create simple features: the first wraps a single integer,
        # the second wraps a whole list of integers as one feature.
def _int64_feature(value):
"""value is a simple integer."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _int64list_feature(value):
"""value is a list of integers."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _int64_feature_list(values):
""" values is a list of integers like the words (words = [2,4,6,8,10])
a feature list where each feature has only one number (a list with fixed
number of elements, specifically only one)"""
return tf.train.FeatureList(feature=[_int64_feature(v) for v in values])
def _int64list_feature_list(values):
""" like the chars = [[1,2,3], [4,5], [6], [7,8], [9,10,11,12]] a feature list where each feature can have variable
number of ements"""
return tf.train.FeatureList(feature=[_int64list_feature(v) for v in values])
def _floatlist_feature_list(values):
""" like the chars = [[0.1,0.2,0.3], [0.4,0.5]] a feature list where each feature can have variable
number of ements"""
def _floatlist_feature(value):
"""value is a list of integers."""
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
return tf.train.FeatureList(feature=[_floatlist_feature(v) for v in values])
context = tf.train.Features(feature={
"chunk_id": _bytes_feature(sample.chunk_id.encode('utf-8')),
"words_len": _int64_feature(sample.words_len),
"spans_len": _int64_feature(sample.spans_len),
"ground_truth_len": _int64_feature(sample.ground_truth_len)
})
feature_list = {
"words": _int64_feature_list(sample.words),
"chars": _int64list_feature_list(sample.chars),
"chars_len": _int64_feature_list(sample.chars_len),
"begin_span": _int64_feature_list(sample.begin_spans),
"end_span": _int64_feature_list(sample.end_spans),
"cand_entities": _int64list_feature_list(sample.cand_entities),
"cand_entities_scores": _floatlist_feature_list(sample.cand_entities_scores),
"cand_entities_labels": _int64list_feature_list(sample.cand_entities_labels),
"cand_entities_len": _int64_feature_list(sample.cand_entities_len),
"ground_truth": _int64_feature_list(sample.ground_truth)
}
if isinstance(sample, SampleEncoded):
feature_list["begin_gm"] = _int64_feature_list(sample.begin_gm)
feature_list["end_gm"] = _int64_feature_list(sample.end_gm)
feature_lists = tf.train.FeatureLists(feature_list=feature_list)
sequence_example = tf.train.SequenceExample(context=context, feature_lists=feature_lists)
return sequence_example
def process(self, filepath):
print("processing file: ", filepath)
#the name of the dataset. just extract the last part of path
filename = os.path.basename(os.path.normpath(filepath))[:-4] # omit the '.txt'
output_folder = config.base_folder+"data/tfrecords/"+args.experiment_name+"/"
output_folder += "gmonly/" if self.is_gmonly_mode() else "allspans/"
if not os.path.exists(output_folder):
os.makedirs(output_folder)
writer = tf.python_io.TFRecordWriter(output_folder+filename)
records_cnt = 0
for sample in self._generator.process(filepath):
#print(sample)
sequence_example = self._to_sequence_example(sample)
# write it to file
if sequence_example is not None:
writer.write(sequence_example.SerializeToString())
records_cnt += 1
writer.close()
print("records_cnt = ", records_cnt)
def create_tfrecords():
#new_dataset_folder = config.base_folder+"data/new_datasets/"
# **YD**
new_dataset_folder = config.base_folder + "data/yd_datasets/"
datasets = [os.path.basename(os.path.normpath(d)) for d in util.get_immediate_files(new_dataset_folder)]
print("datasets: ", datasets)
tfrecords_generator = TFRecordsGenerator()
tfrecords_generator.set_gmonly_mode()
for file in datasets:
tfrecords_generator.process(filepath=new_dataset_folder+file)
tfrecords_generator.set_allspans_mode()
for file in datasets:
tfrecords_generator.process(filepath=new_dataset_folder+file)
class PrintSamples(object):
def __init__(self, only_misses=True):
_, self.wiki_id_name_map = util.load_wiki_name_id_map()
self.only_misses = only_misses
def print_candidates(self, ent_ids_list):
"""takes as input a list of ent_id and returns a string. This string has each ent_id
together with the corresponding name (in the name withspaces are replaced by underscore)
and candidates are separated with a single space. e.g. ent_id,Barack_Obama ent_id2,US_President"""
acc = []
for ent_id in ent_ids_list:
acc.append(ent_id + "," + self.wiki_id_name_map[ent_id].replace(' ', '_'))
return ' '.join(acc)
def print_sample(self, sample):
chunk_words, begin_gm, end_gm, ground_truth, cand_entities = \
sample.chunk_words, sample.begin_gm, sample.end_gm, sample.ground_truth, sample.cand_entities
if isinstance(sample, GmonlySample):
misses_idx = []
for i, (gt, cand_ent) in enumerate(zip(ground_truth, cand_entities)):
if gt not in cand_ent:
misses_idx.append(i) # miss detected
if self.only_misses and misses_idx:
print(colored("New sample", 'red'))
print(' '.join(chunk_words))
for i in misses_idx:
message = ' '.join(chunk_words[begin_gm[i]:end_gm[i]]) + "\tgt=" + \
self.print_candidates([ground_truth[i]]) + \
"\tCandidates: " + self.print_candidates(cand_entities[i])
print(colored(message, 'yellow'))
if self.only_misses == False:
print(colored("New sample", 'red'))
print(' '.join(chunk_words))
for i in range(len(begin_gm)):
message = ' '.join(chunk_words[begin_gm[i]:end_gm[i]]) + "\tgt=" + \
self.print_candidates([ground_truth[i]]) + \
"\tCandidates: " + self.print_candidates(cand_entities[i])
print(colored(message, 'yellow' if i in misses_idx else 'white'))
elif isinstance(sample, AllspansSample):
begin_spans, end_spans = sample.begin_spans, sample.end_spans
gm_spans = list(zip(begin_gm, end_gm)) # [(3, 5), (10, 11), (15, 18)]
all_spans = list(zip(begin_spans, end_spans))
print(colored("New sample", 'red'))
print(' '.join(chunk_words))
for i, gm_span in enumerate(gm_spans):
if gm_span not in all_spans:
message = ' '.join(chunk_words[begin_gm[i]:end_gm[i]]) + "\tgt=" + \
self.print_candidates([ground_truth[i]]) + "\tgm_miss"
print(colored(message, 'magenta'))
elif ground_truth[i] not in cand_entities[all_spans.index(gm_span)]:
message = ' '.join(chunk_words[begin_gm[i]:end_gm[i]]) + "\tgt=" + \
self.print_candidates([ground_truth[i]]) + "\tgt_miss Candidates: " + \
self.print_candidates(cand_entities[all_spans.index(gm_span)])
print(colored(message, 'yellow'))
if self.only_misses == False:
# then also print all the spans and their candidate entities
for left, right, cand_ent in zip(begin_spans, end_spans, cand_entities):
# if span is a mention and includes gt then green color, otherwise white
if (left, right) in gm_spans and ground_truth[gm_spans.index((left, right))] in cand_ent:
message = ' '.join(chunk_words[left:right]) + "\tgt=" + \
self.print_candidates([ground_truth[gm_spans.index((left, right))]]) + \
"\tgm_gt_hit Candidates: " + \
self.print_candidates(cand_ent)
print(colored(message, 'green'))
else:
message = ' '.join(chunk_words[left:right]) + \
"\t not a mention Candidates: " + \
self.print_candidates(cand_ent)
print(colored(message, 'white'))
def create_entity_universe(gmonly_files=None, allspans_files=None, printSamples=None):
new_dataset_folder = config.base_folder+"data/new_datasets/"
if gmonly_files is None:
gmonly_files = []
if allspans_files is None:
allspans_files = ['aida_train.txt', 'aida_dev.txt', 'aida_test.txt', 'ace2004.txt',
'aquaint.txt', 'clueweb.txt', 'msnbc.txt', 'wikipedia.txt']
print("gmonly_files: ", gmonly_files)
print("allspans_files: ", allspans_files)
def create_entity_universe_aux(generator, datasets):
entities_universe = set()
for dataset in datasets:
print("Processing dataset: ", dataset)
for sample in generator.process(filepath=new_dataset_folder+dataset):
entities_universe.update(*sample.cand_entities)
entities_universe.update(sample.ground_truth)
if printSamples:
printSamples.print_sample(sample)
print("Overall statistics: ")
print("all_gm_misses: ", generator.all_gm_misses)
print("all_gt_misses: ", generator.all_gt_misses)
print("all_gm: ", generator.all_gm)
print("recall % : ", (1 - (generator.all_gm_misses+generator.all_gt_misses)/generator.all_gm)*100, " %")
print("len(entities_universe):\t\t\t", colored(len(entities_universe), 'red'))
return entities_universe
gmonly_entities, allspans_entities = set(), set()
samplesGenerator = SamplesGenerator()
if gmonly_files:
print("gmonly files statistics: ")
samplesGenerator.set_gmonly_mode()
gmonly_entities = create_entity_universe_aux(samplesGenerator, gmonly_files)
if allspans_files:
print("Test files statistics: ")
samplesGenerator.set_allspans_mode()
allspans_entities = create_entity_universe_aux(samplesGenerator, allspans_files)
all_entities = gmonly_entities | allspans_entities
print("len(all_entities) = ", len(all_entities))
# print the entities of our universe to a file together with the name
with open(config.base_folder+"data/entities/entities_universe.txt", "w") as fout:
_, wiki_id_name_map = util.load_wiki_name_id_map()
for ent_id in all_entities:
fout.write(ent_id + "\t" + wiki_id_name_map[ent_id].replace(' ', '_') + "\n")
return all_entities
def create_necessary_folders():
if not os.path.exists(config.base_folder+"data/tfrecords/"):
os.makedirs(config.base_folder+"data/tfrecords/")
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--chunking", default="per_document",
help="per_sentence or per_paragraph or per_article"
"per_document: each document is processed as one example"
"per_paragraph: each paragraph is processed as a separate example")
parser.add_argument("--p_e_m_choice", default="yago",
help="'wiki' p(e|m) constructed only from wikipedia dump (prob_wikipedia_p_e_m.txt file),\
'crosswiki' constructed from wikipedia dump + crosswiki (prob_crosswikis_wikipedia_p_e_m.txt),\
'yago' (prob_yago_crosswikis_wikipedia_p_e_m.txt)")
parser.add_argument("--cand_ent_num", type=int, default=30,
help="how many candidate entities to keep for each mention")
parser.add_argument("--lowercase_p_e_m", type=bool, default=False)
parser.add_argument("--lowercase_spans", type=bool, default=False)
parser.add_argument("--calculate_stats", type=bool, default=True)
parser.add_argument("--experiment_name", default="corefmerge",
help="under folder data/tfrecords/")
parser.add_argument("--include_wikidumpRLTD", type=bool, default=False)
parser.add_argument("--word_freq_thr", type=int, default=1,
help="words that have freq less than this are not included in our"
"vocabulary.")
parser.add_argument("--char_freq_thr", type=int, default=1)
parser.add_argument("--max_mention_width", type=int, default=10, help="in allspans mode consider all spans with"
"length <= to this value as candidate entities to be linked")
parser.add_argument("--entity_extension", default=None, help="extension_entities or extension_entities_all etc")
parser.add_argument("--persons_coreference", type=bool, default=True)
parser.add_argument("--persons_coreference_merge", type=bool, default=True)
parser.add_argument("--create_entity_universe", type=bool, default=False)
return parser.parse_args()
def log_args(folderpath):
if not os.path.exists(folderpath):
os.makedirs(folderpath)
with open(folderpath+"prepro_args.txt", "w") as fout:
attrs = vars(args)
fout.write('\n'.join("%s: %s" % item for item in attrs.items()))
with open(folderpath+"prepro_args.pickle", 'wb') as handle:
pickle.dump(args, handle)
if __name__ == "__main__":
args = _parse_args()
print(args)
create_necessary_folders()
log_args(config.base_folder+"data/tfrecords/"+args.experiment_name+"/")
vocabularyCounter = VocabularyCounter()
vocabularyCounter.count_datasets_vocabulary()
if args.create_entity_universe:
create_entity_universe(gmonly_files=[], allspans_files=['aida_train.txt', 'aida_dev.txt', 'aida_test.txt' # ])
, 'ace2004.txt', 'aquaint.txt', 'msnbc.txt'])
else:
create_tfrecords()
|
StarcoderdataPython
|
4842635
|
<filename>lib/dynamic_screening_solutions/constants/__init__.py
# HTK Imports
from htk.lib.dynamic_screening_solutions.constants.general import *
|
StarcoderdataPython
|
1732404
|
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-05-06 23:16
from edparser.utils.io_util import load_pickle, save_pickle
from iwpt2020 import cdroot
import numpy as np
import matplotlib.pyplot as plt
cdroot()
gold_file = 'data/iwpt2020/test-udpipe/en.fixed.conllu'
template = 'data/model/iwpt2020/bert/dep/en.conllu'
def load_conll(path):
with open(path) as src:
text = src.read()
sents = text.split('\n\n')
sents = [x for x in sents if x.strip()]
return sents
def load_sent(text: str):
return [x for x in text.split('\n') if not x.startswith('#')]
iv = set()
for each in load_conll('data/iwpt2020/train-dev-combined/en/train.conllu'):
for cell in load_sent(each):
form = cell.split('\t')[1].lower()
iv.add(form)
def calc_f1(path):
correct = 0
ngold = 0
npred = 0
for gold, pred in zip(load_conll(gold_file), load_conll(path)):
gt = set()
pt = set()
        for gold_line, pred_line in zip(load_sent(gold), load_sent(pred)):
            gf = gold_line.split('\t')[1].lower()
            pf = pred_line.split('\t')[1].lower()
            if gf in iv:
                continue
            idx = gold_line.split('\t')[0]
            for rel in gold_line.split('\t')[8].split('|'):
                gt.add((idx,) + tuple(rel.split(':')))
            for rel in pred_line.split('\t')[8].split('|'):
                pt.add((idx,) + tuple(rel.split(':')))
ngold += len(gt)
npred += len(pt)
correct += len(gt & pt)
p = correct / npred
r = correct / ngold
f1 = 2 * p * r / (p + r)
return f1
fig, ax = plt.subplots()
ind = np.arange(3)
width = 0.35
try:
cache = load_pickle('cache_f1.pkl')
except FileNotFoundError:
cache = {}
for lang in ['mbert', 'bert']:
f1s = []
for model, color in zip(['dep', 'sdp', 'ens'], 'rgb'):
key = f'{lang}-{model}'
if key in cache:
f1 = cache[key]
else:
pred_file = template.replace('bert', lang).replace('dep', model)
f1 = calc_f1(pred_file)
cache[key] = f1
f1s.append(f1)
print(key)
ax.bar(ind + (width if lang == 'bert' else 0), f1s, width, label='multilingual' if lang.startswith('m') else 'language-specific')
save_pickle(cache, 'cache_f1.pkl')
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(['DTP', 'DGP', 'ENS'])
plt.ylabel('ELAS of OOV')
ax.legend()
plt.savefig('oov.pdf')
plt.show()
|
StarcoderdataPython
|
1650674
|
<filename>logwrap/__init__.py
# Copyright 2016-2018 <NAME> aka penguinolog
#
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""logwrap module.
Contents: 'logwrap', 'pretty_repr', 'pretty_str'
Original code was made for Mirantis Inc by <NAME>,
later it has been reworked and extended for support of special cases.
"""
import pkg_resources
from .repr_utils import PrettyFormat, PrettyRepr, PrettyStr, pretty_repr, pretty_str
from .log_wrap import logwrap, LogWrap, BoundParameter, bind_args_kwargs
__all__ = (
"LogWrap",
"logwrap",
"PrettyFormat",
"PrettyRepr",
"PrettyStr",
"pretty_repr",
"pretty_str",
"BoundParameter",
"bind_args_kwargs",
)
try: # pragma: no cover
__version__ = pkg_resources.get_distribution(__name__).version
except pkg_resources.DistributionNotFound: # pragma: no cover
# package is not installed, try to get from SCM
try:
import setuptools_scm # type: ignore
__version__ = setuptools_scm.get_version()
except ImportError:
pass
__author__ = "<NAME>"
__author_email__ = "<EMAIL>"
__maintainers__ = {
"<NAME>": "<EMAIL>",
"<NAME>": "<EMAIL>",
"<NAME>": "<EMAIL>",
}
__url__ = "https://github.com/python-useful-helpers/logwrap"
__description__ = "Decorator for logging function arguments and return value by human-readable way"
__license__ = "Apache License, Version 2.0"
|
StarcoderdataPython
|
1727324
|
from django.urls import path
from . import views
urlpatterns = [
path('login/', views.loginUser, name='login'),
path('logout/', views.logoutUser, name='logout'),
path('register/', views.registerUser, name='register'),
path('', views.index, name='list'),
path('update_task/<str:pk>', views.updateTask, name='update_task' ),
path('delete/<str:pk>', views.deleteTask, name='delete' ),
]
|
StarcoderdataPython
|
190449
|
import csv
import camelot
IN_PATH = 'student_attendance.pdf'
OUT_PATH = f'{IN_PATH[:-3]}csv'
class Converter:
def __init__(self, input_path, output_path):
self.in_path = input_path
self.out_path = output_path
def table_to_csv(self, tables):
with open(self.out_path, 'w', newline='') as f:
writer = csv.writer(f)
for t, table in enumerate(tables):
if t == 0:
table.df[2][1], table.df[3][1] = "Enrollment No.", "Registration No."
writer.writerows(table.df.values)
def convert(self):
        tables = camelot.read_pdf(self.in_path, pages='all', split_text=True, strip_text='\n')
self.table_to_csv(tables)
if __name__ == '__main__':
Converter(IN_PATH, OUT_PATH).convert()
|
StarcoderdataPython
|
1690403
|
<gh_stars>0
import os
import re
import numpy as np
def main():
refs = []
outs = []
for file in os.scandir('../results'):
with open(file.path, "r") as f:
results = f.read()
try:
ref = re.search(R"(?<=Creversible=yes Clevels=5 Cdecomp=\"B\(-:-:-\),B\(-:-:-\),B\(-:-:-\),B\(-:-:-\),B\(-:-:-\)\": )[\d.]+", results).group(0)
except AttributeError:
ref = re.search(R"(?<=Creversible=yes: )[\d.]+", results).group(0)
refs.append(float(ref))
out = re.findall(R"(?<=: )[\d.]+", results)
outs.append(min(map(float, out)))
delta = []
for ref, out in zip(refs, outs):
delta.append((out - ref) / ref * 100)
print(f"mean: {np.mean(delta)}")
print(f"stddev: {np.std(delta)}")
print(f"min: {np.min(delta)}")
print(f"max: {np.max(delta)}")
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1656502
|
import json
from erdos.op import Op
from pylot.utils import is_obstacles_stream
class BoundingBoxLoggerOp(Op):
def __init__(self, name, flags):
super(BoundingBoxLoggerOp, self).__init__(name)
self._flags = flags
self._msg_cnt = 0
@staticmethod
def setup_streams(input_streams):
input_streams.filter(is_obstacles_stream).add_callback(
BoundingBoxLoggerOp.on_detected_objs_msg)
return []
def on_detected_objs_msg(self, msg):
self._msg_cnt += 1
if self._msg_cnt % self._flags.log_every_nth_frame != 0:
return
bboxes = [det_obj.get_bbox_label() for det_obj in msg.detected_objects]
timestamp = msg.timestamp.coordinates[0]
# Write the bounding boxes.
file_name = '{}bboxes-{}.json'.format(self._flags.data_path, timestamp)
with open(file_name, 'w') as outfile:
json.dump(bboxes, outfile)
|
StarcoderdataPython
|
3204070
|
import serial
port = None
def init(dev, baud):
global port
    if port is None:
port = serial.Serial(dev, baud)
port.readline()
def shake():
global port
port.write("1\r\n")
port.readline()
|
StarcoderdataPython
|
3213915
|
<reponame>ArthurHowardMorris/ling_features
import re
import numpy as np
from numpy import dot
from numpy.linalg import norm
# Compute cosine similarity between two vectors
def compute_cos_sim(vector_a, vector_b):
if not np.all(vector_a == 0) and not np.all(vector_b == 0):
cos_sim = dot(vector_a, vector_b)/(norm(vector_a) * norm(vector_b))
else:
cos_sim = 0
return cos_sim
# Assemble the regexes for function words
def assemble_regexes(words):
regex_list = {}
for word in words:
key = "func_" + re.sub("\*|\'", "_", word.lower())
regex = r"\b(?:" + re.sub('\*(?:\s*$)?', "[a-z0-9]*(')?[a-z0-9]*", word.lower()) + r")(?=(?:[^a-zA-Z0-9_']|$))"
regex_list[key] = re.compile(regex)
return regex_list
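# A small usage sketch of the helpers above: cosine similarity of two toy vectors
# and wildcard function-word regexes applied to a sample sentence.
if __name__ == "__main__":
    print(compute_cos_sim(np.array([1.0, 0.0, 1.0]), np.array([1.0, 1.0, 0.0])))  # 0.5
    regexes = assemble_regexes(["can*", "the"])
    print(sorted(regexes.keys()))  # ['func_can_', 'func_the']
    print(len(regexes["func_the"].findall("the cat saw the dog")))  # 2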
|
StarcoderdataPython
|
1771087
|
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
class Employer(models.Model):
owner = models.ForeignKey(to=User, on_delete=models.CASCADE, blank=True, null=True)
name = models.CharField(max_length=50)
updated_at = models.DateTimeField('Updated at', auto_now=True)
created_at = models.DateTimeField('Created at', auto_now_add=True)
class Meta:
verbose_name = 'Employer'
verbose_name_plural = 'Employers'
def __str__(self):
return self.name
class Task(models.Model):
title = models.CharField(max_length=50)
description = models.CharField(max_length=100)
employer = models.ForeignKey(Employer, models.SET_NULL, null=True,
blank=True, related_name="tasks", help_text="employer_id")
completed = models.BooleanField(default=False)
updated_at = models.DateTimeField('Updated at', auto_now=True)
created_at = models.DateTimeField('Created at', auto_now_add=True)
class Meta:
verbose_name = 'Task'
verbose_name_plural = 'Tasks'
def __str__(self):
return self.title
def check_owner(self, task, user):
if task.employer.owner.username == user.username:
return True
return False
|
StarcoderdataPython
|
3322854
|
<filename>fieldkit/test/test_lattice.py
""" Unit tests for lattice data structures.
"""
import unittest
import numpy as np
import fieldkit
class LatticeTest(unittest.TestCase):
""" Test cases for :py:class:`~fieldkit.lattice.Lattice` and :py:class:`~fieldkit.lattice.HOOMDLattice`.
"""
def test(self):
""" Test for basic properties of a skewed lattice.
"""
lattice = fieldkit.Lattice([1,0,0],[0,2,2],[0,0,3])
# size of lattice
np.testing.assert_almost_equal(lattice.L, [1,np.sqrt(8),3])
# lattice vectors
np.testing.assert_almost_equal(lattice.a, [1,0,0])
np.testing.assert_almost_equal(lattice.b, [0,2,2])
np.testing.assert_almost_equal(lattice.c, [0,0,3])
# cell matrix
np.testing.assert_almost_equal(lattice.matrix, ((1,0,0),(0,2,0),(0,2,3)))
# inverse cell matrix
np.testing.assert_almost_equal(lattice.inverse, ((1,0,0),(0,0.5,0),(0,-1./3.,1./3.)))
# volume
self.assertAlmostEqual(lattice.volume, 6)
def test_hoomd(self):
""" Test for creation of various lattices in the hoomd / lammps definition.
"""
# cube
lattice = fieldkit.HOOMDLattice(L=2)
np.testing.assert_almost_equal(lattice.L, (2.0,2.0,2.0))
np.testing.assert_almost_equal(np.diag(lattice.matrix), (2.0,2.0,2.0))
np.testing.assert_almost_equal(lattice.matrix-np.diag(lattice.L), np.zeros((3,3)))
self.assertAlmostEqual(lattice.volume, 2**3)
# orthorhombic
lattice = fieldkit.HOOMDLattice(L=(0.2,0.3,0.4))
np.testing.assert_almost_equal(lattice.L, (0.2,0.3,0.4))
np.testing.assert_almost_equal(np.diag(lattice.matrix), (0.2,0.3,0.4))
np.testing.assert_almost_equal(lattice.matrix-np.diag(lattice.L), np.zeros((3,3)))
self.assertAlmostEqual(lattice.volume, 0.2*0.3*0.4)
# tilted in xy
lattice = fieldkit.HOOMDLattice(L=4.,tilt=(0.5,0.,0.))
np.testing.assert_almost_equal(lattice.matrix, ((4.,2.,0.),(0.,4.,0.),(0.,0.,4.)))
self.assertAlmostEqual(lattice.volume, 4**3)
# tilted in xy and yz
lattice = fieldkit.HOOMDLattice(L=4.,tilt=(0.5,0.,0.5))
np.testing.assert_almost_equal(lattice.matrix, ((4.,2.,0.),(0.,4.,2.),(0.,0.,4.)))
self.assertAlmostEqual(lattice.volume, 4**3)
# tilted in xy, xz, and yz
lattice = fieldkit.HOOMDLattice(L=4.,tilt=(0.5,0.5,0.5))
np.testing.assert_almost_equal(lattice.matrix, ((4.,2.,2.),(0.,4.,2.),(0.,0.,4.)))
self.assertAlmostEqual(lattice.volume, 4**3)
def test_coordinate(self):
""" Test for mapping of fractional coordinates to real coordinates.
"""
lattice = fieldkit.HOOMDLattice(L=(1,2,4))
r = lattice.as_coordinate((0.5, 0.25, 0.125))
np.testing.assert_array_almost_equal(r, (0.5, 0.5, 0.5))
# two at once
r = lattice.as_coordinate(((0., 0., 0.),(1.0,1.0,1.0)))
np.testing.assert_array_almost_equal(r, ((0,0,0),(1,2,4)))
lattice = fieldkit.HOOMDLattice(L=4, tilt=(0.5,0.,0.))
r = lattice.as_coordinate((0.5, 0.5, 0.5))
np.testing.assert_array_almost_equal(r, (3., 2., 2.))
lattice = fieldkit.HOOMDLattice(L=4, tilt=(0.5,0.,0.5))
r = lattice.as_coordinate((0.5, 0.5, 0.5))
np.testing.assert_array_almost_equal(r, (3., 3., 2.))
def test_fraction(self):
""" Test for mapping of real coordinates to fractional coordinates.
"""
lattice = fieldkit.HOOMDLattice(L=(1,2,4))
f = lattice.as_fraction((0.5,0.5,0.5))
np.testing.assert_almost_equal(f, (0.5, 0.25, 0.125))
# two at once
f = lattice.as_fraction(((0,0,0),(1, 2, 4)))
np.testing.assert_array_almost_equal(f, ((0.,0.,0.),(1.,1.,1.)))
lattice = fieldkit.HOOMDLattice(L=4, tilt=(0.5,0.,0.))
f = lattice.as_fraction((3.,2.,2.))
np.testing.assert_almost_equal(f, (0.5, 0.5, 0.5))
lattice = fieldkit.HOOMDLattice(L=4, tilt=(0.5,0.,0.5))
f = lattice.as_fraction((3.,3.,2.))
np.testing.assert_almost_equal(f, (0.5, 0.5, 0.5))
def test_orthorhombic(self):
""" Test for construction of orthorhombic basis from triclinic lattice.
"""
tri = fieldkit.HOOMDLattice(L=(2.,3.,4.), tilt=(0.5, 0.5, 0.5))
ortho = tri.to_orthorhombic()
self.assertAlmostEqual(ortho.volume, tri.volume)
np.testing.assert_almost_equal(ortho.a, (2,0,0))
np.testing.assert_almost_equal(ortho.b, (0,3,0))
np.testing.assert_almost_equal(ortho.c, (0,0,4))
|
StarcoderdataPython
|
1612344
|
from __future__ import unicode_literals
from frappe import _
def get_data():
return {
'fieldname': 'case_level',
'transactions': [
{
'label': _('Case Log'),
'items': ['Case Log']
}
]
}
|
StarcoderdataPython
|
3223921
|
<reponame>ConsenSys/Legions
#!/usr/bin/env python3
# Legion - <NAME>, ConsenSys Diligence
import argparse
from legions.context import LegionContext
from legions.statusbar import LegionStatusBar
from nubia import PluginInterface, CompletionDataSource
from nubia.internal.blackcmd import CommandBlacklist
class LegionPlugin(PluginInterface):
"""
The PluginInterface class is a way to customize nubia for every customer
    use case. It allows custom argument validation, control over command
loading, custom context objects, and much more.
"""
def create_context(self):
"""
Must create an object that inherits from `Context` parent class.
The plugin can return a custom context but it has to inherit from the
correct parent class.
"""
return LegionContext()
def validate_args(self, args):
"""
This will be executed when starting nubia, the args passed is a
dict-like object that contains the argparse result after parsing the
command line arguments. The plugin can choose to update the context
with the values, and/or decide to raise `ArgsValidationError` with
the error message.
"""
pass
def get_opts_parser(self, add_help=True):
"""
        Builds the ArgumentParser that will be passed to the shell; use this to
build your list of arguments that you want for your shell.
"""
opts_parser = argparse.ArgumentParser(
description="Legion - EVM Node Security Toolkit",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=add_help,
)
opts_parser.add_argument(
"--config", "-c", default="", type=str, help="Configuration File"
)
opts_parser.add_argument(
"--verbose",
"-v",
action="count",
default=0,
help="Increase verbosity, can be specified " "multiple times",
)
opts_parser.add_argument(
"--stderr",
"-s",
action="store_true",
help="By default the logging output goes to a "
"temporary file. This disables this feature "
"by sending the logging output to stderr",
)
return opts_parser
def get_completion_datasource_for_global_argument(self, argument):
if argument == "--config":
return ConfigFileCompletionDataSource()
return None
def create_usage_logger(self, context):
"""
Override this and return you own usage logger.
Must be a subtype of UsageLoggerInterface.
"""
return None
def get_status_bar(self, context):
"""
This returns the StatusBar object that handles the bottom status bar
and the right-side per-line status
"""
return LegionStatusBar(context)
def getBlacklistPlugin(self):
blacklister = CommandBlacklist()
blacklister.add_blocked_command("be-blocked")
return blacklister
class ConfigFileCompletionDataSource(CompletionDataSource):
def get_all(self):
return ["/tmp/c1", "/tmp/c2"]
|
StarcoderdataPython
|
3242221
|
<filename>dwcontents/utils.py
# dwcontents
# Copyright 2018 data.world, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License.
#
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# This product includes software developed at
# data.world, Inc.(http://data.world/).
from __future__ import unicode_literals, print_function
import time
from builtins import str
from itertools import groupby
import nbformat
from nbformat import v1, v2, v3, v4
str('Use str() once to force PyCharm to keep import')
def directory_path(path):
path = normalize_path(path)
return path if path == '' else '{}/'.format(path)
def normalize_path(*parts):
return '/'.join([part.strip('/') for part in parts
if part is not None and part != ''])
def relative_path(path, parent):
path = normalize_path(path)
parent = normalize_path(parent)
return normalize_path(path[len(parent):])
def split_parent(path):
path = normalize_path(path)
parent, _, name = path.rpartition('/')
return directory_path(parent), name
def to_api_path(dw_path, root_dir=''):
rel_path = relative_path(dw_path, root_dir)
return rel_path
def to_dw_path(path, root_dir='', prefix=''):
path = normalize_path(path)
if path == prefix:
path = root_dir
else:
path = normalize_path(
root_dir, relative_path(path, prefix))
path_parts = path.split('/', 2)
owner = path_parts[0] if path_parts[0] != '' else None
dataset_id = path_parts[1] if len(path_parts) > 1 else None
file_path = path_parts[2] if len(path_parts) > 2 else None
return owner, dataset_id, file_path
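# Worked examples (an added sketch, not part of the original module; the paths are
# made up), traced from the helpers above:
#
#   normalize_path('/a/', 'b/')               -> 'a/b'
#   split_parent('a/b/c')                     -> ('a/b/', 'c')
#   to_dw_path('owner/dataset/dir/file.txt')  -> ('owner', 'dataset', 'dir/file.txt')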
def to_nb_json(content, version_specific=False):
if not version_specific:
return nbformat.from_dict(content)
else:
# Not sure why this is needed instead of from_dict, sometimes
versions = {
1: v1,
2: v2,
3: v3,
4: v4,
}
major = content.get('nbformat', 1)
minor = content.get('nbformat_minor', 0)
nb = versions[major].to_notebook_json(content, minor=minor)
return nb
def unique_justseen(iterable, key=None):
sorted_items = sorted(iterable, key=key)
groups = groupby(sorted_items, key=key)
return (next(v) for k, v in groups)
class MWT(object):
"""Memoize With Timeout"""
_caches = {}
_timeouts = {}
def __init__(self, timeout=2):
self.timeout = timeout
def collect(self):
"""Clear cache of results which have timed out"""
for func in self._caches:
cache = {}
for key in self._caches[func]:
if ((time.time() - self._caches[func][key][1]) <
self._timeouts[func]):
cache[key] = self._caches[func][key]
self._caches[func] = cache
def invalidate(self):
for func in self._caches:
self._caches[func].clear()
def __call__(self, f):
self.cache = self._caches[f] = {}
self._timeouts[f] = self.timeout
def func(*args, **kwargs):
kw = sorted(kwargs.items())
key = (args, tuple(kw))
try:
v = self.cache[key]
if (time.time() - v[1]) > self.timeout:
raise KeyError
except KeyError:
v = self.cache[key] = f(*args, **kwargs), time.time()
return v[0]
func.func_name = f.__name__
return func
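# Hypothetical usage sketch (not part of the original module): MWT is a decorator
# factory, so a wrapped callable caches its result per (args, kwargs) key and
# recomputes it once `timeout` seconds have elapsed, e.g.
#
#   @MWT(timeout=30)
#   def list_datasets(owner):              # made-up function name for illustration
#       return expensive_api_call(owner)   # made-up callable for illustration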
|
StarcoderdataPython
|
1692783
|
import argparse
import os
import torch
import yaml
from train.train import train
os.environ['NCCL_LL_THRESHOLD'] = '0'
parser = argparse.ArgumentParser(description='Train model on multiple cards')
parser.add_argument('--config', help='path to yaml config file')
parser.add_argument('--local_rank', type=int, help='local gpu id')
args = parser.parse_args()
config = yaml.safe_load(open(args.config))
torch.distributed.init_process_group(backend='nccl', init_method='env://')
config['local_rank'] = args.local_rank
torch.cuda.set_device(args.local_rank)
train(config)
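# Hypothetical launch example (not part of the original script): --local_rank is
# injected per process by the PyTorch launcher, e.g.
#
#   python -m torch.distributed.launch --nproc_per_node=4 <this_script>.py --config config.yaml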
|
StarcoderdataPython
|
1632222
|
<reponame>egromero/chat_app_flask
# Copyright (c) 2009-2015 <NAME> and gevent contributors. See LICENSE for details.
from __future__ import absolute_import
import os
from gevent._util import copy_globals
try:
if os.environ.get('GEVENT_CORE_CFFI_ONLY'):
raise ImportError("Not attempting corecext")
from gevent.libev import corecext as _core
except ImportError:
if os.environ.get('GEVENT_CORE_CEXT_ONLY'):
raise
# CFFI/PyPy
from gevent.libev import corecffi as _core
copy_globals(_core, globals())
__all__ = _core.__all__
|
StarcoderdataPython
|
1649400
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# nnutil2 - Tensorflow utilities for training neural networks
# Copyright (c) 2020, <NAME> <<EMAIL>>
#
# This file is part of 'nnutil2'.
#
# This file may be modified and distributed under the terms of the 3-clause BSD
# license. See the LICENSE file for details.
import tensorflow as tf
from .. import util
def flatten_vector(value, inner_structure):
"""
Returns nested structure as a flat tensor (batch, inner_size)
"""
batch_shape = util.batch_shape(value, inner_structure)
batch_size = batch_shape.num_elements()
flat_values = [tf.reshape(x, shape=(batch_size, util.as_shape(s).num_elements()))
for x, s in zip(tf.nest.flatten(value), tf.nest.flatten(inner_structure))]
if len(flat_values) == 1:
flat_vector = flat_values[0]
else:
flat_vector = tf.concat(flat_values, axis=-1)
return flat_vector
def unflatten_vector(value, inner_structure, batch_shape):
"""
Unflattens a batch of vectors (batch, inner_size) into a nested structure
"""
batch_shape = util.as_shape(batch_shape)
inner_shape = util.as_shape(inner_structure)
flat_inner_shape = tf.nest.flatten(inner_shape)
assert value.shape.rank == 2
batch_size = value.shape[0]
inner_size = value.shape[1]
assert batch_size == batch_shape.num_elements()
assert inner_size == util.num_elements(inner_shape)
if len(flat_inner_shape) == 1:
flat_values = [value]
else:
flat_values = tf.split(value, num_or_size_splits=[s.num_elements() for s in flat_inner_shape], axis=1)
unflat_values = [tf.reshape(x, shape=batch_shape + s) for x, s in zip(flat_values, flat_inner_shape)]
unflat_values = tf.nest.pack_sequence_as(inner_structure, unflat_values)
return unflat_values
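# Round-trip sketch (added note, not part of the original module; variable names are
# assumptions): flatten_vector collapses a nested structure into a single
# (batch_size, inner_size) tensor, and unflatten_vector reverses it given the same
# inner structure and batch shape, i.e.
#
#   flat = flatten_vector(value, inner_structure)
#   restored = unflatten_vector(flat, inner_structure, batch_shape)
#
# should recover the original nested tensors (up to reshaping).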
|
StarcoderdataPython
|
3232849
|
<gh_stars>0
import pandas as pd
import numpy as np
import torch.nn as nn
from torchvision import transforms
from torch.utils.data import Dataset
from collections import Counter
import torch
from PIL import Image
from skimage import io
#from data_util import TrainProtsDataset, ValProtsDataset, TestProtsDataset
from torch.utils.data import DataLoader
# from model
from tqdm import tqdm
import torch.optim as optim
from sklearn.metrics import f1_score
import torch.nn.functional as F
#from model import PretrainedResnet50
|
StarcoderdataPython
|
3356680
|
<filename>src/posts/models.py
import os
from django.db import models
from django.urls import reverse
from tagging.registry import register
from tagging.fields import TagField
def rename_file_with_slug(old_filename, slug):
filename, file_extension = os.path.splitext(old_filename)
truncated_slug = slug[:25] if len(slug) > 25 else slug
return '{0}{1}'.format(truncated_slug, file_extension)
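# Example (added sketch, not part of the original module; values are made up):
#   rename_file_with_slug('photo.jpg', 'my-first-post')  -> 'my-first-post.jpg'
# Slugs longer than 25 characters are truncated before the extension is re-attached.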
def generate_featured_image_path(instance, filename):
return 'posts/uploads/{0}'.format(rename_file_with_slug(filename, instance.slug))
class PublishedPostManager(models.Manager):
def get_queryset(self):
return super().get_queryset() \
.filter(is_published=True) \
.order_by('-published_at')
class Post(models.Model):
objects = models.Manager()
published_objects = PublishedPostManager()
is_published = models.BooleanField(default=False)
title = models.CharField(max_length=100)
slug = models.SlugField(max_length=50, unique=True)
excerpt = models.TextField()
body = models.TextField()
tags = TagField()
featured_image = models.ImageField(upload_to=generate_featured_image_path, null=True)
created_at = models.DateTimeField('date created', auto_now_add=True)
updated_at = models.DateTimeField('last modified', auto_now=True)
published_at = models.DateTimeField('date published', null=True, blank=True)
def get_absolute_url(self):
return reverse('posts:detail', kwargs={'slug': self.slug})
def get_others(self):
return Post.published_objects.exclude(pk=self.pk)
def get_next_published(self):
try:
published_gte = self.get_others().filter(published_at__gte=self.published_at)
return published_gte.order_by('published_at')[0]
except IndexError:
raise Post.DoesNotExist
def get_previous_published(self):
try:
published_lte = self.get_others().filter(published_at__lte=self.published_at)
return published_lte.order_by('-published_at')[0]
except IndexError:
raise Post.DoesNotExist
def __str__(self):
return self.title
register(Post, 'post_tags')
|
StarcoderdataPython
|
3315177
|
<filename>botfile/sender.py
import discord
from discord.ext import commands
import asyncio
class Sender(commands.Cog):
def __init__(self,bot):
self.bot =bot
@commands.command()
    async def url(self, ctx, mid: int, cid: int = None):
        # ctx is not available at definition time, so default to the invoking
        # channel inside the function body instead of in the signature.
        if cid is None:
            cid = ctx.channel.id
        try:
            g = ctx.guild
            ch = self.bot.get_channel(cid)
            msg = await ch.fetch_message(mid)
except:
await ctx.send('メッセージ取得に失敗')
else:
            atc_list = msg.attachments
            if not atc_list:
                await ctx.send('ファイルが存在しません。')
elif len(atc_list) >= 2:
await ctx.send('ファイルが二つ以上添付されています。1つのみの対応です。')
else:
for atc in atc_list:
await ctx.send(atc.url)
@commands.command()
async def send(self,ctx,ctg_id:int):
        # Fetch the category
ctg = ctx.guild.get_channel(ctg_id)
if ctg == None:
await ctx.send('カテゴリの取得に失敗しました')
return
elif ctg.type != discord.ChannelType.category:
await ctx.send('カテゴリIDを指定してください')
return
elif ctg.type == discord.ChannelType.category:
embed = discord.Embed(title='送信オプションの確認',description='メンション確認',colour=discord.Colour.blue())
embed.add_field(name='everyoneメンションする',value='\u2705',inline=False)
embed.add_field(name='everyoneメンションしない',value='\u274E',inline=False)
mention_msg = await ctx.send(embed=embed)
await mention_msg.add_reaction('\u2705')
await mention_msg.add_reaction('\u274E')
await mention_msg.add_reaction('\u26D4')
def check(reaction,user):
return user==ctx.author and reaction.message==mention_msg
try:
reaction,user = await self.bot.wait_for('reaction_add',timeout=60,check=check)
except asyncio.TimeoutError:
await ctx.send('60秒間無操作だったため操作を中止しました。')
else:
if str(reaction.emoji) == '\u2705':
do_mention = True
elif str(reaction.emoji) == '\u274E':
do_mention = False
elif str(reaction.emoji) == '\u26D4':
await ctx.send('終了しました')
return
type_msg = await ctx.send('送信メッセージをこのチャンネルに送信してください。')
def msgcheck(m):
                    return m.author == ctx.author and m.channel == ctx.channel
try:
m = await self.bot.wait_for('message',timeout=60,check=msgcheck)
except asyncio.TimeoutError:
await ctx.send('60秒間無操作だったため操作を中止しました')
return
else:
send_content = m.content
if do_mention == True:
everyone = ctx.guild.get_role(ctx.guild.id)
send_message = everyone.mention + "\n" + send_content
else:
send_message = send_content
for ch in ctg.text_channels:
await ch.send(send_message)
def setup(bot):
    bot.add_cog(Sender(bot))
|
StarcoderdataPython
|
3227296
|
<filename>tests/test_binary_search.py
import unittest
from random import randint
from big_o import big_o, complexities
from src.binary_search import binary_search
class TestBinarySearch(unittest.TestCase):
def test_find(self):
self.assertEqual(binary_search([1, 3, 5, 7, 9], 3), 1)
def test_not_find(self):
self.assertEqual(binary_search([1, 2, 3, 4, 5], 6), None)
def test_complexity(self):
best, _ = big_o(
lambda arr: binary_search(arr, arr[randint(0, len(arr) - 1)]),
lambda x: list(range(x)),
n_repeats=1000
)
self.assertIsInstance(best, complexities.Logarithmic)
|
StarcoderdataPython
|
129934
|
<filename>test_split.py
from itertools import chain, repeat
from hypothesis import given
import hypothesis.strategies as st
import pytest
from split import chunks, groupby, partition, split
expected_keys = (1, 2, 3, 4, 5)
expected_groups = ((1, 1), (2, 2), (3, 3), (4, 4), (5, 5))
def call(*args, **kwargs):
return args, kwargs
@pytest.fixture()
def data():
return chain.from_iterable(repeat(expected_keys, 2))
def test_groupby_keys(data):
assert expected_keys == tuple(key for key, group in groupby(data))
def test_groupby_values(data):
assert expected_groups == tuple(tuple(group) for key, group in groupby(data))
def test_partition():
expected = (1, 3, 5, 7, 9), (0, 2, 4, 6, 8)
assert expected == tuple(map(tuple, partition(lambda x: x % 2 != 0, range(10))))
@given(st.text())
def test_split(s):
assert list(map(''.join, split('a', s))) == s.split('a')
@pytest.mark.parametrize(
'call,expected',
(
(call(range(10), 3), [(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, None, None)]),
)
)
def test_chunks(call, expected):
args, kwargs = call
assert expected == list(chunks(*args, **kwargs))
|
StarcoderdataPython
|
1707699
|
def comp_L2(self, L2_ref=None):
"""Compute and set the Rotor phase inductance for the equivalent electrical circuit
Parameters
----------
self : EEC_SCIM
an EEC_SCIM object
L2_ref : float
reference inductance
"""
if L2_ref is None:
raise Exception("L2 parameter for EEC_SCIM must be enforced !")
if self.type_comp_Lr == 1:
# analytic calculation
# L2 = machine.rotor.slot.comp_inductance_leakage_ANL() #TODO
pass
else:
# FEA calculation
# L2 = machine.rotor.slot.comp_inductance_leakage_FEA() #TODO
pass
self.L2 = L2_ref * self.Xke_skinR
|
StarcoderdataPython
|
123062
|
#!/usr/bin/env python
# encoding: utf8
#
# Grab http://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt
#
from __future__ import print_function
import os, sys
from argparse import ArgumentParser
from robofab.objects.objectsRF import OpenFont
from unicode_util import parseUnicodeDataFile, MainCategories as UniMainCategories
lightBlueColor = (0.86, 0.92, 0.97, 1.0)
lightTealColor = (0.8, 0.94, 0.95, 1.0)
lightYellowColor = (0.97, 0.95, 0.83, 1.0)
lightPurpleColor = (0.93, 0.9, 0.98, 1.0)
lightGreyColor = (0.94, 0.94, 0.94, 1.0)
mediumGreyColor = (0.87, 0.87, 0.87, 1.0)
lightGreenColor = (0.89, 0.96, 0.92, 1.0)
mediumGreenColor = (0.77, 0.95, 0.76, 1.0)
lightRedColor = (0.98, 0.89, 0.89, 1.0)
lightOrangeColor = (1.0, 0.89, 0.82, 1.0)
redColor = (1, 0.3, 0.3, 1)
colorsByGlyphName = [
(set('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'), lightBlueColor), # light blue 1
]
colorsByUCMainCategory = {
# UniMainCategories.Letter: (1, 1, 1, 1),
UniMainCategories.Mark: lightRedColor,
UniMainCategories.Punctuation: lightGreyColor,
UniMainCategories.Format: lightGreyColor,
UniMainCategories.Number: lightGreenColor,
UniMainCategories.Symbol: lightTealColor,
UniMainCategories.Separator: lightPurpleColor,
UniMainCategories.Control: redColor,
UniMainCategories.Surrogate: redColor,
UniMainCategories.PrivateUse: lightYellowColor,
UniMainCategories.Unassigned: lightYellowColor,
UniMainCategories.Other: lightOrangeColor,
}
def colorForGlyph(name, unicodes, ucd):
for nameSet, color in colorsByGlyphName:
if name in nameSet:
return color
for uc in unicodes:
cp = ucd.get(uc)
if cp is None:
continue
return colorsByUCMainCategory.get(cp.mainCategory)
if len(unicodes) == 0:
if name.find('.cn') != -1:
# pure component
return mediumGreenColor
else:
# precomposed
return mediumGreyColor
return None
def main():
argparser = ArgumentParser(
description='Set robofont color marks on glyphs based on unicode categories')
argparser.add_argument(
'-dry', dest='dryRun', action='store_const', const=True, default=False,
help='Do not modify anything, but instead just print what would happen.')
argparser.add_argument(
'-ucd', dest='ucdFile', metavar='<file>', type=str,
help='UnicodeData.txt file from http://www.unicode.org/')
argparser.add_argument(
'fontPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts to update')
args = argparser.parse_args()
dryRun = args.dryRun
markLibKey = 'com.typemytype.robofont.mark'
ucd = {}
if args.ucdFile:
ucd = parseUnicodeDataFile(args.ucdFile)
for fontPath in args.fontPaths:
font = OpenFont(fontPath)
for g in font:
rgba = colorForGlyph(g.name, g.unicodes, ucd)
if rgba is None:
if markLibKey in g.lib:
del g.lib[markLibKey]
else:
g.lib[markLibKey] = [float(n) for n in rgba]
print('Write', fontPath)
if not dryRun:
font.save()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1651081
|
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from .views import location
urlpatterns = patterns('',
url(r'^(?P<locationname>[A-Za-z]+)/$', location, name='location'),
url(r'^(?P<locationname>[A-Za-z]+)/(?P<placeofinterest>[A-Za-z]+)/$', location, name = 'placeofinterest'),
)
|
StarcoderdataPython
|
4815775
|
import types
def isclass(obj):
"""
Helper.
    Identical to Python 2.7's inspect.isclass.
isclass in Python 2.6 also returns True when
the passed object has a __bases__ attribute.
(like in case of an instance.)
"""
return isinstance(obj, (type, types.ClassType))
from .network import *
from .string_utils import *
|
StarcoderdataPython
|
1699406
|
#!/usr/bin/env python
from eth_tester.exceptions import TransactionFailed
from utils import longTo32Bytes, longToHexString, fix, AssertLog, stringToBytes, EtherDelta, PrintGasUsed, BuyWithCash, TokenDelta, EtherDelta, nullAddress
from pytest import raises, mark, fixture as pytest_fixture
from reporting_utils import proceedToNextRound
from decimal import Decimal
from math import floor
APPROVAL_AMOUNT = 2**255
def test_uniswap_router(contractsFixture, cash, reputationToken):
weth = contractsFixture.contracts["WETH9"]
uniswap = contractsFixture.contracts["UniswapV2Router01"]
account = contractsFixture.accounts[0]
deadline = contractsFixture.eth_tester.backend.chain.header.timestamp + 1000000
# We'll provide some liquidity to the REP/DAI exchange
cashAmount = 100 * 10**18
repAmount = 10 * 10**18
cash.faucet(cashAmount)
reputationToken.faucet(repAmount)
cash.approve(uniswap.address, APPROVAL_AMOUNT)
reputationToken.approve(uniswap.address, APPROVAL_AMOUNT)
uniswap.addLiquidity(reputationToken.address, cash.address, repAmount, cashAmount, 0, 0, account, deadline)
# We'll provide liquidity to the ETH/DAI exchange now
cashAmount = 1000 * 10**18
ethAmount = 10 * 10**18
cash.faucet(cashAmount)
uniswap.addLiquidityETH(cash.address, cashAmount, cashAmount, ethAmount, account, deadline, value=ethAmount)
# Now lets do some swaps. We'll pay some DAI get an exact amount of REP first
exactRep = 10**17
maxDAI = 1.1 * 10**18
cash.faucet(maxDAI)
with TokenDelta(reputationToken, exactRep, account, "REP token balance wrong"):
uniswap.swapTokensForExactTokens(exactRep, maxDAI, [cash.address, reputationToken.address], account, deadline)
# Now we'll pay an exact amount of DAI to get some REP
exactDAI = 10**18
minRep = .95 * 10**17
cash.faucet(exactDAI)
with TokenDelta(cash, -exactDAI, account, "Cash token balance wrong"):
uniswap.swapExactTokensForTokens(exactDAI, minRep, [cash.address, reputationToken.address], account, deadline)
# Now lets pay some DAI to get an exact amount of ETH. We pay gas to execute this so we subtract a dust value to account for that
exactETH = 10**16
cash.faucet(maxDAI)
initialETHBalance = contractsFixture.eth_tester.get_balance(account)
uniswap.swapTokensForExactETH(exactETH, maxDAI, [cash.address, weth.address], account, deadline)
newETHBalance = contractsFixture.eth_tester.get_balance(account)
dust = 10**7
assert newETHBalance - initialETHBalance > exactETH - dust
# Now we pay an exact amount of DAI to get some ETH
minETH = .95 * 10**16
cash.faucet(exactDAI)
with TokenDelta(cash, -exactDAI, account, "Cash token balance wrong"):
uniswap.swapExactTokensForETH(exactDAI, minETH, [cash.address, weth.address], account, deadline)
# Now lets pay some ETH to get an exact amount of DAI.
maxETH = 1.1 * 10**16
uniswap.swapETHForExactTokens(exactDAI, [weth.address, cash.address], account, deadline, value=maxETH)
# Finally we pay an exact amount of ETH to get some DAI.
minDAI = .95 * 10**18
uniswap.swapExactETHForTokens(minDAI, [weth.address, cash.address], account, deadline, value=exactETH)
|
StarcoderdataPython
|
3358669
|
<reponame>bradmontgomery/django-redis-metrics
from __future__ import unicode_literals
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from redis_metrics.utils import generate_test_metrics
class Command(BaseCommand):
args = '<metric-name> [<metric-name> ...]'
help = "Creates Lots of Dummy Metrics"
option_list = BaseCommand.option_list + (
make_option(
'-r',
'--randomize',
action='store_true',
dest='randomize',
default=True,
help='Randomize Metric Data'
),
make_option(
'--no-randomize',
action='store_false',
dest='randomize',
default=True,
help='Do not randomize Metric Data'
),
make_option(
'-n',
'--num-days',
action='store',
dest='num_days',
type="int",
default=365 * 3, # Default to 3 years
help='Number of Days worth of data to generate'
),
make_option(
'-c',
'--cap',
action='store',
dest='cap',
default=None,
help='Cap the maximum metric value'
),
)
def handle(self, *args, **options):
if len(args) == 0:
raise CommandError("You must provide at least one metric name")
slugs = args
cap = options["cap"]
days = options["num_days"]
randomize = options["randomize"]
self.stdout.write("\nGenerating metrics using the following:\n")
self.stdout.write("Slugs: {0}\n".format(u", ".join(slugs)))
self.stdout.write("Days: {0}\n".format(days))
self.stdout.write("Randomize: {0}\n".format(randomize))
self.stdout.write("Cap: {0}\n".format(cap))
for slug in slugs:
generate_test_metrics(slug, num=days, randomize=randomize, cap=cap)
|
StarcoderdataPython
|
1695113
|
<filename>src/products/views.py
# Here we are going to render the contents of the products database in these pages.
# This is a good case of dedicating each app to specific roles.
# This views.py has it's own template folder 'products/templates/'.
# Also check out pages/views.py to see some comments that can help you understand some
# things on this page, and check out how the context dicts are built here. Very modular.
from django.shortcuts import render, get_object_or_404, redirect
from .forms import ProductForm # Checkout forms.py
from .models import Product
# From: https://www.w3schools.com/tags/ref_httpmethods.asp
# GET is used to request data from a specified resource.
# GET is one of the most common HTTP methods.
# Note that the query string (name/value pairs) is sent in the URL of a GET request:
# /test/demo_form.php?name1=value1&name2=value2
# Some other notes on GET requests:
# GET requests can be cached
# GET requests remain in the browser history
# GET requests can be bookmarked
# GET requests should never be used when dealing with sensitive data
# GET requests have length restrictions
# GET requests is only used to request data (not modify)
# From: https://www.w3schools.com/tags/ref_httpmethods.asp
# POST is used to send data to a server to create/update a resource.
# The data sent to the server with POST is stored in the request body of the HTTP request:
# POST /test/demo_form.php HTTP/1.1
# Host: w3schools.com
# name1=value1&name2=value2
# POST is one of the most common HTTP methods.
# Some other notes on POST requests:
# POST requests are never cached
# POST requests do not remain in the browser history
# POST requests cannot be bookmarked
# POST requests have no restrictions on data length
# This is how you create a record in the DB through a webpage.
def product_create_view(request):
    form = ProductForm(request.POST or None)  # Unbound form when there is no POST data (e.g. on a GET request)
if form.is_valid(): # Has to follow logic from forms.py
form.save()
form = ProductForm()
context = {
'form': form
}
return render(request, "products/product_create.html", context)
def product_update_view(request, id):
obj = get_object_or_404(Product, id=id) # This one line is how you handle a page not found or 404. You could also do it this way: https://docs.djangoproject.com/en/2.2/topics/http/views/#django.http.Http404 check comment below this function. Needs to be imported.
form = ProductForm(request.POST or None, instance=obj)
if form.is_valid():
form.save()
context = {
'form': form
}
return render(request, "products/product_create.html", context) #http://127.0.0.1:8000/product/<int:id>/update
# Example of another 404 handling:
# from django.http import Http404
# try:
# obj = Product.objects.get(id=1)
# except Product.DoesNotExist:
# raise Http404
def product_list_view(request):
queryset = Product.objects.all() # list of objects
context = {
"object_list": queryset
}
return render(request, "products/product_list.html", context) # Checkout http://127.0.0.1:8000/products/
def product_detail_view(request, id):
obj = get_object_or_404(Product, id=id)
context = {
"object": obj
}
return render(request, "products/product_detail.html", context)
def product_delete_view(request, id):
obj = get_object_or_404(Product, id=id)
if request.method == "POST": # Make sure it's a POST request!
obj.delete() # This one line is how you delete the specific Object QuerySet called by 'obj = get_object_or_404(Product, id=id)'
return redirect('../../') # Go back to the '/product/<int:someID>' page. Needs to be imported.
context = {
"object": obj
}
return render(request, "products/product_delete.html", context) #http://1172.16.58.3:8000/product/<int:id>/delete
|
StarcoderdataPython
|
3270062
|
"""Particle filtering and smoothing."""
from ._particle_filter import (
ParticleFilter,
effective_number_of_events,
resample_categorical,
)
from ._particle_filter_posterior import ParticleFilterPosterior
|
StarcoderdataPython
|
3280395
|
# -*- coding: utf-8 -*-
"""
Conftest.
"""
import pytest
from pathlib import Path
@pytest.fixture(scope="session", autouse=True)
def data_path() -> Path:
"""Path to test data."""
return Path(__file__).parent / "data"
@pytest.fixture(scope="session")
def cal_data(data_path: Path):
return data_path / "Receiver01_25C_2019_11_26_040_to_200MHz"
@pytest.fixture(scope="session", autouse=True)
def tmpdir(tmp_path_factory):
return tmp_path_factory.mktemp("edges-cal")
|
StarcoderdataPython
|
3276865
|
<reponame>Cougargriff/SK-Purple-Convert
import csv
from os import system, name
def clear():
# for windows
if name == 'nt':
_ = system('cls')
# for mac and linux(here, os.name is 'posix')
else:
_ = system('clear')
blue_f = open('blue_zero_point_foods.csv')
purple_f = open('purple_zero_point_foods.csv')
blue_csv = csv.reader(blue_f)
purple_csv = csv.reader(purple_f)
diff_foods = []
progress = 0
def print_progress(p):
progress = ""
for x in range(0, p / 3):
progress += "."
print progress
# loop through purple foods and check if in blue foods
for p in purple_csv:
clear()
print_progress(progress)
in_list = False
for b in blue_csv:
diff = p[0].strip().lower() == b[0].strip().lower()
# print "{} == {} = {}".format(p[0], b[0], diff)
if diff == True:
in_list = True
blue_csv = csv.reader(open('blue_zero_point_foods.csv'))
print "."
if in_list == False:
diff_foods.append(p)
progress += 1
print "Found {} foods in purple plan not included in blue plan.".format(len(diff_foods))
# output new csv containing the difference of purple
with open('in_purple_not_blue.csv', 'wb') as csvfile:
filewriter = csv.writer(csvfile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
    filewriter.writerow(['Diff_Foods'])  # header row must be a sequence, not a string
for food in diff_foods:
filewriter.writerow(food)
|
StarcoderdataPython
|
3225657
|
class SimModule(object):
def initialize(self, sim):
pass
def save(self, sim, **kwargs):
pass
def finalize(self, sim):
pass
|
StarcoderdataPython
|
1725102
|
# -------- BEGIN LICENSE BLOCK --------
# Copyright 2022 FZI Forschungszentrum Informatik
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the {copyright_holder} nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -------- END LICENSE BLOCK --------
import operator
import math
from ros_bt_py_msgs.msg import Node as NodeMsg
from ros_bt_py.exceptions import BehaviorTreeException
from ros_bt_py.node import Leaf, define_bt_node
from ros_bt_py.node_config import NodeConfig, OptionRef
from ros_bt_py.helpers import MathUnaryOperator, MathBinaryOperator
from ros_bt_py.helpers import MathOperandType, MathUnaryOperandType
@define_bt_node(NodeConfig(
version='0.9.0',
options={'input_type': type,
'output_type': type},
inputs={'in': OptionRef('input_type')},
outputs={'out': OptionRef('output_type')},
max_children=0,
tags=['math', 'convert', 'variable']))
class Convert(Leaf):
"""Convert between certain types.
Useful in many cases indeed."""
def __init__(self, options=None, debug_manager=None, name=None):
super(Convert, self).__init__(options, debug_manager, name)
# check the possible conversions here
if self.options['input_type'] is self.options['output_type']:
pass
elif self.options['output_type'] is str:
# that should almost always work
pass
elif self.options['input_type'] is float and self.options['output_type'] is int:
            self.logwarn('loss of precision in conversion from float to int')
elif self.options['input_type'] is bool and self.options['output_type'] is int:
self.loginfo('interpreting False as 0 and True as 1')
elif self.options['input_type'] is int and self.options['output_type'] is bool:
self.loginfo('interpreting 0 as False and != 0 as True')
elif (self.options['input_type'] in [int, float]
and self.options['output_type'] in [int, float]):
pass
else:
raise BehaviorTreeException('Conversion between "%s" and "%s" not implemented' % (
self.options['input_type'],
self.options['output_type']))
def _do_setup(self):
pass
def _do_tick(self):
if self.options['input_type'] is self.options['output_type']:
# passthrough
self.outputs['out'] = self.inputs['in']
elif self.options['output_type'] is str:
# that should almost always work
self.outputs['out'] = str(self.inputs['in'])
elif self.options['input_type'] is int and self.options['output_type'] is bool:
if self.inputs['in'] == 0:
self.outputs['out'] = False
else:
self.outputs['out'] = True
elif self.options['input_type'] is bool and self.options['output_type'] is int:
if self.inputs['in']:
self.outputs['out'] = 1
else:
self.outputs['out'] = 0
elif (self.options['input_type'] in [int, float]
and self.options['output_type'] in [int, float]):
if self.options['input_type'] is int:
if self.options['output_type'] is float:
self.outputs['out'] = float(self.inputs['in'])
elif self.options['input_type'] is float:
if self.options['output_type'] is int:
self.outputs['out'] = int(self.inputs['in'])
return NodeMsg.SUCCEEDED
def _do_shutdown(self):
pass
def _do_reset(self):
return NodeMsg.IDLE
def _do_untick(self):
return NodeMsg.IDLE
# Uncomment this if your node provides a utility calculation
#
# def _do_calculate_utility(self):
# pass
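# Hypothetical configuration sketch (not part of the original module): based on the
# constructor above, a Convert node could be parameterised as
#
#   Convert(options={'input_type': int, 'output_type': str}, name='int_to_str')
#
# which passes the 'in' input through str() on every tick.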
@define_bt_node(NodeConfig(
version='0.9.0',
options={'operand_type': MathOperandType,
'operator': MathBinaryOperator},
inputs={},
outputs={},
max_children=0,
tags=['math', 'operator', 'operation', 'calculation', 'result', 'variable',
'+-/*%', 'add', 'div', 'sub', 'mul', 'mod', 'and', 'or', 'xor', 'shift', 'pow']))
class Operation(Leaf):
"""Performs the desired binary operation on the inputs a and b."""
def __init__(self, options=None, debug_manager=None, name=None):
super(Operation, self).__init__(options, debug_manager, name)
self.operators = dict()
self.operators['add'] = operator.add
self.operators['+'] = operator.add
self.operators['and'] = operator.__and__
self.operators['&'] = operator.__and__
self.operators['div'] = operator.truediv
self.operators['/'] = operator.truediv
self.operators['floordiv'] = operator.floordiv
self.operators['//'] = operator.floordiv
self.operators['lshift'] = operator.lshift
self.operators['<<'] = operator.lshift
self.operators['mod'] = operator.mod
self.operators['%'] = operator.mod
self.operators['mul'] = operator.mul
self.operators['*'] = operator.mul
self.operators['or'] = operator.__or__
self.operators['|'] = operator.__or__
self.operators['pow'] = operator.pow
self.operators['**'] = operator.pow
self.operators['rshift'] = operator.rshift
self.operators['>>'] = operator.rshift
self.operators['sub'] = operator.sub
self.operators['-'] = operator.sub
self.operators['truediv'] = operator.truediv
self.operators['xor'] = operator.xor
self.operators['^'] = operator.xor
if self.options['operator'].operator not in self.operators:
raise BehaviorTreeException(
'Operator %s is not recognized.' % self.options['operator'].operator)
self.operand_type = None
if self.options['operand_type'].operand_type == 'int':
self.operand_type = int
elif self.options['operand_type'].operand_type == 'float':
self.operand_type = float
elif self.options['operand_type'].operand_type == 'bool':
self.operand_type = bool
node_inputs = {}
node_inputs['a'] = self.operand_type
node_inputs['b'] = self.operand_type
node_outputs = {}
node_outputs['result'] = self.operand_type
self.node_config.extend(NodeConfig(
options={},
inputs=node_inputs,
outputs=node_outputs,
max_children=0))
self._register_node_data(source_map=node_inputs,
target_map=self.inputs)
self._register_node_data(source_map=node_outputs,
target_map=self.outputs)
def _do_setup(self):
pass
def _do_tick(self):
self.outputs['result'] = self.operators[self.options['operator'].operator](
self.inputs['a'],
self.inputs['b']
)
return NodeMsg.SUCCEEDED
def _do_shutdown(self):
pass
def _do_reset(self):
return NodeMsg.IDLE
def _do_untick(self):
return NodeMsg.IDLE
# Uncomment this if your node provides a utility calculation
#
# def _do_calculate_utility(self):
# pass
@define_bt_node(NodeConfig(
version='0.9.0',
options={'operand_type': MathUnaryOperandType,
'operator': MathUnaryOperator},
inputs={},
outputs={},
max_children=0,
tags=['math', 'operator', 'operation', 'calculation', 'result', 'variable',
'not', 'inv', 'log', 'ceil', 'floor', 'sqrt', 'sin', 'cos', 'tan',
'degrees', 'radians', 'error', 'erf', 'gamma']))
class UnaryOperation(Leaf):
"""Performs the desired unary operation on the inputs a and b."""
def __init__(self, options=None, debug_manager=None, name=None):
super(UnaryOperation, self).__init__(options, debug_manager, name)
self.operators = dict()
self.operators['not'] = operator.not_
self.operators['inv'] = operator.inv
self.operators['~'] = operator.inv
self.operators['neg'] = operator.neg
self.operators['-'] = operator.neg
self.operators['pos'] = operator.pos
self.operators['+'] = operator.pos
self.operators['exp'] = math.exp
self.operators['expm1'] = math.expm1
self.operators['log'] = math.log
self.operators['log1p'] = math.log1p
self.operators['log10'] = math.log10
self.operators['ceil'] = math.ceil
self.operators['fabs'] = math.fabs
self.operators['factorial'] = math.factorial
self.operators['floor'] = math.floor
self.operators['sqrt'] = math.sqrt
self.operators['acos'] = math.acos
self.operators['asin'] = math.asin
self.operators['atan'] = math.atan
self.operators['acosh'] = math.acosh
self.operators['asinh'] = math.asinh
self.operators['atanh'] = math.atanh
self.operators['cos'] = math.cos
self.operators['sin'] = math.sin
self.operators['tan'] = math.tan
self.operators['cosh'] = math.cosh
self.operators['sinh'] = math.sinh
self.operators['tanh'] = math.tanh
self.operators['degrees'] = math.degrees
self.operators['radians'] = math.radians
self.operators['erf'] = math.erf
self.operators['erfc'] = math.erfc
self.operators['gamma'] = math.gamma
self.operators['lgamma'] = math.lgamma
if self.options['operator'].operator not in self.operators:
raise BehaviorTreeException(
'Operator %s is not recognized.' % self.options['operator'].operator)
self.operand_type = None
if self.options['operand_type'].operand_type == 'int':
self.operand_type = int
elif self.options['operand_type'].operand_type == 'float':
self.operand_type = float
node_inputs = {}
node_inputs['in'] = self.operand_type
node_outputs = {}
node_outputs['result'] = self.operand_type
self.node_config.extend(NodeConfig(
options={},
inputs=node_inputs,
outputs=node_outputs,
max_children=0))
self._register_node_data(source_map=node_inputs,
target_map=self.inputs)
self._register_node_data(source_map=node_outputs,
target_map=self.outputs)
def _do_setup(self):
pass
def _do_tick(self):
self.outputs['result'] = self.operators[self.options['operator'].operator](
self.inputs['in']
)
return NodeMsg.SUCCEEDED
def _do_shutdown(self):
pass
def _do_reset(self):
return NodeMsg.IDLE
def _do_untick(self):
return NodeMsg.IDLE
# Uncomment this if your node provides a utility calculation
#
# def _do_calculate_utility(self):
# pass
|
StarcoderdataPython
|
3225938
|
#!/usr/bin/env python
# run:
# python3 -mvenv ../ve3
# ../ve3/bin/pip install wheel
# ../ve3/bin/pip install --editable .
# ../ve3/bin/ag-pserver --listen tcp:8001 --controller tcp:localhost:8002
# the provisioning webpage is in html/index.html , edit it in place
from setuptools import setup
import os
setup(
name="ag-pserver",
description="provisioning server for Agoric testnet",
license="MIT",
package_dir={"": "src"},
packages=["ag_pserver"],
package_data={"ag_pserver": ['html/*']},
install_requires=[
"twisted[tls]",
"magic-wormhole",
"treq",
],
entry_points={
"console_scripts": [ "ag-pserver = ag_pserver.main:main" ],
},
include_package_data=True,
version="0.0.1",
)
|
StarcoderdataPython
|
3291450
|
<filename>stack.py
from linked_list import *
class Stack (object):
def __init__(self):
self.linked_list = LinkedList ()
def stack_size (self):
return self.linked_list.size_of_linked_list()
def is_empty (self):
return self.linked_list.size_of_linked_list() == 0
def push (self, data):
self.linked_list.insert_at_start(data)
def pop (self):
if not self.is_empty():
data = self.linked_list.get_first_element()
self.linked_list.remove(data)
return data
else:
raise Exception("No more entries in stack.")
def peek (self):
if not self.is_empty():
return self.linked_list.get_first_element()
if __name__ == '__main__':
s1 = Stack ()
s1.push (10)
s1.push (9)
s1.push (8)
print (s1.peek())
while not s1.is_empty():
print (s1.pop())
|
StarcoderdataPython
|
4820167
|
import os
class Config(object):
DEBUG = True
TESTING = True
SECRET_KEY = "<KEY>"
DATABASE_URL = os.environ.get("TRUNKS_DATABASE_URL")
|
StarcoderdataPython
|
3354831
|
<gh_stars>0
import pdb
def spam(eggs):
print('eggs:', eggs)
if __name__ == '__main__':
pdb.set_trace()
for i in range(5):
spam(i)
|
StarcoderdataPython
|
3528
|
<filename>pysc2/lib/actions.py
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define the static list of types and actions for SC2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numbers
import six
from pysc2.lib import point
from s2clientprotocol import spatial_pb2 as sc_spatial
from s2clientprotocol import ui_pb2 as sc_ui
def no_op(action):
del action
def move_camera(action, minimap):
"""Move the camera."""
minimap.assign_to(action.action_feature_layer.camera_move.center_minimap)
def select_point(action, select_point_act, screen):
"""Select a unit at a point."""
select = action.action_feature_layer.unit_selection_point
screen.assign_to(select.selection_screen_coord)
select.type = select_point_act
def select_rect(action, select_add, screen, screen2):
"""Select units within a rectangle."""
select = action.action_feature_layer.unit_selection_rect
out_rect = select.selection_screen_coord.add()
screen_rect = point.Rect(screen, screen2)
screen_rect.tl.assign_to(out_rect.p0)
screen_rect.br.assign_to(out_rect.p1)
select.selection_add = bool(select_add)
def select_idle_worker(action, select_worker):
"""Select an idle worker."""
action.action_ui.select_idle_worker.type = select_worker
def select_army(action, select_add):
"""Select the entire army."""
action.action_ui.select_army.selection_add = select_add
def select_warp_gates(action, select_add):
"""Select all warp gates."""
action.action_ui.select_warp_gates.selection_add = select_add
def select_larva(action):
"""Select all larva."""
action.action_ui.select_larva.SetInParent() # Adds the empty proto field.
def select_unit(action, select_unit_act, select_unit_id):
"""Select a specific unit from the multi-unit selection."""
select = action.action_ui.multi_panel
select.type = select_unit_act
select.unit_index = select_unit_id
def control_group(action, control_group_act, control_group_id):
"""Act on a control group, selecting, setting, etc."""
select = action.action_ui.control_group
select.action = control_group_act
select.control_group_index = control_group_id
def unload(action, unload_id):
"""Unload a unit from a transport/bunker/nydus/etc."""
action.action_ui.cargo_panel.unit_index = unload_id
def build_queue(action, build_queue_id):
"""Cancel a unit in the build queue."""
action.action_ui.production_panel.unit_index = build_queue_id
def cmd_quick(action, ability_id, queued):
"""Do a quick command like 'Stop' or 'Stim'."""
action_cmd = action.action_feature_layer.unit_command
action_cmd.ability_id = ability_id
action_cmd.queue_command = queued
def cmd_screen(action, ability_id, queued, screen):
"""Do a command that needs a point on the screen."""
action_cmd = action.action_feature_layer.unit_command
action_cmd.ability_id = ability_id
action_cmd.queue_command = queued
screen.assign_to(action_cmd.target_screen_coord)
def cmd_minimap(action, ability_id, queued, minimap):
"""Do a command that needs a point on the minimap."""
action_cmd = action.action_feature_layer.unit_command
action_cmd.ability_id = ability_id
action_cmd.queue_command = queued
minimap.assign_to(action_cmd.target_minimap_coord)
def autocast(action, ability_id):
"""Toggle autocast."""
action.action_ui.toggle_autocast.ability_id = ability_id
class ArgumentType(collections.namedtuple(
"ArgumentType", ["id", "name", "sizes", "fn"])):
"""Represents a single argument type.
Attributes:
id: The argument id. This is unique.
name: The name of the argument, also unique.
sizes: The max+1 of each of the dimensions this argument takes.
fn: The function to convert the list of integers into something more
meaningful to be set in the protos to send to the game.
"""
__slots__ = ()
def __str__(self):
return "%s/%s %s" % (self.id, self.name, list(self.sizes))
@classmethod
def enum(cls, options):
"""Create an ArgumentType where you choose one of a set of known values."""
return cls(-1, "<none>", (len(options),), lambda a: options[a[0]])
@classmethod
def scalar(cls, value):
"""Create an ArgumentType with a single scalar in range(value)."""
return cls(-1, "<none>", (value,), lambda a: a[0])
@classmethod
def point(cls): # No range because it's unknown at this time.
"""Create an ArgumentType that is represented by a point.Point."""
return cls(-1, "<none>", (0, 0), lambda a: point.Point(*a).floor())
@classmethod
def spec(cls, id_, name, sizes):
"""Create an ArgumentType to be used in ValidActions."""
return cls(id_, name, sizes, None)
class Arguments(collections.namedtuple("Arguments", [
"screen", "minimap", "screen2", "queued", "control_group_act",
"control_group_id", "select_point_act", "select_add", "select_unit_act",
"select_unit_id", "select_worker", "build_queue_id", "unload_id"])):
"""The full list of argument types.
Take a look at TYPES and FUNCTION_TYPES for more details.
Attributes:
screen: A point on the screen.
minimap: A point on the minimap.
screen2: The second point for a rectangle. This is needed so that no
function takes the same type twice.
queued: Whether the action should be done now or later.
control_group_act: What to do with the control group.
control_group_id: Which control group to do it with.
select_point_act: What to do with the unit at the point.
select_add: Whether to add the unit to the selection or replace it.
select_unit_act: What to do when selecting a unit by id.
select_unit_id: Which unit to select by id.
select_worker: What to do when selecting a worker.
build_queue_id: Which build queue index to target.
unload_id: Which unit to target in a transport/nydus/command center.
"""
  __slots__ = ()
@classmethod
def types(cls, **kwargs):
"""Create an Arguments of the possible Types."""
named = {name: type_._replace(id=Arguments._fields.index(name), name=name)
for name, type_ in six.iteritems(kwargs)}
return cls(**named)
# The list of known types.
TYPES = Arguments.types(
screen=ArgumentType.point(),
minimap=ArgumentType.point(),
screen2=ArgumentType.point(),
queued=ArgumentType.enum([False, True]), # (now vs add to queue)
control_group_act=ArgumentType.enum([
sc_ui.ActionControlGroup.Recall,
sc_ui.ActionControlGroup.Set,
sc_ui.ActionControlGroup.Append,
sc_ui.ActionControlGroup.SetAndSteal,
sc_ui.ActionControlGroup.AppendAndSteal,
]),
control_group_id=ArgumentType.scalar(10),
select_point_act=ArgumentType.enum([
sc_spatial.ActionSpatialUnitSelectionPoint.Select,
sc_spatial.ActionSpatialUnitSelectionPoint.Toggle,
sc_spatial.ActionSpatialUnitSelectionPoint.AllType,
sc_spatial.ActionSpatialUnitSelectionPoint.AddAllType,
]),
select_add=ArgumentType.enum([False, True]), # (select vs select_add)
select_unit_act=ArgumentType.enum([
sc_ui.ActionMultiPanel.SingleSelect,
sc_ui.ActionMultiPanel.DeselectUnit,
sc_ui.ActionMultiPanel.SelectAllOfType,
sc_ui.ActionMultiPanel.DeselectAllOfType,
]),
select_unit_id=ArgumentType.scalar(500), # Depends on current selection.
select_worker=ArgumentType.enum([
sc_ui.ActionSelectIdleWorker.Set,
sc_ui.ActionSelectIdleWorker.Add,
sc_ui.ActionSelectIdleWorker.All,
sc_ui.ActionSelectIdleWorker.AddAll,
]),
build_queue_id=ArgumentType.scalar(10), # Depends on current build queue.
unload_id=ArgumentType.scalar(500), # Depends on the current loaded units.
)
# Which argument types do each function need?
FUNCTION_TYPES = {
no_op: [],
move_camera: [TYPES.minimap],
select_point: [TYPES.select_point_act, TYPES.screen],
select_rect: [TYPES.select_add, TYPES.screen, TYPES.screen2],
select_unit: [TYPES.select_unit_act, TYPES.select_unit_id],
control_group: [TYPES.control_group_act, TYPES.control_group_id],
select_idle_worker: [TYPES.select_worker],
select_army: [TYPES.select_add],
select_warp_gates: [TYPES.select_add],
select_larva: [],
unload: [TYPES.unload_id],
build_queue: [TYPES.build_queue_id],
cmd_quick: [TYPES.queued],
cmd_screen: [TYPES.queued, TYPES.screen],
cmd_minimap: [TYPES.queued, TYPES.minimap],
autocast: [],
}
# Which ones need an ability?
ABILITY_FUNCTIONS = {cmd_quick, cmd_screen, cmd_minimap, autocast}
# Which ones require a point?
POINT_REQUIRED_FUNCS = {
False: {cmd_quick, autocast},
True: {cmd_screen, cmd_minimap, autocast}}
always = lambda _: True
class Function(collections.namedtuple(
"Function", ["id", "name", "ability_id", "general_id", "function_type",
"args", "avail_fn"])):
"""Represents a function action.
Attributes:
id: The function id, which is what the agent will use.
name: The name of the function. Should be unique.
ability_id: The ability id to pass to sc2.
general_id: 0 for normal abilities, and the ability_id of another ability if
it can be represented by a more general action.
function_type: One of the functions in FUNCTION_TYPES for how to construct
the sc2 action proto out of python types.
args: A list of the types of args passed to function_type.
avail_fn: For non-abilities, this function returns whether the function is
valid.
"""
__slots__ = ()
@classmethod
def ui_func(cls, id_, name, function_type, avail_fn=always):
"""Define a function representing a ui action."""
return cls(id_, name, 0, 0, function_type, FUNCTION_TYPES[function_type],
avail_fn)
@classmethod
def ability(cls, id_, name, function_type, ability_id, general_id=0):
"""Define a function represented as a game ability."""
assert function_type in ABILITY_FUNCTIONS
return cls(id_, name, ability_id, general_id, function_type,
FUNCTION_TYPES[function_type], None)
@classmethod
def spec(cls, id_, name, args):
"""Create a Function to be used in ValidActions."""
return cls(id_, name, None, None, None, args, None)
def __hash__(self): # So it can go in a set().
return self.id
def __str__(self):
return self.str()
def str(self, space=False):
"""String version. Set space=True to line them all up nicely."""
return "%s/%s (%s)" % (str(self.id).rjust(space and 4),
self.name.ljust(space and 50),
"; ".join(str(a) for a in self.args))
class Functions(object):
"""Represents the full set of functions.
Can't use namedtuple since python3 has a limit of 255 function arguments, so
build something similar.
"""
def __init__(self, functions):
self._func_list = functions
self._func_dict = {f.name: f for f in functions}
if len(self._func_dict) != len(self._func_list):
raise ValueError("Function names must be unique.")
def __getattr__(self, name):
return self._func_dict[name]
def __getitem__(self, key):
if isinstance(key, numbers.Number):
return self._func_list[key]
return self._func_dict[key]
def __iter__(self):
return iter(self._func_list)
def __len__(self):
return len(self._func_list)
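# Lookup sketch (added note, not part of the original file): the Functions container
# above supports access by attribute, by name, or by index, so the FUNCTIONS table
# built below can be used as, e.g.
#
#   FUNCTIONS.select_army        # attribute access via __getattr__
#   FUNCTIONS["select_army"]     # name lookup via __getitem__
#   FUNCTIONS[7]                 # positional lookup via __getitem__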
# pylint: disable=line-too-long
FUNCTIONS = Functions([
Function.ui_func(0, "no_op", no_op),
Function.ui_func(1, "move_camera", move_camera),
Function.ui_func(2, "select_point", select_point),
Function.ui_func(3, "select_rect", select_rect),
Function.ui_func(4, "select_control_group", control_group),
Function.ui_func(5, "select_unit", select_unit,
lambda obs: obs.ui_data.HasField("multi")),
Function.ui_func(6, "select_idle_worker", select_idle_worker,
lambda obs: obs.player_common.idle_worker_count > 0),
Function.ui_func(7, "select_army", select_army,
lambda obs: obs.player_common.army_count > 0),
Function.ui_func(8, "select_warp_gates", select_warp_gates,
lambda obs: obs.player_common.warp_gate_count > 0),
Function.ui_func(9, "select_larva", select_larva,
lambda obs: obs.player_common.larva_count > 0),
Function.ui_func(10, "unload", unload,
lambda obs: obs.ui_data.HasField("cargo")),
Function.ui_func(11, "build_queue", build_queue,
lambda obs: obs.ui_data.HasField("production")),
# Everything below here is generated with gen_actions.py
Function.ability(12, "Attack_screen", cmd_screen, 3674),
Function.ability(13, "Attack_minimap", cmd_minimap, 3674),
Function.ability(14, "Attack_Attack_screen", cmd_screen, 23, 3674),
Function.ability(15, "Attack_Attack_minimap", cmd_minimap, 23, 3674),
Function.ability(16, "Attack_AttackBuilding_screen", cmd_screen, 2048, 3674),
Function.ability(17, "Attack_AttackBuilding_minimap", cmd_minimap, 2048, 3674),
Function.ability(18, "Attack_Redirect_screen", cmd_screen, 1682, 3674),
Function.ability(19, "Scan_Move_screen", cmd_screen, 19, 3674),
Function.ability(20, "Scan_Move_minimap", cmd_minimap, 19, 3674),
Function.ability(21, "Behavior_BuildingAttackOff_quick", cmd_quick, 2082),
Function.ability(22, "Behavior_BuildingAttackOn_quick", cmd_quick, 2081),
Function.ability(23, "Behavior_CloakOff_quick", cmd_quick, 3677),
Function.ability(24, "Behavior_CloakOff_Banshee_quick", cmd_quick, 393, 3677),
Function.ability(25, "Behavior_CloakOff_Ghost_quick", cmd_quick, 383, 3677),
Function.ability(26, "Behavior_CloakOn_quick", cmd_quick, 3676),
Function.ability(27, "Behavior_CloakOn_Banshee_quick", cmd_quick, 392, 3676),
Function.ability(28, "Behavior_CloakOn_Ghost_quick", cmd_quick, 382, 3676),
Function.ability(29, "Behavior_GenerateCreepOff_quick", cmd_quick, 1693),
Function.ability(30, "Behavior_GenerateCreepOn_quick", cmd_quick, 1692),
Function.ability(31, "Behavior_HoldFireOff_quick", cmd_quick, 3689),
Function.ability(32, "Behavior_HoldFireOff_Ghost_quick", cmd_quick, 38, 3689),
Function.ability(33, "Behavior_HoldFireOff_Lurker_quick", cmd_quick, 2552, 3689),
Function.ability(34, "Behavior_HoldFireOn_quick", cmd_quick, 3688),
Function.ability(35, "Behavior_HoldFireOn_Ghost_quick", cmd_quick, 36, 3688),
Function.ability(36, "Behavior_HoldFireOn_Lurker_quick", cmd_quick, 2550, 3688),
Function.ability(37, "Behavior_PulsarBeamOff_quick", cmd_quick, 2376),
Function.ability(38, "Behavior_PulsarBeamOn_quick", cmd_quick, 2375),
Function.ability(39, "Build_Armory_screen", cmd_screen, 331),
Function.ability(40, "Build_Assimilator_screen", cmd_screen, 882),
Function.ability(41, "Build_BanelingNest_screen", cmd_screen, 1162),
Function.ability(42, "Build_Barracks_screen", cmd_screen, 321),
Function.ability(43, "Build_Bunker_screen", cmd_screen, 324),
Function.ability(44, "Build_CommandCenter_screen", cmd_screen, 318),
Function.ability(45, "Build_CreepTumor_screen", cmd_screen, 3691),
Function.ability(46, "Build_CreepTumor_Queen_screen", cmd_screen, 1694, 3691),
Function.ability(47, "Build_CreepTumor_Tumor_screen", cmd_screen, 1733, 3691),
Function.ability(48, "Build_CyberneticsCore_screen", cmd_screen, 894),
Function.ability(49, "Build_DarkShrine_screen", cmd_screen, 891),
Function.ability(50, "Build_EngineeringBay_screen", cmd_screen, 322),
Function.ability(51, "Build_EvolutionChamber_screen", cmd_screen, 1156),
Function.ability(52, "Build_Extractor_screen", cmd_screen, 1154),
Function.ability(53, "Build_Factory_screen", cmd_screen, 328),
Function.ability(54, "Build_FleetBeacon_screen", cmd_screen, 885),
Function.ability(55, "Build_Forge_screen", cmd_screen, 884),
Function.ability(56, "Build_FusionCore_screen", cmd_screen, 333),
Function.ability(57, "Build_Gateway_screen", cmd_screen, 883),
Function.ability(58, "Build_GhostAcademy_screen", cmd_screen, 327),
Function.ability(59, "Build_Hatchery_screen", cmd_screen, 1152),
Function.ability(60, "Build_HydraliskDen_screen", cmd_screen, 1157),
Function.ability(61, "Build_InfestationPit_screen", cmd_screen, 1160),
Function.ability(62, "Build_Interceptors_quick", cmd_quick, 1042),
Function.ability(63, "Build_Interceptors_autocast", autocast, 1042),
Function.ability(64, "Build_MissileTurret_screen", cmd_screen, 323),
Function.ability(65, "Build_Nexus_screen", cmd_screen, 880),
Function.ability(66, "Build_Nuke_quick", cmd_quick, 710),
Function.ability(67, "Build_NydusNetwork_screen", cmd_screen, 1161),
Function.ability(68, "Build_NydusWorm_screen", cmd_screen, 1768),
Function.ability(69, "Build_PhotonCannon_screen", cmd_screen, 887),
Function.ability(70, "Build_Pylon_screen", cmd_screen, 881),
Function.ability(71, "Build_Reactor_quick", cmd_quick, 3683),
Function.ability(72, "Build_Reactor_screen", cmd_screen, 3683),
Function.ability(73, "Build_Reactor_Barracks_quick", cmd_quick, 422, 3683),
Function.ability(74, "Build_Reactor_Barracks_screen", cmd_screen, 422, 3683),
Function.ability(75, "Build_Reactor_Factory_quick", cmd_quick, 455, 3683),
Function.ability(76, "Build_Reactor_Factory_screen", cmd_screen, 455, 3683),
Function.ability(77, "Build_Reactor_Starport_quick", cmd_quick, 488, 3683),
Function.ability(78, "Build_Reactor_Starport_screen", cmd_screen, 488, 3683),
Function.ability(79, "Build_Refinery_screen", cmd_screen, 320),
Function.ability(80, "Build_RoachWarren_screen", cmd_screen, 1165),
Function.ability(81, "Build_RoboticsBay_screen", cmd_screen, 892),
Function.ability(82, "Build_RoboticsFacility_screen", cmd_screen, 893),
Function.ability(83, "Build_SensorTower_screen", cmd_screen, 326),
Function.ability(84, "Build_SpawningPool_screen", cmd_screen, 1155),
Function.ability(85, "Build_SpineCrawler_screen", cmd_screen, 1166),
Function.ability(86, "Build_Spire_screen", cmd_screen, 1158),
Function.ability(87, "Build_SporeCrawler_screen", cmd_screen, 1167),
Function.ability(88, "Build_Stargate_screen", cmd_screen, 889),
Function.ability(89, "Build_Starport_screen", cmd_screen, 329),
Function.ability(90, "Build_StasisTrap_screen", cmd_screen, 2505),
Function.ability(91, "Build_SupplyDepot_screen", cmd_screen, 319),
Function.ability(92, "Build_TechLab_quick", cmd_quick, 3682),
Function.ability(93, "Build_TechLab_screen", cmd_screen, 3682),
Function.ability(94, "Build_TechLab_Barracks_quick", cmd_quick, 421, 3682),
Function.ability(95, "Build_TechLab_Barracks_screen", cmd_screen, 421, 3682),
Function.ability(96, "Build_TechLab_Factory_quick", cmd_quick, 454, 3682),
Function.ability(97, "Build_TechLab_Factory_screen", cmd_screen, 454, 3682),
Function.ability(98, "Build_TechLab_Starport_quick", cmd_quick, 487, 3682),
Function.ability(99, "Build_TechLab_Starport_screen", cmd_screen, 487, 3682),
Function.ability(100, "Build_TemplarArchive_screen", cmd_screen, 890),
Function.ability(101, "Build_TwilightCouncil_screen", cmd_screen, 886),
Function.ability(102, "Build_UltraliskCavern_screen", cmd_screen, 1159),
Function.ability(103, "BurrowDown_quick", cmd_quick, 3661),
Function.ability(104, "BurrowDown_Baneling_quick", cmd_quick, 1374, 3661),
Function.ability(105, "BurrowDown_Drone_quick", cmd_quick, 1378, 3661),
Function.ability(106, "BurrowDown_Hydralisk_quick", cmd_quick, 1382, 3661),
Function.ability(107, "BurrowDown_Infestor_quick", cmd_quick, 1444, 3661),
Function.ability(108, "BurrowDown_InfestorTerran_quick", cmd_quick, 1394, 3661),
Function.ability(109, "BurrowDown_Lurker_quick", cmd_quick, 2108, 3661),
Function.ability(110, "BurrowDown_Queen_quick", cmd_quick, 1433, 3661),
Function.ability(111, "BurrowDown_Ravager_quick", cmd_quick, 2340, 3661),
Function.ability(112, "BurrowDown_Roach_quick", cmd_quick, 1386, 3661),
Function.ability(113, "BurrowDown_SwarmHost_quick", cmd_quick, 2014, 3661),
Function.ability(114, "BurrowDown_Ultralisk_quick", cmd_quick, 1512, 3661),
Function.ability(115, "BurrowDown_WidowMine_quick", cmd_quick, 2095, 3661),
Function.ability(116, "BurrowDown_Zergling_quick", cmd_quick, 1390, 3661),
Function.ability(117, "BurrowUp_quick", cmd_quick, 3662),
Function.ability(118, "BurrowUp_autocast", autocast, 3662),
Function.ability(119, "BurrowUp_Baneling_quick", cmd_quick, 1376, 3662),
Function.ability(120, "BurrowUp_Baneling_autocast", autocast, 1376, 3662),
Function.ability(121, "BurrowUp_Drone_quick", cmd_quick, 1380, 3662),
Function.ability(122, "BurrowUp_Hydralisk_quick", cmd_quick, 1384, 3662),
Function.ability(123, "BurrowUp_Hydralisk_autocast", autocast, 1384, 3662),
Function.ability(124, "BurrowUp_Infestor_quick", cmd_quick, 1446, 3662),
Function.ability(125, "BurrowUp_InfestorTerran_quick", cmd_quick, 1396, 3662),
Function.ability(126, "BurrowUp_InfestorTerran_autocast", autocast, 1396, 3662),
Function.ability(127, "BurrowUp_Lurker_quick", cmd_quick, 2110, 3662),
Function.ability(128, "BurrowUp_Queen_quick", cmd_quick, 1435, 3662),
Function.ability(129, "BurrowUp_Queen_autocast", autocast, 1435, 3662),
Function.ability(130, "BurrowUp_Ravager_quick", cmd_quick, 2342, 3662),
Function.ability(131, "BurrowUp_Ravager_autocast", autocast, 2342, 3662),
Function.ability(132, "BurrowUp_Roach_quick", cmd_quick, 1388, 3662),
Function.ability(133, "BurrowUp_Roach_autocast", autocast, 1388, 3662),
Function.ability(134, "BurrowUp_SwarmHost_quick", cmd_quick, 2016, 3662),
Function.ability(135, "BurrowUp_Ultralisk_quick", cmd_quick, 1514, 3662),
Function.ability(136, "BurrowUp_Ultralisk_autocast", autocast, 1514, 3662),
Function.ability(137, "BurrowUp_WidowMine_quick", cmd_quick, 2097, 3662),
Function.ability(138, "BurrowUp_Zergling_quick", cmd_quick, 1392, 3662),
Function.ability(139, "BurrowUp_Zergling_autocast", autocast, 1392, 3662),
Function.ability(140, "Cancel_quick", cmd_quick, 3659),
Function.ability(141, "Cancel_AdeptPhaseShift_quick", cmd_quick, 2594, 3659),
Function.ability(142, "Cancel_AdeptShadePhaseShift_quick", cmd_quick, 2596, 3659),
Function.ability(143, "Cancel_BarracksAddOn_quick", cmd_quick, 451, 3659),
Function.ability(144, "Cancel_BuildInProgress_quick", cmd_quick, 314, 3659),
Function.ability(145, "Cancel_CreepTumor_quick", cmd_quick, 1763, 3659),
Function.ability(146, "Cancel_FactoryAddOn_quick", cmd_quick, 484, 3659),
Function.ability(147, "Cancel_GravitonBeam_quick", cmd_quick, 174, 3659),
Function.ability(148, "Cancel_LockOn_quick", cmd_quick, 2354, 3659),
Function.ability(149, "Cancel_MorphBroodlord_quick", cmd_quick, 1373, 3659),
Function.ability(150, "Cancel_MorphGreaterSpire_quick", cmd_quick, 1221, 3659),
Function.ability(151, "Cancel_MorphHive_quick", cmd_quick, 1219, 3659),
Function.ability(152, "Cancel_MorphLair_quick", cmd_quick, 1217, 3659),
Function.ability(153, "Cancel_MorphLurker_quick", cmd_quick, 2333, 3659),
Function.ability(154, "Cancel_MorphLurkerDen_quick", cmd_quick, 2113, 3659),
Function.ability(155, "Cancel_MorphMothership_quick", cmd_quick, 1848, 3659),
Function.ability(156, "Cancel_MorphOrbital_quick", cmd_quick, 1517, 3659),
Function.ability(157, "Cancel_MorphOverlordTransport_quick", cmd_quick, 2709, 3659),
Function.ability(158, "Cancel_MorphOverseer_quick", cmd_quick, 1449, 3659),
Function.ability(159, "Cancel_MorphPlanetaryFortress_quick", cmd_quick, 1451, 3659),
Function.ability(160, "Cancel_MorphRavager_quick", cmd_quick, 2331, 3659),
Function.ability(161, "Cancel_MorphThorExplosiveMode_quick", cmd_quick, 2365, 3659),
Function.ability(162, "Cancel_NeuralParasite_quick", cmd_quick, 250, 3659),
Function.ability(163, "Cancel_Nuke_quick", cmd_quick, 1623, 3659),
Function.ability(164, "Cancel_SpineCrawlerRoot_quick", cmd_quick, 1730, 3659),
Function.ability(165, "Cancel_SporeCrawlerRoot_quick", cmd_quick, 1732, 3659),
Function.ability(166, "Cancel_StarportAddOn_quick", cmd_quick, 517, 3659),
Function.ability(167, "Cancel_StasisTrap_quick", cmd_quick, 2535, 3659),
Function.ability(168, "Cancel_Last_quick", cmd_quick, 3671),
Function.ability(169, "Cancel_HangarQueue5_quick", cmd_quick, 1038, 3671),
Function.ability(170, "Cancel_Queue1_quick", cmd_quick, 304, 3671),
Function.ability(171, "Cancel_Queue5_quick", cmd_quick, 306, 3671),
Function.ability(172, "Cancel_QueueAddOn_quick", cmd_quick, 312, 3671),
Function.ability(173, "Cancel_QueueCancelToSelection_quick", cmd_quick, 308, 3671),
Function.ability(174, "Cancel_QueuePasive_quick", cmd_quick, 1831, 3671),
Function.ability(175, "Cancel_QueuePassiveCancelToSelection_quick", cmd_quick, 1833, 3671),
Function.ability(176, "Effect_Abduct_screen", cmd_screen, 2067),
Function.ability(177, "Effect_AdeptPhaseShift_screen", cmd_screen, 2544),
Function.ability(178, "Effect_AutoTurret_screen", cmd_screen, 1764),
Function.ability(179, "Effect_BlindingCloud_screen", cmd_screen, 2063),
Function.ability(180, "Effect_Blink_screen", cmd_screen, 3687),
Function.ability(181, "Effect_Blink_Stalker_screen", cmd_screen, 1442, 3687),
Function.ability(182, "Effect_ShadowStride_screen", cmd_screen, 2700, 3687),
Function.ability(183, "Effect_CalldownMULE_screen", cmd_screen, 171),
Function.ability(184, "Effect_CausticSpray_screen", cmd_screen, 2324),
Function.ability(185, "Effect_Charge_screen", cmd_screen, 1819),
Function.ability(186, "Effect_Charge_autocast", autocast, 1819),
Function.ability(187, "Effect_ChronoBoost_screen", cmd_screen, 261),
Function.ability(188, "Effect_Contaminate_screen", cmd_screen, 1825),
Function.ability(189, "Effect_CorrosiveBile_screen", cmd_screen, 2338),
Function.ability(190, "Effect_EMP_screen", cmd_screen, 1628),
Function.ability(191, "Effect_Explode_quick", cmd_quick, 42),
Function.ability(192, "Effect_Feedback_screen", cmd_screen, 140),
Function.ability(193, "Effect_ForceField_screen", cmd_screen, 1526),
Function.ability(194, "Effect_FungalGrowth_screen", cmd_screen, 74),
Function.ability(195, "Effect_GhostSnipe_screen", cmd_screen, 2714),
Function.ability(196, "Effect_GravitonBeam_screen", cmd_screen, 173),
Function.ability(197, "Effect_GuardianShield_quick", cmd_quick, 76),
Function.ability(198, "Effect_Heal_screen", cmd_screen, 386),
Function.ability(199, "Effect_Heal_autocast", autocast, 386),
Function.ability(200, "Effect_HunterSeekerMissile_screen", cmd_screen, 169),
Function.ability(201, "Effect_ImmortalBarrier_quick", cmd_quick, 2328),
Function.ability(202, "Effect_ImmortalBarrier_autocast", autocast, 2328),
Function.ability(203, "Effect_InfestedTerrans_screen", cmd_screen, 247),
Function.ability(204, "Effect_InjectLarva_screen", cmd_screen, 251),
Function.ability(205, "Effect_KD8Charge_screen", cmd_screen, 2588),
Function.ability(206, "Effect_LockOn_screen", cmd_screen, 2350),
Function.ability(207, "Effect_LocustSwoop_screen", cmd_screen, 2387),
Function.ability(208, "Effect_MassRecall_screen", cmd_screen, 3686),
Function.ability(209, "Effect_MassRecall_Mothership_screen", cmd_screen, 2368, 3686),
Function.ability(210, "Effect_MassRecall_MothershipCore_screen", cmd_screen, 1974, 3686),
Function.ability(211, "Effect_MedivacIgniteAfterburners_quick", cmd_quick, 2116),
Function.ability(212, "Effect_NeuralParasite_screen", cmd_screen, 249),
Function.ability(213, "Effect_NukeCalldown_screen", cmd_screen, 1622),
Function.ability(214, "Effect_OracleRevelation_screen", cmd_screen, 2146),
Function.ability(215, "Effect_ParasiticBomb_screen", cmd_screen, 2542),
Function.ability(216, "Effect_PhotonOvercharge_screen", cmd_screen, 2162),
Function.ability(217, "Effect_PointDefenseDrone_screen", cmd_screen, 144),
Function.ability(218, "Effect_PsiStorm_screen", cmd_screen, 1036),
Function.ability(219, "Effect_PurificationNova_screen", cmd_screen, 2346),
Function.ability(220, "Effect_Repair_screen", cmd_screen, 3685),
Function.ability(221, "Effect_Repair_autocast", autocast, 3685),
Function.ability(222, "Effect_Repair_Mule_screen", cmd_screen, 78, 3685),
Function.ability(223, "Effect_Repair_Mule_autocast", autocast, 78, 3685),
Function.ability(224, "Effect_Repair_SCV_screen", cmd_screen, 316, 3685),
Function.ability(225, "Effect_Repair_SCV_autocast", autocast, 316, 3685),
Function.ability(226, "Effect_Salvage_quick", cmd_quick, 32),
Function.ability(227, "Effect_Scan_screen", cmd_screen, 399),
Function.ability(228, "Effect_SpawnChangeling_quick", cmd_quick, 181),
Function.ability(229, "Effect_SpawnLocusts_screen", cmd_screen, 2704),
Function.ability(230, "Effect_Spray_screen", cmd_screen, 3684),
Function.ability(231, "Effect_Spray_Protoss_screen", cmd_screen, 30, 3684),
Function.ability(232, "Effect_Spray_Terran_screen", cmd_screen, 26, 3684),
Function.ability(233, "Effect_Spray_Zerg_screen", cmd_screen, 28, 3684),
Function.ability(234, "Effect_Stim_quick", cmd_quick, 3675),
Function.ability(235, "Effect_Stim_Marauder_quick", cmd_quick, 253, 3675),
Function.ability(236, "Effect_Stim_Marauder_Redirect_quick", cmd_quick, 1684, 3675),
Function.ability(237, "Effect_Stim_Marine_quick", cmd_quick, 380, 3675),
Function.ability(238, "Effect_Stim_Marine_Redirect_quick", cmd_quick, 1683, 3675),
Function.ability(239, "Effect_SupplyDrop_screen", cmd_screen, 255),
Function.ability(240, "Effect_TacticalJump_screen", cmd_screen, 2358),
Function.ability(241, "Effect_TimeWarp_screen", cmd_screen, 2244),
Function.ability(242, "Effect_Transfusion_screen", cmd_screen, 1664),
Function.ability(243, "Effect_ViperConsume_screen", cmd_screen, 2073),
Function.ability(244, "Effect_VoidRayPrismaticAlignment_quick", cmd_quick, 2393),
Function.ability(245, "Effect_WidowMineAttack_screen", cmd_screen, 2099),
Function.ability(246, "Effect_WidowMineAttack_autocast", autocast, 2099),
Function.ability(247, "Effect_YamatoGun_screen", cmd_screen, 401),
Function.ability(248, "Hallucination_Adept_quick", cmd_quick, 2391),
Function.ability(249, "Hallucination_Archon_quick", cmd_quick, 146),
Function.ability(250, "Hallucination_Colossus_quick", cmd_quick, 148),
Function.ability(251, "Hallucination_Disruptor_quick", cmd_quick, 2389),
Function.ability(252, "Hallucination_HighTemplar_quick", cmd_quick, 150),
Function.ability(253, "Hallucination_Immortal_quick", cmd_quick, 152),
Function.ability(254, "Hallucination_Oracle_quick", cmd_quick, 2114),
Function.ability(255, "Hallucination_Phoenix_quick", cmd_quick, 154),
Function.ability(256, "Hallucination_Probe_quick", cmd_quick, 156),
Function.ability(257, "Hallucination_Stalker_quick", cmd_quick, 158),
Function.ability(258, "Hallucination_VoidRay_quick", cmd_quick, 160),
Function.ability(259, "Hallucination_WarpPrism_quick", cmd_quick, 162),
Function.ability(260, "Hallucination_Zealot_quick", cmd_quick, 164),
Function.ability(261, "Halt_quick", cmd_quick, 3660),
Function.ability(262, "Halt_Building_quick", cmd_quick, 315, 3660),
Function.ability(263, "Halt_TerranBuild_quick", cmd_quick, 348, 3660),
Function.ability(264, "Harvest_Gather_screen", cmd_screen, 3666),
Function.ability(265, "Harvest_Gather_Drone_screen", cmd_screen, 1183, 3666),
Function.ability(266, "Harvest_Gather_Mule_screen", cmd_screen, 166, 3666),
Function.ability(267, "Harvest_Gather_Probe_screen", cmd_screen, 298, 3666),
Function.ability(268, "Harvest_Gather_SCV_screen", cmd_screen, 295, 3666),
Function.ability(269, "Harvest_Return_quick", cmd_quick, 3667),
Function.ability(270, "Harvest_Return_Drone_quick", cmd_quick, 1184, 3667),
Function.ability(271, "Harvest_Return_Mule_quick", cmd_quick, 167, 3667),
Function.ability(272, "Harvest_Return_Probe_quick", cmd_quick, 299, 3667),
Function.ability(273, "Harvest_Return_SCV_quick", cmd_quick, 296, 3667),
Function.ability(274, "HoldPosition_quick", cmd_quick, 18),
Function.ability(275, "Land_screen", cmd_screen, 3678),
Function.ability(276, "Land_Barracks_screen", cmd_screen, 554, 3678),
Function.ability(277, "Land_CommandCenter_screen", cmd_screen, 419, 3678),
Function.ability(278, "Land_Factory_screen", cmd_screen, 520, 3678),
Function.ability(279, "Land_OrbitalCommand_screen", cmd_screen, 1524, 3678),
Function.ability(280, "Land_Starport_screen", cmd_screen, 522, 3678),
Function.ability(281, "Lift_quick", cmd_quick, 3679),
Function.ability(282, "Lift_Barracks_quick", cmd_quick, 452, 3679),
Function.ability(283, "Lift_CommandCenter_quick", cmd_quick, 417, 3679),
Function.ability(284, "Lift_Factory_quick", cmd_quick, 485, 3679),
Function.ability(285, "Lift_OrbitalCommand_quick", cmd_quick, 1522, 3679),
Function.ability(286, "Lift_Starport_quick", cmd_quick, 518, 3679),
Function.ability(287, "Load_screen", cmd_screen, 3668),
Function.ability(288, "Load_Bunker_screen", cmd_screen, 407, 3668),
Function.ability(289, "Load_Medivac_screen", cmd_screen, 394, 3668),
Function.ability(290, "Load_NydusNetwork_screen", cmd_screen, 1437, 3668),
Function.ability(291, "Load_NydusWorm_screen", cmd_screen, 2370, 3668),
Function.ability(292, "Load_Overlord_screen", cmd_screen, 1406, 3668),
Function.ability(293, "Load_WarpPrism_screen", cmd_screen, 911, 3668),
Function.ability(294, "LoadAll_quick", cmd_quick, 3663),
Function.ability(295, "LoadAll_CommandCenter_quick", cmd_quick, 416, 3663),
Function.ability(296, "Morph_Archon_quick", cmd_quick, 1766),
Function.ability(297, "Morph_BroodLord_quick", cmd_quick, 1372),
Function.ability(298, "Morph_Gateway_quick", cmd_quick, 1520),
Function.ability(299, "Morph_GreaterSpire_quick", cmd_quick, 1220),
Function.ability(300, "Morph_Hellbat_quick", cmd_quick, 1998),
Function.ability(301, "Morph_Hellion_quick", cmd_quick, 1978),
Function.ability(302, "Morph_Hive_quick", cmd_quick, 1218),
Function.ability(303, "Morph_Lair_quick", cmd_quick, 1216),
Function.ability(304, "Morph_LiberatorAAMode_quick", cmd_quick, 2560),
Function.ability(305, "Morph_LiberatorAGMode_screen", cmd_screen, 2558),
Function.ability(306, "Morph_Lurker_quick", cmd_quick, 2332),
Function.ability(307, "Morph_LurkerDen_quick", cmd_quick, 2112),
Function.ability(308, "Morph_Mothership_quick", cmd_quick, 1847),
Function.ability(309, "Morph_OrbitalCommand_quick", cmd_quick, 1516),
Function.ability(310, "Morph_OverlordTransport_quick", cmd_quick, 2708),
Function.ability(311, "Morph_Overseer_quick", cmd_quick, 1448),
Function.ability(312, "Morph_PlanetaryFortress_quick", cmd_quick, 1450),
Function.ability(313, "Morph_Ravager_quick", cmd_quick, 2330),
Function.ability(314, "Morph_Root_screen", cmd_screen, 3680),
Function.ability(315, "Morph_SpineCrawlerRoot_screen", cmd_screen, 1729, 3680),
Function.ability(316, "Morph_SporeCrawlerRoot_screen", cmd_screen, 1731, 3680),
Function.ability(317, "Morph_SiegeMode_quick", cmd_quick, 388),
Function.ability(318, "Morph_SupplyDepot_Lower_quick", cmd_quick, 556),
Function.ability(319, "Morph_SupplyDepot_Raise_quick", cmd_quick, 558),
Function.ability(320, "Morph_ThorExplosiveMode_quick", cmd_quick, 2364),
Function.ability(321, "Morph_ThorHighImpactMode_quick", cmd_quick, 2362),
Function.ability(322, "Morph_Unsiege_quick", cmd_quick, 390),
Function.ability(323, "Morph_Uproot_quick", cmd_quick, 3681),
Function.ability(324, "Morph_SpineCrawlerUproot_quick", cmd_quick, 1725, 3681),
Function.ability(325, "Morph_SporeCrawlerUproot_quick", cmd_quick, 1727, 3681),
Function.ability(326, "Morph_VikingAssaultMode_quick", cmd_quick, 403),
Function.ability(327, "Morph_VikingFighterMode_quick", cmd_quick, 405),
Function.ability(328, "Morph_WarpGate_quick", cmd_quick, 1518),
Function.ability(329, "Morph_WarpPrismPhasingMode_quick", cmd_quick, 1528),
Function.ability(330, "Morph_WarpPrismTransportMode_quick", cmd_quick, 1530),
Function.ability(331, "Move_screen", cmd_screen, 16),
Function.ability(332, "Move_minimap", cmd_minimap, 16),
Function.ability(333, "Patrol_screen", cmd_screen, 17),
Function.ability(334, "Patrol_minimap", cmd_minimap, 17),
Function.ability(335, "Rally_Units_screen", cmd_screen, 3673),
Function.ability(336, "Rally_Units_minimap", cmd_minimap, 3673),
Function.ability(337, "Rally_Building_screen", cmd_screen, 195, 3673),
Function.ability(338, "Rally_Building_minimap", cmd_minimap, 195, 3673),
Function.ability(339, "Rally_Hatchery_Units_screen", cmd_screen, 212, 3673),
Function.ability(340, "Rally_Hatchery_Units_minimap", cmd_minimap, 212, 3673),
Function.ability(341, "Rally_Morphing_Unit_screen", cmd_screen, 199, 3673),
Function.ability(342, "Rally_Morphing_Unit_minimap", cmd_minimap, 199, 3673),
Function.ability(343, "Rally_Workers_screen", cmd_screen, 3690),
Function.ability(344, "Rally_Workers_minimap", cmd_minimap, 3690),
Function.ability(345, "Rally_CommandCenter_screen", cmd_screen, 203, 3690),
Function.ability(346, "Rally_CommandCenter_minimap", cmd_minimap, 203, 3690),
Function.ability(347, "Rally_Hatchery_Workers_screen", cmd_screen, 211, 3690),
Function.ability(348, "Rally_Hatchery_Workers_minimap", cmd_minimap, 211, 3690),
Function.ability(349, "Rally_Nexus_screen", cmd_screen, 207, 3690),
Function.ability(350, "Rally_Nexus_minimap", cmd_minimap, 207, 3690),
Function.ability(351, "Research_AdeptResonatingGlaives_quick", cmd_quick, 1594),
Function.ability(352, "Research_AdvancedBallistics_quick", cmd_quick, 805),
Function.ability(353, "Research_BansheeCloakingField_quick", cmd_quick, 790),
Function.ability(354, "Research_BansheeHyperflightRotors_quick", cmd_quick, 799),
Function.ability(355, "Research_BattlecruiserWeaponRefit_quick", cmd_quick, 1532),
Function.ability(356, "Research_Blink_quick", cmd_quick, 1593),
Function.ability(357, "Research_Burrow_quick", cmd_quick, 1225),
Function.ability(358, "Research_CentrifugalHooks_quick", cmd_quick, 1482),
Function.ability(359, "Research_Charge_quick", cmd_quick, 1592),
Function.ability(360, "Research_ChitinousPlating_quick", cmd_quick, 265),
Function.ability(361, "Research_CombatShield_quick", cmd_quick, 731),
Function.ability(362, "Research_ConcussiveShells_quick", cmd_quick, 732),
Function.ability(363, "Research_DrillingClaws_quick", cmd_quick, 764),
Function.ability(364, "Research_ExtendedThermalLance_quick", cmd_quick, 1097),
Function.ability(365, "Research_GlialRegeneration_quick", cmd_quick, 216),
Function.ability(366, "Research_GraviticBooster_quick", cmd_quick, 1093),
Function.ability(367, "Research_GraviticDrive_quick", cmd_quick, 1094),
Function.ability(368, "Research_GroovedSpines_quick", cmd_quick, 1282),
Function.ability(369, "Research_HiSecAutoTracking_quick", cmd_quick, 650),
Function.ability(370, "Research_HighCapacityFuelTanks_quick", cmd_quick, 804),
Function.ability(371, "Research_InfernalPreigniter_quick", cmd_quick, 761),
Function.ability(372, "Research_InterceptorGravitonCatapult_quick", cmd_quick, 44),
Function.ability(373, "Research_MagFieldLaunchers_quick", cmd_quick, 766),
Function.ability(374, "Research_MuscularAugments_quick", cmd_quick, 1283),
Function.ability(375, "Research_NeosteelFrame_quick", cmd_quick, 655),
Function.ability(376, "Research_NeuralParasite_quick", cmd_quick, 1455),
Function.ability(377, "Research_PathogenGlands_quick", cmd_quick, 1454),
Function.ability(378, "Research_PersonalCloaking_quick", cmd_quick, 820),
Function.ability(379, "Research_PhoenixAnionPulseCrystals_quick", cmd_quick, 46),
Function.ability(380, "Research_PneumatizedCarapace_quick", cmd_quick, 1223),
Function.ability(381, "Research_ProtossAirArmor_quick", cmd_quick, 3692),
Function.ability(382, "Research_ProtossAirArmorLevel1_quick", cmd_quick, 1565, 3692),
Function.ability(383, "Research_ProtossAirArmorLevel2_quick", cmd_quick, 1566, 3692),
Function.ability(384, "Research_ProtossAirArmorLevel3_quick", cmd_quick, 1567, 3692),
Function.ability(385, "Research_ProtossAirWeapons_quick", cmd_quick, 3693),
Function.ability(386, "Research_ProtossAirWeaponsLevel1_quick", cmd_quick, 1562, 3693),
Function.ability(387, "Research_ProtossAirWeaponsLevel2_quick", cmd_quick, 1563, 3693),
Function.ability(388, "Research_ProtossAirWeaponsLevel3_quick", cmd_quick, 1564, 3693),
Function.ability(389, "Research_ProtossGroundArmor_quick", cmd_quick, 3694),
Function.ability(390, "Research_ProtossGroundArmorLevel1_quick", cmd_quick, 1065, 3694),
Function.ability(391, "Research_ProtossGroundArmorLevel2_quick", cmd_quick, 1066, 3694),
Function.ability(392, "Research_ProtossGroundArmorLevel3_quick", cmd_quick, 1067, 3694),
Function.ability(393, "Research_ProtossGroundWeapons_quick", cmd_quick, 3695),
Function.ability(394, "Research_ProtossGroundWeaponsLevel1_quick", cmd_quick, 1062, 3695),
Function.ability(395, "Research_ProtossGroundWeaponsLevel2_quick", cmd_quick, 1063, 3695),
Function.ability(396, "Research_ProtossGroundWeaponsLevel3_quick", cmd_quick, 1064, 3695),
Function.ability(397, "Research_ProtossShields_quick", cmd_quick, 3696),
Function.ability(398, "Research_ProtossShieldsLevel1_quick", cmd_quick, 1068, 3696),
Function.ability(399, "Research_ProtossShieldsLevel2_quick", cmd_quick, 1069, 3696),
Function.ability(400, "Research_ProtossShieldsLevel3_quick", cmd_quick, 1070, 3696),
Function.ability(401, "Research_PsiStorm_quick", cmd_quick, 1126),
Function.ability(402, "Research_RavenCorvidReactor_quick", cmd_quick, 793),
Function.ability(403, "Research_RavenRecalibratedExplosives_quick", cmd_quick, 803),
Function.ability(404, "Research_ShadowStrike_quick", cmd_quick, 2720),
Function.ability(405, "Research_Stimpack_quick", cmd_quick, 730),
Function.ability(406, "Research_TerranInfantryArmor_quick", cmd_quick, 3697),
Function.ability(407, "Research_TerranInfantryArmorLevel1_quick", cmd_quick, 656, 3697),
Function.ability(408, "Research_TerranInfantryArmorLevel2_quick", cmd_quick, 657, 3697),
Function.ability(409, "Research_TerranInfantryArmorLevel3_quick", cmd_quick, 658, 3697),
Function.ability(410, "Research_TerranInfantryWeapons_quick", cmd_quick, 3698),
Function.ability(411, "Research_TerranInfantryWeaponsLevel1_quick", cmd_quick, 652, 3698),
Function.ability(412, "Research_TerranInfantryWeaponsLevel2_quick", cmd_quick, 653, 3698),
Function.ability(413, "Research_TerranInfantryWeaponsLevel3_quick", cmd_quick, 654, 3698),
Function.ability(414, "Research_TerranShipWeapons_quick", cmd_quick, 3699),
Function.ability(415, "Research_TerranShipWeaponsLevel1_quick", cmd_quick, 861, 3699),
Function.ability(416, "Research_TerranShipWeaponsLevel2_quick", cmd_quick, 862, 3699),
Function.ability(417, "Research_TerranShipWeaponsLevel3_quick", cmd_quick, 863, 3699),
Function.ability(418, "Research_TerranStructureArmorUpgrade_quick", cmd_quick, 651),
Function.ability(419, "Research_TerranVehicleAndShipPlating_quick", cmd_quick, 3700),
Function.ability(420, "Research_TerranVehicleAndShipPlatingLevel1_quick", cmd_quick, 864, 3700),
Function.ability(421, "Research_TerranVehicleAndShipPlatingLevel2_quick", cmd_quick, 865, 3700),
Function.ability(422, "Research_TerranVehicleAndShipPlatingLevel3_quick", cmd_quick, 866, 3700),
Function.ability(423, "Research_TerranVehicleWeapons_quick", cmd_quick, 3701),
Function.ability(424, "Research_TerranVehicleWeaponsLevel1_quick", cmd_quick, 855, 3701),
Function.ability(425, "Research_TerranVehicleWeaponsLevel2_quick", cmd_quick, 856, 3701),
Function.ability(426, "Research_TerranVehicleWeaponsLevel3_quick", cmd_quick, 857, 3701),
Function.ability(427, "Research_TunnelingClaws_quick", cmd_quick, 217),
Function.ability(428, "Research_WarpGate_quick", cmd_quick, 1568),
Function.ability(429, "Research_ZergFlyerArmor_quick", cmd_quick, 3702),
Function.ability(430, "Research_ZergFlyerArmorLevel1_quick", cmd_quick, 1315, 3702),
Function.ability(431, "Research_ZergFlyerArmorLevel2_quick", cmd_quick, 1316, 3702),
Function.ability(432, "Research_ZergFlyerArmorLevel3_quick", cmd_quick, 1317, 3702),
Function.ability(433, "Research_ZergFlyerAttack_quick", cmd_quick, 3703),
Function.ability(434, "Research_ZergFlyerAttackLevel1_quick", cmd_quick, 1312, 3703),
Function.ability(435, "Research_ZergFlyerAttackLevel2_quick", cmd_quick, 1313, 3703),
Function.ability(436, "Research_ZergFlyerAttackLevel3_quick", cmd_quick, 1314, 3703),
Function.ability(437, "Research_ZergGroundArmor_quick", cmd_quick, 3704),
Function.ability(438, "Research_ZergGroundArmorLevel1_quick", cmd_quick, 1189, 3704),
Function.ability(439, "Research_ZergGroundArmorLevel2_quick", cmd_quick, 1190, 3704),
Function.ability(440, "Research_ZergGroundArmorLevel3_quick", cmd_quick, 1191, 3704),
Function.ability(441, "Research_ZergMeleeWeapons_quick", cmd_quick, 3705),
Function.ability(442, "Research_ZergMeleeWeaponsLevel1_quick", cmd_quick, 1186, 3705),
Function.ability(443, "Research_ZergMeleeWeaponsLevel2_quick", cmd_quick, 1187, 3705),
Function.ability(444, "Research_ZergMeleeWeaponsLevel3_quick", cmd_quick, 1188, 3705),
Function.ability(445, "Research_ZergMissileWeapons_quick", cmd_quick, 3706),
Function.ability(446, "Research_ZergMissileWeaponsLevel1_quick", cmd_quick, 1192, 3706),
Function.ability(447, "Research_ZergMissileWeaponsLevel2_quick", cmd_quick, 1193, 3706),
Function.ability(448, "Research_ZergMissileWeaponsLevel3_quick", cmd_quick, 1194, 3706),
Function.ability(449, "Research_ZerglingAdrenalGlands_quick", cmd_quick, 1252),
Function.ability(450, "Research_ZerglingMetabolicBoost_quick", cmd_quick, 1253),
Function.ability(451, "Smart_screen", cmd_screen, 1),
Function.ability(452, "Smart_minimap", cmd_minimap, 1),
Function.ability(453, "Stop_quick", cmd_quick, 3665),
Function.ability(454, "Stop_Building_quick", cmd_quick, 2057, 3665),
Function.ability(455, "Stop_Redirect_quick", cmd_quick, 1691, 3665),
Function.ability(456, "Stop_Stop_quick", cmd_quick, 4, 3665),
Function.ability(457, "Train_Adept_quick", cmd_quick, 922),
Function.ability(458, "Train_Baneling_quick", cmd_quick, 80),
Function.ability(459, "Train_Banshee_quick", cmd_quick, 621),
Function.ability(460, "Train_Battlecruiser_quick", cmd_quick, 623),
Function.ability(461, "Train_Carrier_quick", cmd_quick, 948),
Function.ability(462, "Train_Colossus_quick", cmd_quick, 978),
Function.ability(463, "Train_Corruptor_quick", cmd_quick, 1353),
Function.ability(464, "Train_Cyclone_quick", cmd_quick, 597),
Function.ability(465, "Train_DarkTemplar_quick", cmd_quick, 920),
Function.ability(466, "Train_Disruptor_quick", cmd_quick, 994),
Function.ability(467, "Train_Drone_quick", cmd_quick, 1342),
Function.ability(468, "Train_Ghost_quick", cmd_quick, 562),
Function.ability(469, "Train_Hellbat_quick", cmd_quick, 596),
Function.ability(470, "Train_Hellion_quick", cmd_quick, 595),
Function.ability(471, "Train_HighTemplar_quick", cmd_quick, 919),
Function.ability(472, "Train_Hydralisk_quick", cmd_quick, 1345),
Function.ability(473, "Train_Immortal_quick", cmd_quick, 979),
Function.ability(474, "Train_Infestor_quick", cmd_quick, 1352),
Function.ability(475, "Train_Liberator_quick", cmd_quick, 626),
Function.ability(476, "Train_Marauder_quick", cmd_quick, 563),
Function.ability(477, "Train_Marine_quick", cmd_quick, 560),
Function.ability(478, "Train_Medivac_quick", cmd_quick, 620),
Function.ability(479, "Train_MothershipCore_quick", cmd_quick, 1853),
Function.ability(480, "Train_Mutalisk_quick", cmd_quick, 1346),
Function.ability(481, "Train_Observer_quick", cmd_quick, 977),
Function.ability(482, "Train_Oracle_quick", cmd_quick, 954),
Function.ability(483, "Train_Overlord_quick", cmd_quick, 1344),
Function.ability(484, "Train_Phoenix_quick", cmd_quick, 946),
Function.ability(485, "Train_Probe_quick", cmd_quick, 1006),
Function.ability(486, "Train_Queen_quick", cmd_quick, 1632),
Function.ability(487, "Train_Raven_quick", cmd_quick, 622),
Function.ability(488, "Train_Reaper_quick", cmd_quick, 561),
Function.ability(489, "Train_Roach_quick", cmd_quick, 1351),
Function.ability(490, "Train_SCV_quick", cmd_quick, 524),
Function.ability(491, "Train_Sentry_quick", cmd_quick, 921),
Function.ability(492, "Train_SiegeTank_quick", cmd_quick, 591),
Function.ability(493, "Train_Stalker_quick", cmd_quick, 917),
Function.ability(494, "Train_SwarmHost_quick", cmd_quick, 1356),
Function.ability(495, "Train_Tempest_quick", cmd_quick, 955),
Function.ability(496, "Train_Thor_quick", cmd_quick, 594),
Function.ability(497, "Train_Ultralisk_quick", cmd_quick, 1348),
Function.ability(498, "Train_VikingFighter_quick", cmd_quick, 624),
Function.ability(499, "Train_Viper_quick", cmd_quick, 1354),
Function.ability(500, "Train_VoidRay_quick", cmd_quick, 950),
Function.ability(501, "Train_WarpPrism_quick", cmd_quick, 976),
Function.ability(502, "Train_WidowMine_quick", cmd_quick, 614),
Function.ability(503, "Train_Zealot_quick", cmd_quick, 916),
Function.ability(504, "Train_Zergling_quick", cmd_quick, 1343),
Function.ability(505, "TrainWarp_Adept_screen", cmd_screen, 1419),
Function.ability(506, "TrainWarp_DarkTemplar_screen", cmd_screen, 1417),
Function.ability(507, "TrainWarp_HighTemplar_screen", cmd_screen, 1416),
Function.ability(508, "TrainWarp_Sentry_screen", cmd_screen, 1418),
Function.ability(509, "TrainWarp_Stalker_screen", cmd_screen, 1414),
Function.ability(510, "TrainWarp_Zealot_screen", cmd_screen, 1413),
Function.ability(511, "UnloadAll_quick", cmd_quick, 3664),
Function.ability(512, "UnloadAll_Bunker_quick", cmd_quick, 408, 3664),
Function.ability(513, "UnloadAll_CommandCenter_quick", cmd_quick, 413, 3664),
Function.ability(514, "UnloadAll_NydasNetwork_quick", cmd_quick, 1438, 3664),
Function.ability(515, "UnloadAll_NydusWorm_quick", cmd_quick, 2371, 3664),
Function.ability(516, "UnloadAllAt_screen", cmd_screen, 3669),
Function.ability(517, "UnloadAllAt_minimap", cmd_minimap, 3669),
Function.ability(518, "UnloadAllAt_Medivac_screen", cmd_screen, 396, 3669),
Function.ability(519, "UnloadAllAt_Medivac_minimap", cmd_minimap, 396, 3669),
Function.ability(520, "UnloadAllAt_Overlord_screen", cmd_screen, 1408, 3669),
Function.ability(521, "UnloadAllAt_Overlord_minimap", cmd_minimap, 1408, 3669),
Function.ability(522, "UnloadAllAt_WarpPrism_screen", cmd_screen, 913, 3669),
Function.ability(523, "UnloadAllAt_WarpPrism_minimap", cmd_minimap, 913, 3669),
])
# pylint: enable=line-too-long
# Some indexes to support features.py and action conversion.
ABILITY_IDS = collections.defaultdict(set) # {ability_id: {funcs}}
for func in FUNCTIONS:
if func.ability_id >= 0:
ABILITY_IDS[func.ability_id].add(func)
ABILITY_IDS = {k: frozenset(v) for k, v in six.iteritems(ABILITY_IDS)}
FUNCTIONS_AVAILABLE = {f.id: f for f in FUNCTIONS if f.avail_fn}
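# Illustrative sketch (editorial addition, not part of the original module):
# the Functions container supports lookup by attribute, by numeric id, and by
# name string, all resolving to the same Function entry.
def _example_function_lookup():
    by_attr = FUNCTIONS.select_point
    by_id = FUNCTIONS[by_attr.id]
    by_name = FUNCTIONS["select_point"]
    return by_attr is by_id is by_name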
class FunctionCall(collections.namedtuple(
"FunctionCall", ["function", "arguments"])):
"""Represents a function call action.
Attributes:
function: Store the function id, eg 2 for select_point.
arguments: The list of arguments for that function, each being a list of
ints. For select_point this could be: [[0], [23, 38]].
"""
__slots__ = ()
@classmethod
def all_arguments(cls, function, arguments):
"""Helper function for creating `FunctionCall`s with `Arguments`.
Args:
function: The value to store for the action function.
arguments: The values to store for the arguments of the action. Can either
be an `Arguments` object, a `dict`, or an iterable. If a `dict` or an
iterable is provided, the values will be unpacked into an `Arguments`
object.
Returns:
A new `FunctionCall` instance.
"""
if isinstance(arguments, dict):
arguments = Arguments(**arguments)
elif not isinstance(arguments, Arguments):
arguments = Arguments(*arguments)
return cls(function, arguments)
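# Illustrative sketch (editorial addition): building the select_point call that
# the docstring above describes, with the made-up argument values [[0], [23, 38]].
def _example_select_point_call():
    func = FUNCTIONS.select_point
    return FunctionCall(func.id, [[0], [23, 38]])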
class ValidActions(collections.namedtuple(
"ValidActions", ["types", "functions"])):
"""The set of types and functions that are valid for an agent to use.
Attributes:
types: A namedtuple of the types that the functions require. Unlike TYPES
above, this includes the sizes for screen and minimap.
functions: A namedtuple of all the functions.
"""
__slots__ = ()
|
StarcoderdataPython
|
1604125
|
import sys
import subprocess
# Run the file-format identification tool ("sf" -- presumably siegfried) in
# JSON mode on the path given as the first argument (with its first two
# characters stripped, as in the original), then publish each line of its
# output to the SNS topic below.
res = subprocess.check_output(["sf", "-json", sys.argv[1][2:]])
for line in res.decode().splitlines():
    subprocess.call([
        "aws", "sns", "publish",
        "--topic-arn", "arn:aws:sns:eu-west-2:247222723249:fileformat-check-result-dev",
        "--message", line,
    ])
|
StarcoderdataPython
|
1672079
|
from ....Classes.Arc1 import Arc1
from ....Classes.SurfLine import SurfLine
def get_surface_active(self, alpha=0, delta=0):
"""Return the full winding surface
Parameters
----------
self : SlotW22
A SlotW22 object
alpha : float
float number for rotation (Default value = 0) [rad]
delta : complex
complex number for translation (Default value = 0)
Returns
-------
surf_wind: Surface
Surface corresponding to the Winding Area
"""
# get the name of the lamination
st = self.get_name_lam()
# Create curve list
curve_list = self.build_geometry()[1:-1]
curve_list.append(
Arc1(
begin=curve_list[-1].get_end(),
end=curve_list[0].get_begin(),
radius=-abs(curve_list[-1].get_end()),
is_trigo_direction=False,
)
)
# Create surface
if self.is_outwards():
Zmid = self.get_Rbo() + self.H0 + self.H2 / 2
else:
Zmid = self.get_Rbo() - self.H0 - self.H2 / 2
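    # Editorial note: Zmid is only used as the surface's reference point; it
    # lies on the slot axis at mid-height of the winding area, offset from the
    # bore radius by H0 plus half of H2, outwards or inwards depending on the
    # lamination orientation.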
surface = SurfLine(
line_list=curve_list, label="Wind_" + st + "_R0_T0_S0", point_ref=Zmid
)
# Apply transformation
surface.rotate(alpha)
surface.translate(delta)
return surface
|
StarcoderdataPython
|
36300
|
<gh_stars>0
#!/usr/bin/env python
# import required modules:
#
import os
import sys
import string
import random
from random import shuffle
from pathlib import Path
import linecache
import time
# This class shuffles songs without repeating and keeps track of where
# it left off. See '-help' option for more details.
#
class shuffler:
# define constructor, take arguments as parameters
#
def __init__(self):
self.argv_a = []
# end of constructor
# Method to print arguments from command line provided
#
def printArgs(self):
print "Arguments provided are: ", self.argv_a
return
# Set command line arguments provided, do not include script name
#
def setter(self, commandArgs):
# Set data
#
self.argv_a = commandArgs[1:]
return
# Check for a '-help' option and print help information
#
def check_options(self):
for args in self.argv_a:
if args == '-help':
print "\nsynopsis: This class shuffles the files in the provided command line argument path, then plays each song unrepeated until all songs have been played. Then it will reshuffle the songs and continue the same process.\n"
print "desc: see above.\n"
print "example: provide a path /songs/. Will capture the length of files in that directory and begin the shuffle.\n"
print "options: supports a '-help' option as shown here.\n"
print "arguments: path to files to be shuffled and '-help'.\n"
print "man page: none.\n"
# Exit program if help argument provided
#
sys.exit()
return
# Method to play the shuffler
#
def play(self):
# Get file list from data path in command line argument
#
for root, dir, files in os.walk(self.argv_a[0]):
# store the files from the path as a list in 'mysongs'
#
mysongs = files
# Start an infinite loop
#
        while True:
            # Check if counter file exists, if not, generate one to hold the counter
            # in a scratch file. Also check if the counter has surpassed the number
            # of songs
            #
            my_file = Path("./counter.txt")
            if not my_file.is_file() or int(open("./counter.txt").readline()) > len(mysongs):
                # Set counter to 1 for first line in a file
                #
                songcounter = 1
                # Write (or overwrite) song counter to file. Open, write, close the file.
                #
                counterOut = open("./counter.txt", "w")
                counterOut.write(str(songcounter))
                counterOut.close()
                # Shuffle songs and write (or overwrite them) to a file line by line for each song
                #
                # Shuffle the list of songs from the arguments
                #
                shuffledList = mysongs
                random.shuffle(shuffledList)
                shuffleOut = open("./shuffle.txt", "w")
                # Write shuffled list into file, then close it and make sure
                # linecache rereads the rewritten file
                #
                for i in shuffledList:
                    shuffleOut.write("%s\n" % i)
                shuffleOut.close()
                linecache.checkcache("./shuffle.txt")
            # Loop over songs in list
            #
            for j in range(0, len(mysongs)):
                # Get counter for index from file, cast to int, then print counter
                #
                tempCounter = int(open("./counter.txt").readline())
                print(tempCounter)
                # Get random song from the shuffle.txt file according to
                # the counter above
                #
                currentSong = linecache.getline("./shuffle.txt", tempCounter)
                # Print the song
                #
                print(currentSong)
                # Increment counter, overwrite scratch file, and close
                #
                songcounter = tempCounter
                songcounter += 1
                counterOut = open("./counter.txt", "w")
                counterOut.write(str(songcounter))
                counterOut.close()
                # Sleep for 1 second as to print 1 song per second
                #
                time.sleep(1)
# Exit gracefully
return
# main: this is the main function of this Python script
#
def main(argv):
# Create instance of the shuffler class
#
myshuffle = shuffler()
# Set the command line arguments as the input for the class
#
myshuffle.setter(argv)
# Check if the help option is invoked
#
myshuffle.check_options()
# Print the arguments provided to the class from the setter method
#
myshuffle.printArgs()
# Play the shuffler
#
myshuffle.play()
# End gracefully
#
return
# begin gracefully
#
if __name__ == "__main__":
main(sys.argv[0:])
#
# end of file
|
StarcoderdataPython
|
1618710
|
<reponame>kaiergin/Quadcopter_simulator
import numpy as np
import math
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as Axes3D
import sys
class GUI():
# 'quad_list' is a dictionary of format: quad_list = {'quad_1_name':{'position':quad_1_position,'orientation':quad_1_orientation,'arm_span':quad_1_arm_span}, ...}
def __init__(self, quads):
self.quads = quads
self.fig = plt.figure()
self.ax = Axes3D.Axes3D(self.fig)
self.ax.set_xlim3d([-2.0, 2.0])
self.ax.set_xlabel('X')
self.ax.set_ylim3d([-2.0, 2.0])
self.ax.set_ylabel('Y')
self.ax.set_zlim3d([0, 5.0])
self.ax.set_zlabel('Z')
self.ax.set_title('Quadcopter Simulation')
self.init_plot()
self.fig.canvas.mpl_connect('key_press_event', self.keypress_routine)
def rotation_matrix(self,angles):
ct = math.cos(angles[0])
cp = math.cos(angles[1])
cg = math.cos(angles[2])
st = math.sin(angles[0])
sp = math.sin(angles[1])
sg = math.sin(angles[2])
R_x = np.array([[1,0,0],[0,ct,-st],[0,st,ct]])
R_y = np.array([[cp,0,sp],[0,1,0],[-sp,0,cp]])
R_z = np.array([[cg,-sg,0],[sg,cg,0],[0,0,1]])
R = np.dot(R_z, np.dot( R_y, R_x ))
return R
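    # Editorial note (not from the original author): rotation_matrix composes
    # R = R_z @ R_y @ R_x, so for a column vector the x rotation is applied
    # first, then y, then z, with angles given as (x, y, z) in radians.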
def init_plot(self):
for key in self.quads:
self.quads[key]['l1'], = self.ax.plot([],[],[],color='blue',linewidth=3,antialiased=False)
self.quads[key]['l2'], = self.ax.plot([],[],[],color='red',linewidth=3,antialiased=False)
self.quads[key]['hub'], = self.ax.plot([],[],[],marker='o',color='green', markersize=6,antialiased=False)
def update(self):
for key in self.quads:
R = self.rotation_matrix(self.quads[key]['orientation'])
L = self.quads[key]['L']
points = np.array([ [-L,0,0], [L,0,0], [0,-L,0], [0,L,0], [0,0,0], [0,0,0] ]).T
points = np.dot(R,points)
points[0,:] += self.quads[key]['position'][0]
points[1,:] += self.quads[key]['position'][1]
points[2,:] += self.quads[key]['position'][2]
self.quads[key]['l1'].set_data(points[0,0:2],points[1,0:2])
self.quads[key]['l1'].set_3d_properties(points[2,0:2])
self.quads[key]['l2'].set_data(points[0,2:4],points[1,2:4])
self.quads[key]['l2'].set_3d_properties(points[2,2:4])
self.quads[key]['hub'].set_data(points[0,5],points[1,5])
self.quads[key]['hub'].set_3d_properties(points[2,5])
plt.pause(0.000000000000001)
def keypress_routine(self,event):
sys.stdout.flush()
if event.key == 'x':
y = list(self.ax.get_ylim3d())
y[0] += 0.2
y[1] += 0.2
self.ax.set_ylim3d(y)
elif event.key == 'w':
y = list(self.ax.get_ylim3d())
y[0] -= 0.2
y[1] -= 0.2
self.ax.set_ylim3d(y)
elif event.key == 'd':
x = list(self.ax.get_xlim3d())
x[0] += 0.2
x[1] += 0.2
self.ax.set_xlim3d(x)
elif event.key == 'a':
x = list(self.ax.get_xlim3d())
x[0] -= 0.2
x[1] -= 0.2
self.ax.set_xlim3d(x)
|
StarcoderdataPython
|
144423
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List
import numpy as np
import torch
from pytext.models.representations.transformer import (
TransformerLayer,
MultiheadSelfAttention,
)
from pytext.models.roberta import RoBERTaEncoder
from torch import nn, Tensor
torch.ops.load_library("//pytorch/FasterTransformers3.1:faster_transformers")
@torch.jit.script
def sequence_mask(neg_padding_mask: Tensor) -> Tensor:
neg_padding_mask = neg_padding_mask.half()
mask = neg_padding_mask.view(
neg_padding_mask.size(0), 1, 1, neg_padding_mask.size(1)
)
m2 = mask.transpose(2, 3)
return mask * m2
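# Shape sketch (editorial note): neg_padding_mask is [B, T] with 1.0 for real
# tokens and 0.0 for padding; mask is [B, 1, 1, T], m2 is [B, 1, T, 1], and
# their broadcast product is the [B, 1, T, T] pairwise attention mask that the
# fused encoder layers below consume.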
def to_fast_transformer_weights(layer):
attn_qw, attn_kw, attn_vw = layer.attention.input_projection.weight.chunk(3, dim=0)
attn_qb, attn_kb, attn_vb = layer.attention.input_projection.bias.chunk(3, dim=0)
attn_ow = layer.attention.output_projection.weight
attn_ob = layer.attention.output_projection.bias
attn_nw = layer.attention_layer_norm.weight
attn_nb = layer.attention_layer_norm.bias
inter_w = layer.residual_mlp.mlp.__getattr__("0").weight
inter_b = layer.residual_mlp.mlp.__getattr__("0").bias
output_w = layer.residual_mlp.mlp.__getattr__("3").weight
output_b = layer.residual_mlp.mlp.__getattr__("3").bias
norm_w = layer.final_layer_norm.weight
norm_b = layer.final_layer_norm.bias
fast_transformer_weights = [
attn_qw.transpose(-1, -2).contiguous(),
attn_qb,
attn_kw.transpose(-1, -2).contiguous(),
attn_kb,
attn_vw.transpose(-1, -2).contiguous(),
attn_vb,
attn_ow.transpose(-1, -2).contiguous(),
attn_ob,
attn_nw,
attn_nb,
inter_w.transpose(-1, -2).contiguous(),
inter_b,
output_w.transpose(-1, -2).contiguous(),
output_b,
norm_w,
norm_b,
torch.tensor(0),
]
return [t.half().cuda() for t in fast_transformer_weights]
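# Editorial note: the chunk(3, dim=0) calls above rely on input_projection
# storing the fused Q, K, V projections stacked along dim 0 in that order; the
# transposes are presumably because the FasterTransformer op expects weights in
# [in_features, out_features] layout (an assumption, not stated in the source).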
# Hide custom class behind torch.jit.script as jit.trace doesn't support custom classes.
class NVTransformerStack(nn.Module):
def __init__(self, layers):
super().__init__()
self.layers = layers
self.layer_num = len(layers)
def forward(self, encoded: Tensor, neg_padding_mask: Tensor) -> List[Tensor]:
# seq_lengths: [B,]
sequence_lengths = neg_padding_mask.sum(dim=1, dtype=torch.int32)
# Note - this does a HtoD copy/stream synchronization
# Necessary because our implementation does not handle the zero-token case.
if sequence_lengths.sum().item() == 0:
return [encoded.transpose(0, 1)] + [
torch.zeros_like(encoded.transpose(0, 1)) for _ in range(self.layer_num)
]
# Note - this also does a HtoD copy/stream synchronization.
(
hidden_states,
sequence_id_offset,
) = torch.ops.fastertransformer.build_mask_remove_padding(
encoded, sequence_lengths
)
# attention_mask: [B, 1, T, T]
attention_mask = sequence_mask(neg_padding_mask).half()
# trt_seq_len: [B + 1,]
trt_seq_len = torch.cumsum(
torch.cat(
[
torch.zeros(
1, device=sequence_lengths.device, dtype=sequence_lengths.dtype
),
sequence_lengths,
],
dim=0,
),
dim=0,
dtype=torch.int32,
)
all_hidden_states = [hidden_states]
for i in range(self.layer_num):
hidden_states = self.layers[i].forward(
hidden_states, attention_mask, trt_seq_len, sequence_id_offset
)
all_hidden_states.append(hidden_states)
# Remap back to padded [B, T, D] representation, and transpose to [T, B, D].
states = []
for hidden_states in all_hidden_states:
            # Ideally jit.tracer will eliminate unnecessary ones as the corresponding
# output tensor would be unused. It doesn't seem to currently, though.
state = torch.ops.fastertransformer.rebuild_padding(
hidden_states, sequence_id_offset, attention_mask, 0
)
# Convert to [T, B, D] representation.
states.append(state.transpose(1, 0))
return states
class NVFasterTransformerEncoder(nn.Module):
def __init__(self, old_transformer):
super().__init__()
remove_padding = True
use_trt_kernel = True
allow_gemm_test = False
int8_mode = 0
self.layer_num = len(old_transformer.layers)
self.int8_mode = int8_mode
self.token_embedding = old_transformer.token_embedding
self.positional_embedding = old_transformer.positional_embedding
self.embedding_layer_norm = old_transformer.embedding_layer_norm
self.dropout = old_transformer.dropout
self.padding_idx = old_transformer.padding_idx
num_headss, scalings, embed_dims = set(), set(), set()
for layer in old_transformer.layers:
assert isinstance(layer, TransformerLayer)
att = layer.attention
assert isinstance(att, MultiheadSelfAttention)
num_headss.add(att.num_heads)
scalings.add(att.scaling)
embed_dims.add(att.embed_dim)
# TODO: ResidualMLP check.
# ensure values match.
(num_heads,) = num_headss
(scaling,) = scalings
(embed_dims,) = embed_dims
head_dim = embed_dims // num_heads
np.testing.assert_allclose(scaling, 1.0 / np.sqrt(head_dim))
encoders = []
for i in range(self.layer_num):
encoders.append(
torch.classes.FasterTransformer.Encoder(
*to_fast_transformer_weights(old_transformer.layers[i]),
num_heads,
head_dim,
remove_padding,
int8_mode,
self.layer_num,
i,
allow_gemm_test,
use_trt_kernel
)
)
self.encoder = torch.jit.script(NVTransformerStack(encoders))
def forward(self, tokens: Tensor) -> List[Tensor]:
# Vanilla transformer prelude
neg_padding_mask = tokens.ne(self.padding_idx)
embedded = self.token_embedding(tokens)
embedded_positions = self.positional_embedding(tokens)
normed = self.embedding_layer_norm(embedded + embedded_positions)
normed = self.dropout(normed)
padded_normed = normed * neg_padding_mask.unsqueeze(-1)
# encoded: [B, T, C]
encoded = padded_normed.half()
states = self.encoder(encoded, neg_padding_mask)
# commonly you can retrieve a single "sentence representation" as
# states[-1].transpose(0, 1)
return states
# Swap a transformer for only RoBERTaEncoder encoders
def swap_modules_for_faster_transformer(model):
if hasattr(model, "encoder") and isinstance(model.encoder, RoBERTaEncoder):
old_transformer = model.encoder.encoder.transformer
model.encoder.encoder.transformer = NVFasterTransformerEncoder(old_transformer)
return model
else:
return model
|
StarcoderdataPython
|
3315789
|
<gh_stars>0
import noise
import numpy as np
from PIL import Image
import math
import io
import json
shape = (1024, 1024)
scale = 150
octaves = 4
persistence = 0.5
lacunarity = 2.0
threshold = 0.05
seed = np.random.randint(0, 500)
black = [0, 0, 0]
blue = [65,105,225]
green = [34,139,34]
beach = [238, 214, 175]
snow = [255, 250, 250]
mountain = [139, 137, 137]
lightblue = [0,191,255]
darkgreen = [0,100,0]
sandy = [210,180,140]
def add_color2(world):
color_world = np.zeros(world.shape+(3,))
for i in range(shape[0]):
for j in range(shape[1]):
if world[i][j] < threshold + 0.05:
color_world[i][j] = blue
elif world[i][j] < threshold + 0.055:
color_world[i][j] = sandy
elif world[i][j] < threshold + 0.1:
color_world[i][j] = beach
elif world[i][j] < threshold + 0.25:
color_world[i][j] = green
elif world[i][j] < threshold + 0.6:
color_world[i][j] = darkgreen
elif world[i][j] < threshold + 0.7:
color_world[i][j] = mountain
elif world[i][j] < threshold + 1.0:
color_world[i][j] = snow
return color_world
world = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
world[i][j] = noise.pnoise2(i / scale,
j / scale,
octaves=octaves,
persistence=persistence,
lacunarity=lacunarity,
repeatx=2048,
repeaty=2048,
base=seed)
center_x, center_y = shape[1] // 2, shape[0] // 2
circle_grad = np.zeros_like(world)
for y in range(world.shape[0]):
for x in range(world.shape[1]):
distx = abs(x - center_x)
disty = abs(y - center_y)
dist = math.sqrt(distx*distx + disty*disty)
circle_grad[y][x] = dist
# get it between -1 and 1
max_grad = np.max(circle_grad)
circle_grad = circle_grad / max_grad
circle_grad -= 0.5
circle_grad *= 2.0
circle_grad = -circle_grad
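# Editorial note: at this point circle_grad maps each pixel's distance from the
# image centre into [-1, 1] with the sign flipped, so the centre is +1 and the
# corners are -1; multiplying it into the Perlin noise below suppresses terrain
# near the edges, which is what shapes the island.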
# shrink gradient
for y in range(world.shape[0]):
for x in range(world.shape[1]):
if circle_grad[y][x] > 0:
circle_grad[y][x] *= 20
# get it between 0 and 1
max_grad = np.max(circle_grad)
circle_grad = circle_grad / max_grad
with io.open("grad.json", "w") as file:
file.write(json.dumps({ "grad": circle_grad.tolist()}))
Image.fromarray((circle_grad * 255).astype(np.uint8)).show()
world_noise = np.zeros_like(world)
for i in range(shape[0]):
for j in range(shape[1]):
world_noise[i][j] = (world[i][j] * circle_grad[i][j])
if world_noise[i][j] > 0:
world_noise[i][j] *= 20
# get it between 0 and 1
max_grad = np.max(world_noise)
world_noise = world_noise / max_grad
island_world_grad = add_color2(world_noise)
Image.fromarray(island_world_grad.astype(np.uint8)).show()
|
StarcoderdataPython
|
3294445
|
<filename>fairseq/criterions/masked_adlm.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion('masked_adlm')
class MaskedAdLmLoss(FairseqCriterion):
"""
Implementation for the loss used in masked language model (MLM) training.
"""
def __init__(self, args, task):
super(MaskedAdLmLoss, self).__init__(args, task)
self.vocab = self.task.source_dictionary
print(len(self.vocab.count))
self.register_buffer('margins', torch.zeros((len(self.vocab.count), 1)))
self.margins.requires_grad = False
self.margin_lambda = args.margin_lambda
self.margin_lr = args.margin_lr
self.margin_norm = args.margin_norm
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
super(MaskedAdLmLoss,
MaskedAdLmLoss).add_args(parser)
parser.add_argument('--margin_lambda', default=0.5, type=float, metavar='D',
help='weight for the adaptive margin loss')
parser.add_argument('--margin_lr', default=0.0001, type=float, metavar='D',
help='weight for the adaptive margin loss')
parser.add_argument('--margin-norm', default='l1', type=str,
help='Type of margin norm in the loss')
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
# compute MLM loss
#self.margins.requires_grad = model.training
masked_tokens = sample['target'].ne(self.padding_idx)
sample_size = masked_tokens.int().sum().item()
# (Rare case) When all tokens are masked, the model results in empty
# tensor and gives CUDA error.
if sample_size == 0:
masked_tokens = None
logits = model(**sample['net_input'], masked_tokens=masked_tokens)[0]
targets = model.get_targets(sample, [logits])
#import IPython
#IPython.embed()
if sample_size != 0:
targets = targets[masked_tokens]
# targets shape: [x]
# logits.shape: [x, 32769]
one_hot = F.one_hot(targets, len(self.vocab.count)) # [x, 32769]
#import IPython
#IPython.embed()
m = F.embedding(targets, self.margins) # [x, 1]
#m = self.margins(targets).squeeze(dim=-1)
margin = m * one_hot # [x, 32769]
#import IPython
#IPython.embed()
logits_minus_margin = logits - margin
log_softmax = F.log_softmax(
logits_minus_margin.view(-1, logits.size(-1)),
dim=-1,
dtype=torch.float32,
) # [x, 32769]
adm_loss = F.nll_loss(
log_softmax,
targets.view(-1),
reduction='sum',
ignore_index=self.padding_idx,
)
# cal margin grad
with torch.no_grad():
margin_log_grad = torch.gather(log_softmax.detach(), dim=-1,
index=targets.unsqueeze(-1)) # [x, 1]
margin_grad_cross = torch.exp(margin_log_grad) - \
torch.ones_like(margin_log_grad)
if self.margin_norm == 'l1':
margin_grad = margin_grad_cross - torch.ones_like(m) * self.margin_lambda
else:
# l2 norm
margin_grad = margin_grad_cross - m * self.margin_lambda * 2.0
margin_update = -1.0 * margin_grad * self.margin_lr
self.margins.scatter_add_(0, targets.unsqueeze(-1), margin_update.half())
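            # Editorial summary of the update implemented above (derived from
            # the code, not from the original comments): with p_t the softmax
            # probability of the target after its margin is subtracted, each
            # target's margin is incremented by margin_lr * (1 - p_t + lambda)
            # in the L1 setting (or margin_lr * (1 - p_t + 2 * lambda * m_t)
            # for L2), so tokens the model predicts poorly accumulate larger
            # margins over training.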
# for logging below! margin_norm; normal loss
margin_norm = torch.mean(self.margins) * sample['nsentences']# used for log!
normal_loss = F.nll_loss(
F.log_softmax(
logits.view(-1, logits.size(-1)),
dim=-1,
dtype=torch.float32,
),
targets.view(-1),
reduction='sum',
ignore_index=self.padding_idx,
)
logging_output = {
'loss': utils.item(normal_loss.data) if reduce else normal_loss.data,
'margin_n':utils.item(margin_norm.data) if reduce else margin_norm.data,
'ntokens': sample['ntokens'],
'nsentences': sample['nsentences'],
'sample_size': sample_size,
'admloss': utils.item(adm_loss.data) if reduce else adm_loss.data,
}
return adm_loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
admloss_sum = sum(log.get('admloss', 0) for log in logging_outputs)
margin_n = sum(log.get('margin_n', 0) for log in logging_outputs)
sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)
metrics.log_scalar('admloss', admloss_sum / sample_size / math.log(2), sample_size, round=3)
metrics.log_scalar('margin_norm', margin_n / nsentences, 32, round=3)
metrics.log_derived('ppl', lambda meters: round(2**meters['loss'].avg, 3))
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
to True will improves distributed training speed.
"""
return True
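
A minimal, self-contained sketch of the margin-adjusted cross-entropy and the manual l1 margin update that forward() performs; the tensor sizes, random inputs, and hyperparameter values below are made up for illustration and do not come from this file.

import torch
import torch.nn.functional as F

# hypothetical sizes: 8 masked positions, a 100-token vocabulary
num_masked, vocab_size = 8, 100
logits = torch.randn(num_masked, vocab_size)
targets = torch.randint(0, vocab_size, (num_masked,))
margins = torch.zeros(vocab_size, 1)          # one margin per vocabulary entry

# subtract each target's margin from its own logit only
one_hot = F.one_hot(targets, vocab_size)      # [num_masked, vocab_size]
m = F.embedding(targets, margins)             # [num_masked, 1]
log_probs = F.log_softmax(logits - m * one_hot, dim=-1)
adm_loss = F.nll_loss(log_probs, targets, reduction='sum')

# manual margin update for the l1 penalty, mirroring the no-grad block above
with torch.no_grad():
    p_target = torch.exp(torch.gather(log_probs, -1, targets.unsqueeze(-1)))
    grad = (p_target - 1.0) - 0.5             # margin_lambda = 0.5 (the default)
    margins.scatter_add_(0, targets.unsqueeze(-1), -grad * 1e-4)  # margin_lr = 1e-4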
|
StarcoderdataPython
|
4810114
|
from opentrons import protocol_api

metadata = {'apiLevel': '2.8'}


def run(protocol: protocol_api.ProtocolContext):
    # number of regular plates
    regular_plates = 4
    # pipette name
    pipette_name = 'p300_multi_gen2'
    # labware name for regular plates
    regular_plate_name = 'opentrons_96_tiprack_300ul'
    # volTip = volume dispensed to each well of a regular plate
    volTip = 100

    deepplate = protocol.load_labware('nest_96_wellplate_2ml_deep', 1)
    tiprack_1 = protocol.load_labware('opentrons_96_tiprack_300ul', 2)
    pipette = protocol.load_instrument(pipette_name, 'right', tip_racks=[tiprack_1])

    # determine the amount to aspirate for multi-dispensing
    if pipette_name == 'p20_multi_gen2':
        aspireVol = 20
        minVol = 2
    elif pipette_name == 'p300_multi' or pipette_name == 'p300_multi_gen2':
        aspireVol = 300
        minVol = 30

    # constraints and labware compatibility check
    # print ERROR when the volume set for each well exceeds the max pipette volume
    if volTip > aspireVol:
        print("ERROR: Volume exceeds pipette capacity.",
              "Consider using a higher capacity pipette.")
    # optimization suggestion when using the p20
    # and each aspiration can only serve one well at most
    elif aspireVol < 2 * volTip and aspireVol == 20:
        print("WARNING: The current pipette only allows one dispensation per aspiration.",
              "Optimize by using a higher capacity pipette.")

    # add regular plates to a list, placed in deck slots starting at 4
    list_of_regular_plates = []
    for i in range(regular_plates):
        plate = protocol.load_labware(regular_plate_name, i + 4)
        list_of_regular_plates.append(plate)

    # pipette from A1 - A12
    curVol = 0
    for i in range(0, 12):
        well_range = "A" + str(i + 1)
        pipette.pick_up_tip(tiprack_1[well_range])
        # dispense to every regular plate, refilling when the tip runs low
        for plate in list_of_regular_plates:
            if curVol < volTip or curVol <= minVol:
                pipette.aspirate(float(aspireVol - curVol), deepplate[well_range])
                curVol = aspireVol
            pipette.dispense(volTip, plate[well_range])
            curVol -= volTip
        curVol = 0
        pipette.drop_tip()
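
The refill-and-dispense cadence of the nested loop can be sanity-checked off-robot. A minimal sketch of the same volume bookkeeping follows; the volumes are copied from the protocol, and the loop count simply stands in for the four destination plates.

aspire_vol, vol_tip, min_vol = 300, 100, 30   # volumes from the protocol above
cur_vol, dispenses, refills = 0, 0, 0
for _ in range(4):                            # one dispense per destination plate
    if cur_vol < vol_tip or cur_vol <= min_vol:
        refills += 1
        cur_vol = aspire_vol
    cur_vol -= vol_tip
    dispenses += 1
print(refills, dispenses)   # -> 2 refills for 4 dispenses with these volumes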
|
StarcoderdataPython
|
1616685
|
<reponame>Keesiu/meta-kaggle<filename>data/external/repositories_2to3/141822/AXA_Telematics-master/Features/combine_output_files_Forest.py
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from random import sample
import os
import sys
import time
import csv

from modules_janto.paths import *

blockresults = os.path.join(FEATURES, "blocks", "results")

print('\n\nCombining block outputs Random Forest (feature set 3)...\n')


def break_input_file_features(combined_file):
    """Concatenate the per-block Random Forest result files into one CSV."""
    output_file = open(combined_file, 'w', newline='')
    output_csv = csv.writer(output_file)
    cnt = 0
    for i in range(1, 29):
        input_found = 0
        try:
            telematics_input = open(os.path.join(blockresults, "Telematics_results_Forest_Run3_block_" + str(i) + ".csv"), 'r')
            telematics_csv = csv.reader(telematics_input)
            input_found = 1
        except:
            input_found = 0
            if i < 28:
                print("Don't have", i)
            cnt += 1
        if input_found == 1:
            header = next(telematics_csv)
            if i == 1:
                output_csv.writerow(['driver_trip', 'prob'])
            cnt1 = 0
            for row in telematics_csv:
                output_csv.writerow(row)
                cnt1 += 1
            # every block except the last one is expected to hold 20000 rows
            if cnt1 != 20000 and i != 28:
                print(" Not Finished", i)
    print(" total not found", cnt)
    output_file.close()
    return


combined_file = os.path.join(MODELS, "RF_Telematics_results_Forest_featureset3.csv")
break_input_file_features(combined_file)

print('Combined output written to\n\t', MODELS, '\n\n')
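
Once the blocks are merged, the combined file is a plain two-column CSV. A short sketch of reading it back for a sanity check is shown below; it reuses the MODELS constant from modules_janto.paths (imported directly here, which is an assumption about that module), and the expected row counts are only what the script itself checks for.

import csv
import os

from modules_janto.paths import MODELS

combined = os.path.join(MODELS, "RF_Telematics_results_Forest_featureset3.csv")
with open(combined, newline='') as f:
    rows = list(csv.reader(f))

header, body = rows[0], rows[1:]
print(header)        # ['driver_trip', 'prob']
# 27 full blocks of 20000 rows plus a final partial block are expected
print(len(body))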
|
StarcoderdataPython
|
1619534
|
#!/usr/bin/env python3
"""Common functions for the Flask webUI."""

import os
import sqlite3
import sys

sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))

import constants  # noqa


def initial_state():
    """
    Set the factory settings for the application.

    The settings are stored in a dictionary.

    Returns:
        An empty dict()
    """
    defstate = dict()
    return defstate


class Fles:
    """
    Supporting object that bundles the app's paths, database location and
    configuration for the web UI.
    """

    def __init__(self):
        # app info:
        # path to this file as a list of elements
        self.HERE = os.path.realpath(__file__).split('/')
        self.MYLEVEL = 4  # kimnaty = 1, bin = 2, fles = 3
        # # element that contains the appname (given the location of this file)
        # self.MYAPP = self.HERE[-self.MYLEVEL]
        # absolute path to the app's root
        self.MYROOT = "/".join(self.HERE[0:-self.MYLEVEL])
        # self.NODE = os.uname()[1]  # name of the host
        # self.ROOM_ID = self.NODE[-2:]  # inferred room-id
        self.DATABASE = constants.KIMNATY['database']
        self.CONFIG = f'{self.MYROOT}/.config/kimdata.json'

    def get_latest_data(self, fields):
        """Retrieve the most recent datapoints from the database."""
        query = f"""
            SELECT {fields}
            FROM data
            WHERE sample_epoch = (SELECT MAX(sample_epoch) FROM kimnaty)
            ;
        """
        db_con = sqlite3.connect(self.DATABASE)
        with db_con:
            db_cur = db_con.cursor()
            db_cur.execute(query)
            db_data = db_cur.fetchall()
        return list(db_data[0])
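
A minimal usage sketch, assuming constants.KIMNATY['database'] resolves to a readable SQLite file; the column names requested here are hypothetical and only illustrate the fields argument.

# hypothetical usage; the columns must exist in the queried table
fles = Fles()
latest = fles.get_latest_data("sample_epoch, temperature, humidity")
print(dict(zip(["sample_epoch", "temperature", "humidity"], latest)))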
|
StarcoderdataPython
|
175685
|
<reponame>openhealthcare/python-fp17
import datetime

from fp17 import treatments, exemptions


def annotate(bcds1):
    bcds1.patient.surname = "BARNES"
    bcds1.patient.forename = "SUSAN"
    bcds1.patient.address = ["34 HIGH STREET"]
    bcds1.patient.sex = 'F'
    bcds1.patient.date_of_birth = datetime.date(1969, 7, 9)

    bcds1.date_of_acceptance = datetime.date(2017, 4, 1)
    bcds1.date_of_completion = datetime.date(2017, 4, 1)

    # "Expectant Mother" exemption, evidence seen
    bcds1.exemption_remission = {
        'code': exemptions.EXPECTANT_MOTHER.EVIDENCE_SEEN,
    }

    # Treatments: "Examination"
    bcds1.treatments = [
        treatments.TREATMENT_CATEGORY(1),
        treatments.EXAMINATION,
    ]

    return bcds1
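
A hedged sketch of exercising annotate() with a stand-in message object; the real BCDS1 schema class from python-fp17 is not reproduced here, so a SimpleNamespace merely mimics its attribute assignment.

from types import SimpleNamespace

# hypothetical stand-in for the real message class used in the python-fp17 tests
msg = SimpleNamespace(patient=SimpleNamespace())
annotate(msg)
print(msg.patient.surname, msg.date_of_completion)   # BARNES 2017-04-01
print(msg.exemption_remission['code'])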
|
StarcoderdataPython
|
3295375
|
<filename>features/utils.py<gh_stars>1-10
from typing import Dict
from typing import Iterable
from typing import List

from javalang.tokenizer import JavaToken, Identifier, Keyword, Literal
from javalang.tree import Node


def identifiers(tokens: List[JavaToken]) -> List[Identifier]:
    return [it for it in tokens if isinstance(it, Identifier)]


def keywords(tokens: List[JavaToken]) -> List[Keyword]:
    return [it for it in tokens if isinstance(it, Keyword)]


def literals(tokens: List[JavaToken]) -> List[Literal]:
    return [it for it in tokens if isinstance(it, Literal)]


def children(node: Node) -> List:
    """Flatten a node's children, expanding child lists into single nodes."""
    nodes = []
    for it in node.children:
        if isinstance(it, list):
            nodes += it
        else:
            nodes.append(it)
    return nodes


def non_empty_lines(code: str) -> List[str]:
    return [line for line in code.split('\n') if line.strip() != '']


def get_nodes(node, node_type) -> List:
    """Collect all descendant nodes of the given type, including node itself."""
    result = []
    if isinstance(node, node_type):
        result.append(node)
    for it in children(node):
        if isinstance(it, Node):
            result += get_nodes(it, node_type)
    return result


def get_nodes_count(node, node_type) -> int:
    return len(get_nodes(node, node_type))


def build_mapping_to_ids(values: Iterable) -> Dict:
    values = sorted(set(values))
    return {key: value for key, value in zip(values, range(len(values)))}
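
A short usage sketch, assuming javalang is installed; the snippet of Java source is made up for the example.

import javalang

code = "class Foo { int add(int a, int b) { return a + b; } }"
tokens = list(javalang.tokenizer.tokenize(code))
tree = javalang.parse.parse(code)

print(len(identifiers(tokens)), len(keywords(tokens)), len(literals(tokens)))
print(get_nodes_count(tree, javalang.tree.MethodDeclaration))   # 1
print(build_mapping_to_ids(t.value for t in identifiers(tokens)))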
|
StarcoderdataPython
|