id (stringlengths 1-7) | text (stringlengths 6-1.03M) | dataset_id (stringclasses: 1 value)
---|---|---|
1783939
|
from clickatell import Transport
class Http(Transport):
"""
Provides access to the Clickatell HTTP API
"""
def __init__(self, apiKey):
"""
Construct a new API instance with the auth key of the API
:param str apiKey: The auth key
"""
self.apiKey = apiKey
Transport.__init__(self)
def request(self, action, data={}, headers={}, method='GET'):
"""
Append the user authentication details to every incoming request
"""
data = self.merge(data, {'apiKey': self.apiKey})
return Transport.request(self, action, data, headers, method)
def sendMessage(self, to, message, extra={}):
"""
If the 'to' parameter is a single entry, we will parse it into a list.
We will merge default values into the request data and the extra parameters
provided by the user.
"""
to = to if isinstance(to, list) else [to]
data = {'to': to, 'content': message}
data = self.merge(data, extra)
content = self.parseResponse(self.request('messages/http/send', data))
return content
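# Hedged usage sketch (not part of the original module): the API key and the
# phone number below are placeholders, and the call only succeeds against the
# real Clickatell HTTP API.
if __name__ == '__main__':
    sms = Http('YOUR_API_KEY')
    result = sms.sendMessage('+15551234567', 'Hello from the HTTP API')
    print(result)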
|
StarcoderdataPython
|
3399199
|
from . import db
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin, current_user
from . import login_manager
from datetime import datetime
@login_manager.user_loader
def load_user(user_id):
return User.query.get(user_id)
class User(UserMixin,db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer,primary_key = True)
username = db.Column(db.String(255),unique=True,)
email = db.Column(db.String(255), unique=True, index=True)
pass_secure = db.Column(db.String(255))
posts = db.relationship('Post', backref='user', lazy='dynamic')
comment = db.relationship('Comment', backref='user', lazy='dynamic')
bio = db.Column(db.String(255))
profile_pic_path = db.Column(db.String())
@property
def password(self):
raise AttributeError('You cannot read the password attribute')
@password.setter
def password(self, password):
self.pass_secure = generate_password_hash(password)
def verify_password(self,password):
return check_password_hash(self.pass_secure,password)
def save_user(self):
db.session.add(self)
db.session.commit()
def __repr__(self):
return f'User {self.username}'
class Post(db.Model, UserMixin):
__tablename__ = 'posts'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
post = db.Column(db.Text, nullable=False)
comment = db.relationship('Comment', backref='post', lazy='dynamic')
category = db.Column(db.String, nullable=False)
date_created = db.Column(db.DateTime, default=datetime.utcnow)
author = db.Column(db.String)
up_vote = db.relationship('Upvote', backref='post', lazy='dynamic')
down_vote = db.relationship('Downvote', backref='post', lazy='dynamic')
def save_post(self):
db.session.add(self)
db.session.commit()
def delete_post(self):
db.session.delete(self)
db.session.commit()
def __repr__(self):
return f"Post Title: {self.title}"
class Comment(db.Model):
__tablename__ = 'comments'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))
comment = db.Column(db.Text(), nullable=False)
def save(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_comments(cls, post_id):
comments = Comment.query.filter_by(post_id=post_id).all()
return comments
def delete(self):
db.session.delete(self)
db.session.commit()
def __repr__(self):
return f'Comments: {self.comment}'
class Upvote(db.Model):
__tablename__ = 'upvotes'
id = db.Column(db.Integer, primary_key=True)
upvote = db.Column(db.Integer, default=1)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))
def save(self):
db.session.add(self)
db.session.commit()
    @classmethod
    def upvote(cls, id):
        upvote_post = Upvote(user_id=current_user.id, post_id=id)
        upvote_post.save()
@classmethod
def query_upvotes(cls, id):
upvote = Upvote.query.filter_by(post_id=id).all()
return upvote
@classmethod
def all_upvotes(cls):
upvotes = Upvote.query.order_by('id').all()
return upvotes
def __repr__(self):
return f'{self.user_id}:{self.post_id}'
class Downvote(db.Model):
__tablename__ = 'downvotes'
id = db.Column(db.Integer, primary_key=True)
downvote = db.Column(db.Integer, default=1)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))
def save(self):
db.session.add(self)
db.session.commit()
    @classmethod
    def downvote(cls, id):
        downvote_post = Downvote(user_id=current_user.id, post_id=id)
        downvote_post.save()
@classmethod
def query_downvotes(cls, id):
downvote = Downvote.query.filter_by(post_id=id).all()
return downvote
@classmethod
def all_downvotes(cls):
downvote = Downvote.query.order_by('id').all()
return downvote
def __repr__(self):
return f'{self.user_id}:{self.post_id}'
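# Hedged usage sketch (not part of the original module): assumes an application
# context and a configured database; the credentials below are placeholders.
def _demo_password_flow():
    user = User(username='demo', email='demo@example.com')
    user.password = 'secret123'           # stored only as a hash in pass_secure
    assert user.verify_password('secret123')
    assert not user.verify_password('wrong')
    user.save_user()                      # commits the new row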
|
StarcoderdataPython
|
1687446
|
<reponame>waleoyediran/django-starter
from rest_framework import routers
from demoproject.api import views
api_router = routers.DefaultRouter()
api_router.register(r'users', views.UserViewSet)
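# Hedged wiring sketch (not part of the original file): a project urls.py would
# typically include the router like this; the 'api/' prefix is an assumption.
#
#     from django.urls import include, path
#     urlpatterns = [path('api/', include(api_router.urls))]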
|
StarcoderdataPython
|
119090
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Unit tests for the contents of device_temp_file.py.
"""
import logging
import os
import sys
import unittest
from pylib import constants
from pylib.utils import device_temp_file
sys.path.append(os.path.join(
constants.DIR_SOURCE_ROOT, 'third_party', 'pymock'))
import mock # pylint: disable=F0401
class DeviceTempFileTest(unittest.TestCase):
def setUp(self):
self.device_utils = mock.MagicMock()
def testTempFileNameAlreadyExists(self):
self.device_utils.GetExternalStoragePath.return_value = '/sdcard'
self.device_utils.FileExists.side_effect = [True, True, True, False]
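    # The first three candidate names are reported as taken and the fourth as
    # free, so constructing the DeviceTempFile below should probe FileExists
    # exactly four times before settling on a name.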
tmpfile = device_temp_file.DeviceTempFile(self.device_utils)
logging.debug('Temp file name: %s' % tmpfile.name)
self.assertEqual(self.device_utils.FileExists.call_count, 4)
self.device_utils.WriteFile.assert_called_with(tmpfile.name, '')
def testTempFileLifecycle(self):
self.device_utils.GetExternalStoragePath.return_value = '/sdcard'
self.device_utils.FileExists.return_value = False
with device_temp_file.DeviceTempFile(self.device_utils) as tmpfile:
filename = tmpfile.name
self.assertEqual(self.device_utils.WriteFile.call_count, 1)
self.assertNotEqual(self.device_utils.RunShellCommand.call_args,
mock.call(['rm', mock.ANY]))
self.assertEqual(self.device_utils.RunShellCommand.call_args,
mock.call(['rm', filename]))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main(verbosity=2)
|
StarcoderdataPython
|
186134
|
from http.client import HTTPConnection
import sys, urllib.parse
def stop_server (port):
"""send QUIT request to http server running on localhost:<port>"""
conn = HTTPConnection("localhost:%d" % port)
conn.request("PRINT", "/", "THIS IS IT PRINTING")
resp = conn.getresponse()
conn = HTTPConnection("localhost:%d" % port)
conn.request("ERROR", "/", "THIS IS IT ERORRING")
resp = conn.getresponse()
conn = HTTPConnection("localhost:%d" % port)
conn.request("QUIT", "/", "THIS IS IT QUITTING")
resp = conn.getresponse()
print(resp)
assert len(sys.argv) > 1, "Error, correct usage: %s <port number>" % sys.argv[0]
assert sys.argv[1].isdigit(), "invalid port %r" % sys.argv[1]
port = int(sys.argv[1])
stop_server(port)
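# Hedged invocation sketch (the file name is an assumption):
#     python stop_server.py 8000
# sends PRINT, ERROR and QUIT requests to the test server on localhost:8000.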
|
StarcoderdataPython
|
1641547
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 9 00:35:02 2021
@author: loaner
"""
import numpy as np
from matplotlib import pyplot as plt
import os
from scipy.interpolate import make_interp_spline
Re_tau1 = [125, 180, 250, 550]
sparese = [0.02, 0.05, 0.1]
dummy_idx1 = 200
path = "raw_results/"
SMALL_SIZE = 10
MEDIUM_SIZE = 12
BIGGER_SIZE = 14
plt.rc('font', size=BIGGER_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=MEDIUM_SIZE) # legend fontsize
plt.rc('figure', titlesize=MEDIUM_SIZE) # fontsize of the figure title
err_U_sps = []
err_uv_sps = []
#for sprse in sparese:
for Re_tau in Re_tau1:
dummy_idx1 = 200
#dummy_idx2 = 70
"""
Get DNS data and Spline fitting
"""
    str1 = 'DNS_data/Couette_Retau'+str(Re_tau)+'.dat'
data = np.loadtxt(str1)
y_h, y_plus, U_plus, uv_plus = data[:,0], data[:,1], data[:,2], data[:,9]
new_Re_tau = y_plus[-1]/2
spl_U = make_interp_spline(y_plus, U_plus)
spl_uv = make_interp_spline(y_plus, uv_plus)
idx = np.where(y_plus <new_Re_tau+0.01 )
plt.semilogx (y_plus[idx], U_plus[idx]*2/np.max(U_plus) , 'k--', label = r"$U_{dns}$")
plt.semilogx (y_plus[idx].reshape((-1,1)), uv_plus[idx] , 'b--', label = r"$uv_{dns}$")
#for Re_tau in Re_tau1:
for sprse in sparese:
#dummy_idx2 += 2
dummy_idx1 += 2
        data_sparse = np.loadtxt('raw/Channel_Re_tau ='+str(Re_tau)+'_coeff-aux-pts='+str(dummy_idx1)+'_alpha_.txt')
yp_sps, U_sps, uv_sps = data_sparse[:,0], data_sparse[:, 1], data_sparse[:,2]
err_U_sps_loc = np.mean(np.absolute(spl_U(yp_sps) - U_sps) / np.absolute(spl_U(yp_sps) + 1e-5) )
err_uv_sps_loc = np.mean(np.absolute(spl_uv(yp_sps) - uv_sps))
err_U_sps.append(err_U_sps_loc*100)
err_uv_sps.append(err_uv_sps_loc*100)
        plt.semilogx (yp_sps.reshape((-1,1)), 2*U_sps.reshape(-1)/np.max(U_plus), label = r"$U_{nn}$; data(%):"+str(sprse*100))
#plt.semilogx (y_plus, U_plus/np.max(U_plus) , 'k--', label = r"$U_{dns}$")
#plt.semilogx (yp_sps.reshape((-1,1)), U_sps.reshape(-1)/np.max(U_plus), 'r', label = r"$U_{nn}$")
        plt.semilogx (yp_sps.reshape((-1,1)), uv_sps.reshape(-1), label = r"$uv_{nn}$; data(%):"+str(sprse*100))
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
plt.xlabel(r"$y^+$")
plt.ylabel("values")
plt.title(r"Couette : Non Fickian low, $Re_{\tau}$ = "+np.str(Re_tau))
plt.tight_layout()
    plt.savefig('pics/spase_nf_couette_Re_'+str(Re_tau)+'.png', dpi=300)
plt.show()
#plt.close(fig)
sparese = np.array(sparese)*100
plt.plot (sparese, err_U_sps[:3], label = r"$Re_{\tau}$ :" +str(Re_tau1[0]))
plt.plot (sparese, err_U_sps[3:6], label = r"$Re_{\tau}$ :" +str(Re_tau1[1]))
plt.plot (sparese, err_U_sps[6:9], label = r"$Re_{\tau}$ :" +str(Re_tau1[2]))
plt.plot (sparese, err_U_sps[9:], label = r"$Re_{\tau}$ :" +str(Re_tau1[3]))
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
plt.xlabel(r"% of data")
plt.ylabel(" U : Error (%)")
plt.title(r"Couette : Non Fickian law, error in velocity")
plt.tight_layout()
plt.savefig('pics/cou_nf_u_err.png', dpi=300)
plt.show()
plt.plot (sparese, err_uv_sps[:3], label = r"$Re_{\tau}$ :" +str(Re_tau1[0]))
plt.plot (sparese, err_uv_sps[3:6], label = r"$Re_{\tau}$ :" +str(Re_tau1[1]))
plt.plot (sparese, err_uv_sps[6:9], label = r"$Re_{\tau}$ :" +str(Re_tau1[2]))
plt.plot (sparese, err_uv_sps[9:], label = r"$Re_{\tau}$ :" +str(Re_tau1[3]))
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
plt.xlabel(r"% of data")
plt.ylabel(" uv : Error (%)")
plt.title(r"Couette : Non Fickian law, error in Reynolds Stress")
plt.tight_layout()
plt.savefig('pics/cou_nf_uv_err.png', dpi=300)
plt.show()
|
StarcoderdataPython
|
1690069
|
<gh_stars>0
from Node import Node
class LinkedList:
def __init__(self):
self.head = None
self.tail = None
def append(self, data):
new_node = Node(data)
if self.head is None:
self.head = new_node
self.tail = new_node
else:
new_node.prev = self.tail
self.tail.next = new_node
self.tail = new_node
def prepend(self, data):
new_node = Node(data)
if self.head is None:
self.head = new_node
self.tail = new_node
else:
new_node.next = self.head
self.head.prev = new_node
self.head = new_node
    def insertAfter(self, node, data):
        given_node = self.search(node)['node']
        new_node = Node(data)
        next_node = given_node.next
        new_node.next = next_node
        new_node.prev = given_node
        given_node.next = new_node
        if next_node is not None:
            next_node.prev = new_node
        else:
            self.tail = new_node
    def insertBefore(self, node, data):
        given_node = self.search(node)['node']
        new_node = Node(data)
        prev_node = given_node.prev
        new_node.next = given_node
        new_node.prev = prev_node
        given_node.prev = new_node
        if prev_node is not None:
            prev_node.next = new_node
        else:
            self.head = new_node
    def delete(self, elements):
        index = -1
        head = self.head
        while head is not None:
            index += 1
            if index in elements:
                prev_node = head.prev
                next_node = head.next
                if prev_node is not None:
                    prev_node.next = next_node
                else:
                    self.head = next_node
                if next_node is not None:
                    next_node.prev = prev_node
                else:
                    self.tail = prev_node
            head = head.next
def remove(self, key):
if isinstance(key, list):
self.delete(key)
else:
node_indexes = self.search(key)['indexes']
self.delete(node_indexes)
def search(self, data):
head = self.head
current_index = -1
info = {'node': None, 'count': 0, 'indexes': []}
while head is not None:
current_index += 1
if head.data == data:
info['node'] = head
info['count'] += 1
info['indexes'].append(current_index)
head = head.next
if info['count'] == 0:
return None
else:
return info
def sort(self):
current_node = self.head.next
while current_node is not None:
next_node = current_node.next
search_node = current_node.prev
while search_node is not None and search_node.data > current_node.data:
search_node = search_node.prev
self.remove(current_node.data)
if search_node is None:
current_node.prev = None
self.prepend(current_node.data)
else:
self.insertAfter(search_node.data, current_node.data)
current_node = next_node
def print(self):
output = ""
head = self.head
while head is not None:
output += str(head.data) + " ---> "
head = head.next
if not len(output) > 0:
output += "The list is empty!"
return output[:-6]
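# Hedged usage sketch (not part of the original module): assumes Node(data)
# exposes .data, .prev and .next, as the class above already relies on.
if __name__ == '__main__':
    numbers = LinkedList()
    for value in (3, 1, 2):
        numbers.append(value)
    numbers.prepend(0)
    numbers.sort()
    print(numbers.print())   # expected output: 0 ---> 1 ---> 2 ---> 3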
|
StarcoderdataPython
|
31475
|
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import re
from addonpayments.utils import GenerationUtils
class TestGenerationUtils:
def test_generate_hash(self):
"""
Test Hash generation success case.
"""
test_string = '20120926112654.thestore.ORD453-11.00.Successful.3737468273643.79347'
secret = 'mysecret'
expected_result = '368df010076481d47a21e777871012b62b976339'
result = GenerationUtils.generate_hash(test_string, secret)
assert expected_result == result
def test_generate_timestamp(self):
"""
Test timestamp generation. Hard to test this in a meaningful way. Checking length and valid characters.
"""
result = GenerationUtils().generate_timestamp()
match = re.match(r'([0-9]{14})', result)
assert match
def test_generate_order_id(self):
"""
Test order Id generation. Hard to test this in a meaningful way. Checking length and valid characters.
"""
result = GenerationUtils().generate_order_id()
match = re.match(r'[A-Za-z0-9-_]{32}', result)
assert match
|
StarcoderdataPython
|
2140
|
<reponame>WAvdBeek/CoAPthon3
#!/usr/bin/env python
import getopt
import socket
import sys
import cbor
#from cbor2 import dumps, loads
import json
import time
import traceback
from coapthon.client.helperclient import HelperClient
from coapthon.utils import parse_uri
from coapthon import defines
client = None
paths = {}
paths_extend = {}
my_base = ""
def usage(): # pragma: no cover
print("Command:\tknxcoapclient.py -o -p [-P]")
print("Options:")
print("\t-o, --operation=\tGET|GETNONE|PUT|POST|DELETE|DISCOVER|OBSERVE")
print("\t-p, --path=\t\t\tPath of the request")
print("\t-P, --payload=\t\tPayload of the request")
print("\t-c, --contenttype=\t\tcontenttype of the request")
print("\t-f, --payload-file=\t\tFile with payload of the request")
def get_url(line):
data = line.split(">")
url = data[0]
return url[1:]
def get_ct(line):
tagvalues = line.split(";")
for tag in tagvalues:
if tag.startswith("ct"):
ct_value_all = tag.split("=")
ct_value = ct_value_all[1].split(",")
return ct_value[0]
return ""
def get_base(url):
# python3 knxcoapclient.py -o GET -p coap://[fe80::6513:3050:71a7:5b98]:63914/a -c 50
my_url = url.replace("coap://","")
mybase = my_url.split("/")
return mybase[0]
def get_base_from_link(payload):
print("get_base_from_link\n")
global paths
global paths_extend
lines = payload.splitlines()
# add the
if len(paths) == 0:
my_base = get_base(get_url(lines[0]))
return my_base
def get_sn(my_base):
print("Get SN :");
sn = execute_get("coap://"+my_base+"/dev/sn", 60)
json_data = cbor.loads(sn.payload)
#print ("SN : ", json_data)
return json_data
def install(my_base):
sn = get_sn(my_base)
print (" SN : ", sn)
iid = "5" # installation id
if "000001" == sn :
# sensor, e.g sending
print ("--------------------")
print ("Installing SN: ", sn)
content = { 2: "reset"}
print("reset :", content);
execute_post("coap://"+my_base+"/.well-known/knx", 60, 60, content)
content = True
print("set PM :", content);
execute_put("coap://"+my_base+"/dev/pm", 60, 60, content)
content = 1
print("set IA :", content);
execute_put("coap://"+my_base+"/dev/ia", 60, 60, content)
content = iid
execute_put("coap://"+my_base+"/dev/iid", 60, 60, content)
content = { 2: "startLoading"}
print("lsm :", content);
execute_post("coap://"+my_base+"/a/lsm", 60, 60, content)
execute_get("coap://"+my_base+"/a/lsm", 60)
# group object table
# id (0)= 1
# url (11)= /p/light
# ga (7 )= 1
# cflags (8) = ["r" ] ; read = 1, write = 2, transmit = 3 update = 4
content = [ {0: 1, 11: "p/push", 7:[1], 8: [2] } ]
execute_post("coap://"+my_base+"/fp/g", 60, 60, content)
execute_get("coap://"+my_base+"/fp/g", 40)
# recipient table
# id (0)= 1
# ia (12)
# url (11)= .knx
# ga (7 )= 1
# cflags (8) = ["r" ] ; read = 1, write = 2, transmit = 3 update = 4
content = [ {0: 1, 11: "/p/push", 7:[1], 12 :"blah.blah" } ]
execute_post("coap://"+my_base+"/fp/r", 60, 60, content)
content = False
print("set PM :", content);
execute_put("coap://"+my_base+"/dev/pm", 60, 60, content)
content = { 2: "loadComplete"}
print("lsm :", content);
execute_post("coap://"+my_base+"/a/lsm", 60, 60, content)
execute_get("coap://"+my_base+"/a/lsm", 60)
if "000002" == sn :
# actuator ==> receipient
# should use /fp/r
print ("--------------------")
print ("installing SN: ", sn)
content = True
print("set PM :", content);
execute_put("coap://"+my_base+"/dev/pm", 60, 60, content)
content = 2
print("set IA :", content);
execute_put("coap://"+my_base+"/dev/ia", 60, 60, content)
content = iid
execute_put("coap://"+my_base+"/dev/iid", 60, 60, content)
content = { 2: "startLoading"}
print("lsm :", content);
execute_post("coap://"+my_base+"/a/lsm", 60, 60, content)
execute_get("coap://"+my_base+"/a/lsm", 60)
# group object table
# id (0)= 1
# url (11)= /p/light
# ga (7 )= 1
# cflags (8) = ["r" ] ; read = 1, write = 2, transmit = 3 update = 4
content = [ { 0: 1, 11: "/p/light", 7:[1], 8: [1] } ]
execute_post("coap://"+my_base+"/fp/g", 60, 60, content)
execute_get("coap://"+my_base+"/fp/g", 40)
# publisher table
# id (0)= 1
# ia (12)
# url (11)= .knx
# ga (7 )= 1
# cflags (8) = ["r" ] ; read = 1, write = 2, transmit = 3 update = 4
content = [ {0: 1, 11: ".knx", 7:[1], 12 :"blah.blah" } ]
execute_post("coap://"+my_base+"/fp/p", 60, 60, content)
content = False
print("set PM :", content);
execute_put("coap://"+my_base+"/dev/pm", 60, 60, content)
content = { 2: "loadComplete"}
print("lsm :", content);
execute_post("coap://"+my_base+"/a/lsm", 60, 60, content)
execute_get("coap://"+my_base+"/a/lsm", 60)
# do a post
content = {"sia": 5678, "st": 55, "ga": 1, "value": 100 }
content = { 4: 5678, "st": 55, 7: 1, "value": 100 }
# st ga value (1)
#content = { 5: { 6: 1, 7: 1, 1: True } }
#execute_post("coap://"+my_base+"/.knx", 60, 60, content)
content = {4: 5678, 5: { 6: 1, 7: 1, 1: False } }
#execute_post("coap://"+my_base+"/.knx", 60, 60, content)
#execute_post("coap://[FF02::FD]:5683/.knx", 60, 60, content)
# no json tags as strings
def do_sequence_dev(my_base):
print("===================")
print("Get SN :");
sn = execute_get("coap://"+my_base+"/dev/sn", 60)
sn = get_sn(my_base)
print (" SN : ", sn)
print("===================")
print("Get HWT :");
execute_get("coap://"+my_base+"/dev/hwt", 60)
print("===================")
print("Get HWV :");
execute_get("coap://"+my_base+"/dev/hwv", 60)
print("===================")
print("Get FWV :");
execute_get("coap://"+my_base+"/dev/fwv", 60)
print("===================")
print("Get Model :");
execute_get("coap://"+my_base+"/dev/model", 60)
print("===================")
content = True
print("set PM :", content);
execute_put("coap://"+my_base+"/dev/pm", 60, 60, content)
execute_get("coap://"+my_base+"/dev/pm", 60)
content = False
print("set PM :", content);
execute_put("coap://"+my_base+"/dev/pm", 60, 60, content)
execute_get("coap://"+my_base+"/dev/pm", 60)
print("===================")
content = 44
print("set IA :", content);
execute_put("coap://"+my_base+"/dev/ia", 60, 60, content)
execute_get("coap://"+my_base+"/dev/ia", 60)
print("===================")
content = "my host name"
print("set hostname :", content);
execute_put("coap://"+my_base+"/dev/hostname", 60, 60, content)
execute_get("coap://"+my_base+"/dev/hostname", 60)
print("===================")
content = " iid xxx"
print("set iid :", content);
execute_put("coap://"+my_base+"/dev/iid", 60, 60, content)
execute_get("coap://"+my_base+"/dev/iid", 60)
# id ==> 0
# href ==> 11
# ga ==> 7
# cflag ==> 8
def do_sequence_fp_g_int(my_base):
# url, content, accept, contents
content = [ {0: 1, 11: "xxxx1", 8: [1,2,3,4,5], 7:[2222,3333]} ]
execute_post("coap://"+my_base+"/fp/g", 60, 60, content)
execute_get("coap://"+my_base+"/fp/g/1", 60)
execute_get("coap://"+my_base+"/fp/g", 40)
content = [ {0: 2, 11: "xxxxyyy2", 8: [1,4,5], 7:[44,55,33]}, {0: 3, 1: "xxxxyyy3", 8: [1,4,5], 7:[44,55,33]} ]
execute_post("coap://"+my_base+"/fp/g", 60, 60, content)
execute_get("coap://"+my_base+"/fp/g/2", 60)
execute_get("coap://"+my_base+"/fp/g/3", 60)
execute_get("coap://"+my_base+"/fp/g", 40)
execute_del("coap://"+my_base+"/fp/g/3", 60)
execute_get("coap://"+my_base+"/fp/g/3", 60)
execute_get("coap://"+my_base+"/fp/g", 40)
def do_sequence_fp_g(my_base):
# url, content, accept, contents
content = [ {"id": 1, "href": "xxxx1", "cflag": [1,2,3,4,5], "ga":[2222,3333]} ]
execute_post("coap://"+my_base+"/fp/g", 60, 60, content)
execute_get("coap://"+my_base+"/fp/g/1", 60)
execute_get("coap://"+my_base+"/fp/g", 40)
content = [ {"id": 2, "href": "xxxxyyy2", "cflag": [1,4,5], "ga":[44,55,33]}, {"id": 3, "href": "xxxxyyy3", "cflag": [1,4,5], "ga":[44,55,33]} ]
execute_post("coap://"+my_base+"/fp/g", 60, 60, content)
execute_get("coap://"+my_base+"/fp/g/2", 60)
execute_get("coap://"+my_base+"/fp/g/3", 60)
execute_get("coap://"+my_base+"/fp/g", 40)
execute_del("coap://"+my_base+"/fp/g/3", 60)
execute_get("coap://"+my_base+"/fp/g/3", 60)
execute_get("coap://"+my_base+"/fp/g", 40)
# id ==> 0
# ia ==> 12
# path ==> 112
# url ==> 10
# ga ==> 7
def do_sequence_fp_p_int(my_base):
# url, content, accept, contents
content = [ {0: 1, 12: "Ia.IA1", 112: "path1", 7:[2222,3333]} ]
execute_post("coap://"+my_base+"/fp/p", 60, 60, content)
execute_get("coap://"+my_base+"/fp/p/1", 60)
# 40 == application-link format
execute_get("coap://"+my_base+"/fp/p", 40)
content = [ {0: 2, 12: "xxxxyyyia2", 112: "path2", 7:[44,55,33]},
{0: 3, 12: "xxxxyyyia3", 112: "path3", 7:[44,55,33]} ]
execute_post("coap://"+my_base+"/fp/p", 60, 60, content)
execute_get("coap://"+my_base+"/fp/p/2", 60)
execute_get("coap://"+my_base+"/fp/p/3", 60)
execute_get("coap://"+my_base+"/fp/p", 40)
execute_del("coap://"+my_base+"/fp/p/3", 60)
execute_get("coap://"+my_base+"/fp/p/3", 60)
execute_get("coap://"+my_base+"/fp/p", 40)
def do_sequence_fp_p(my_base):
# url, content, accept, contents
content = [ {"id": 1, "ia": "Ia.IA1", "path": "path1", "ga":[2222,3333]} ]
execute_post("coap://"+my_base+"/fp/p", 60, 60, content)
execute_get("coap://"+my_base+"/fp/p/1", 60)
# 40 == application-link format
execute_get("coap://"+my_base+"/fp/p", 40)
content = [ {"id": 2, "ia": "xxxxyyyia2", "path": "path2","ga":[44,55,33]}, {"id": 3, "ia": "xxxxyyyia3", "path": "path3","ga":[44,55,33]} ]
execute_post("coap://"+my_base+"/fp/p", 60, 60, content)
execute_get("coap://"+my_base+"/fp/p/2", 60)
execute_get("coap://"+my_base+"/fp/p/3", 60)
execute_get("coap://"+my_base+"/fp/p", 40)
execute_del("coap://"+my_base+"/fp/p/3", 60)
execute_get("coap://"+my_base+"/fp/p/3", 60)
execute_get("coap://"+my_base+"/fp/p", 40)
# id ==> 0
# ia ==> 12
# path ==> 112
# url ==> 10
# ga ==> 7
def do_sequence_fp_r_int(my_base):
# url, content, accept, contents
content = [ { 0: 1, 12: "r-Ia.IA1", 112: "r-path1", 7:[2222,3333]} ]
execute_post("coap://"+my_base+"/fp/r", 60, 60, content)
execute_get("coap://"+my_base+"/fp/r/1", 60)
execute_get("coap://"+my_base+"/fp/r", 40)
content = [ { 0: 2, 12: "r-Ia.IA2", 10: "url2", 112: "r-path2", 7:[44,55,33]},
{0: 3, 12: "r-Ia.IA3", 112: "r-path3", 7:[44,55,33]} ]
execute_post("coap://"+my_base+"/fp/r", 60, 60, content)
execute_get("coap://"+my_base+"/fp/r/2", 60)
execute_get("coap://"+my_base+"/fp/r/3", 60)
execute_get("coap://"+my_base+"/fp/r", 40)
execute_del("coap://"+my_base+"/fp/r/3", 60)
execute_get("coap://"+my_base+"/fp/r/3", 60)
execute_get("coap://"+my_base+"/fp/r", 40)
def do_sequence_fp_r(my_base):
# url, content, accept, contents
content = [ {"id": 1, "ia": "r-Ia.IA1", "path": "r-path1", "ga":[2222,3333]} ]
execute_post("coap://"+my_base+"/fp/r", 60, 60, content)
execute_get("coap://"+my_base+"/fp/r/1", 60)
execute_get("coap://"+my_base+"/fp/r", 40)
content = [ {"id": 2, "ia": "r-Ia.IA2", "path": "r-path2", "ga":[44,55,33]}, {"id": 3, "ia": "r-Ia.IA3", "path": "r-path3", "ga":[44,55,33]} ]
execute_post("coap://"+my_base+"/fp/r", 60, 60, content)
execute_get("coap://"+my_base+"/fp/r/2", 60)
execute_get("coap://"+my_base+"/fp/r/3", 60)
execute_get("coap://"+my_base+"/fp/r", 40)
execute_del("coap://"+my_base+"/fp/r/3", 60)
execute_get("coap://"+my_base+"/fp/r/3", 60)
execute_get("coap://"+my_base+"/fp/r", 40)
# cmd ==> 2
def do_sequence_lsm_int(my_base):
# url, content, accept, contents
execute_get("coap://"+my_base+"/a/lsm", 60)
content = {2 : "startLoading"}
execute_post("coap://"+my_base+"/a/lsm", 60, 60, content)
execute_get("coap://"+my_base+"/a/lsm", 60)
content = {2 : "loadComplete"}
execute_post("coap://"+my_base+"/a/lsm", 60, 60, content)
execute_get("coap://"+my_base+"/a/lsm", 60)
content = {2 : "unload"}
execute_post("coap://"+my_base+"/a/lsm", 60, 60, content)
execute_get("coap://"+my_base+"/a/lsm", 60)
def do_sequence_lsm(my_base):
# url, content, accept, contents
execute_get("coap://"+my_base+"/a/lsm", 60)
content = {"cmd": "startLoading"}
execute_post("coap://"+my_base+"/a/lsm", 60, 60, content)
execute_get("coap://"+my_base+"/a/lsm", 60)
content = {"cmd": "loadComplete"}
execute_post("coap://"+my_base+"/a/lsm", 60, 60, content)
execute_get("coap://"+my_base+"/a/lsm", 60)
content = {"cmd": "unload"}
execute_post("coap://"+my_base+"/a/lsm", 60, 60, content)
execute_get("coap://"+my_base+"/a/lsm", 60)
# ./knx resource
# sia ==> 4
# ga ==> 7
# st 6
def do_sequence_knx_knx_int(my_base):
# url, content, accept, contents
execute_get("coap://"+my_base+"/.knx", 60)
content = {"value": { 4 : 5, 7: 7777 , 6 : "rp"}}
execute_post("coap://"+my_base+"/.knx", 60, 60, content)
execute_get("coap://"+my_base+"/.knx", 60)
# ./knx resource
def do_sequence_knx_knx(my_base):
# url, content, accept, contents
execute_get("coap://"+my_base+"/.knx", 60)
content = {"value": { "sia" : 5, "ga": 7, "st": "rp"}}
execute_post("coap://"+my_base+"/.knx", 60, 60, content)
execute_get("coap://"+my_base+"/.knx", 60)
def do_sequence_knx_spake(my_base):
# url, content, accept, contents
# sequence:
# - parameter exchange: 15 (rnd)- return value
# - credential exchange: 10 - return value
# - pase verification exchange: 14 - no return value
content = { 15: b"a-15-sdfsdred"}
execute_post("coap://"+my_base+"/.well-known/knx/spake", 60, 60, content)
# pa
content = { 10: b"s10dfsdfsfs" }
execute_post("coap://"+my_base+"/.well-known/knx/spake", 60, 60, content)
# ca
content = { 14: b"a15sdfsdred"}
execute_post("coap://"+my_base+"/.well-known/knx/spake", 60, 60, content)
# expecting return
def do_sequence_knx_idevid(my_base):
# url, content, accept, contents
execute_get("coap://"+my_base+"/.well-known/knx/idevid", 282)
def do_sequence_knx_ldevid(my_base):
# url, content, accept, contents
execute_get("coap://"+my_base+"/.well-known/knx/ldevid", 282)
def do_sequence_knx_osn(my_base):
# url, content, accept, contents
execute_get("coap://"+my_base+"/.well-known/knx/osn", 60)
def do_sequence_knx_crc(my_base):
# url, content, accept, contents
execute_get("coap://"+my_base+"/.well-known/knx/crc", 60)
def do_sequence_oscore(my_base):
# url, content, accept, contents
execute_get("coap://"+my_base+"/f/oscore", 40)
execute_get("coap://"+my_base+"/p/oscore/replwdo", 60)
content = 105
execute_put("coap://"+my_base+"/p/oscore/replwdo", 60, 60, content)
execute_get("coap://"+my_base+"/p/oscore/replwdo", 60)
execute_get("coap://"+my_base+"/p/oscore/osndelay", 60)
content = 1050
execute_put("coap://"+my_base+"/p/oscore/osndelay", 60, 60, content)
execute_get("coap://"+my_base+"/p/oscore/osndelay", 60)
def do_sequence_core_knx(my_base):
# url, content, accept, contents
execute_get("coap://"+my_base+"/.well-known/knx", 60)
content = { 1 : 5, 2: "reset"}
execute_post("coap://"+my_base+"/.well-known/knx", 60, 60, content)
def do_sequence_a_sen(my_base):
# url, content, accept, contents
content = {2: "reset"}
execute_post("coap://"+my_base+"/a/sen", 60, 60, content)
def do_sequence_auth(my_base):
# url, content, accept, contents
execute_get("coap://"+my_base+"/auth", 40)
def do_sequence_auth_at(my_base):
# url, content, accept, contents
execute_get("coap://"+my_base+"/auth/at", 40)
#
content = {0: b"id", 1 : 20, 2:b"ms",3:"hkdf", 4:"alg", 5:b"salt", 6:b"contextId"}
execute_post("coap://"+my_base+"/auth/at", 60, 60, content)
content = {0: b"id2", 1 : 20, 2:b"ms",3:"hkdf", 4:"alg", 5:b"salt", 6:b"contextId2"}
execute_post("coap://"+my_base+"/auth/at", 60, 60, content)
execute_get("coap://"+my_base+"/auth/at", 40)
execute_get("coap://"+my_base+"/auth/at/id", 60)
execute_del("coap://"+my_base+"/auth/at/id", 60)
def do_sequence_f(my_base):
# url, content, accept, contents
execute_get("coap://"+my_base+"/f", 40)
# note this one is a bit dirty hard coded...
execute_get("coap://"+my_base+"/f/417", 40)
execute_get("coap://"+my_base+"/.well-known/core", 40)
def do_sequence(my_base):
#sn = get_sn(my_base)
install(my_base)
return
do_sequence_dev(my_base)
#return
do_sequence_fp_g_int(my_base)
#do_sequence_fp_g(my_base)
do_sequence_fp_p_int(my_base)
#do_sequence_fp_p(my_base)
do_sequence_fp_r_int(my_base)
#do_sequence_fp_r(my_base)
do_sequence_lsm_int(my_base)
#do_sequence_lsm(my_base)
do_sequence_lsm_int(my_base)
# .knx
do_sequence_knx_knx_int(my_base)
#do_sequence_knx_knx(my_base)
do_sequence_knx_spake(my_base)
do_sequence_knx_idevid(my_base)
do_sequence_knx_ldevid(my_base)
do_sequence_knx_crc(my_base)
do_sequence_knx_osn(my_base)
do_sequence_oscore(my_base)
do_sequence_core_knx(my_base)
do_sequence_a_sen(my_base)
do_sequence_auth(my_base)
do_sequence_auth_at(my_base)
do_sequence_f(my_base)
def client_callback_discovery(response, checkdata=None):
print(" --- Discovery Callback ---")
global my_base
if response is not None:
print ("response code:",response.code)
print ("response type:",response.content_type)
if response.code > 100:
print("+++returned error+++")
return
if response.content_type == defines.Content_types["application/link-format"]:
print (response.payload.decode())
my_base = get_base_from_link(response.payload.decode())
do_sequence(my_base)
def code2string(code):
if code == 68:
return "(Changed)"
if code == 69:
return "(Content)"
if code == 132:
return "(Not Found)"
if code == 133:
return "(METHOD_NOT_ALLOWED)"
if code == 160:
return "(INTERNAL_SERVER_ERROR)"
return ""
def client_callback(response, checkdata=None):
print(" --- Callback ---")
if response is not None:
print ("response code:",response.code, code2string(response.code))
print ("response type:",response.content_type)
if response.code > 100:
print("+++returned error+++")
return
#print(response.pretty_print())
if response.content_type == defines.Content_types["text/plain"]:
if response.payload is not None:
print (type(response.payload), len(response.payload))
print ("=========")
print (response.payload)
print ("=========")
else:
print ("payload: none")
elif response.content_type == defines.Content_types["application/cbor"]:
print (type(response.payload), len(response.payload))
print ("=========")
print (response.payload)
print ("=========")
#json_data = loads(response.payload)
#print(json_data)
#print ("=========")
json_string = ""
try:
json_data = cbor.loads(response.payload)
json_string = json.dumps(json_data, indent=2, sort_keys=True)
except:
print("error in cbor..")
print (json_string)
print ("===+++===")
if checkdata is not None:
check_data = cbor.loads(checkdata)
check_string = json.dumps(check_data, indent=2, sort_keys=True)
print(" check: ")
print (check_string)
if check_string == json_string:
print(" =+++===> OK ")
else:
print(" =+++===> NOT OK ")
print (json_string)
elif response.content_type == defines.Content_types["application/vnd.ocf+cbor"]:
print ("application/vnd.ocf+cbor")
try:
print (type(response.payload), len(response.payload))
print ("=========")
print (response.payload)
print ("=========")
json_data = cbor.loads(response.payload)
print (json_data)
print ("---------")
except:
traceback.print_exc()
json_string = json.dumps(json_data, indent=2, sort_keys=True)
print (json_string)
elif response.content_type == defines.Content_types["application/link-format"]:
print (response.payload.decode())
else:
if response.payload is not None:
print ("type, len", type(response.payload), len(response.payload))
print (response.payload)
#else:
# print (" not handled: ", response)
else:
print (" Response : None")
#check = True
#while check:
# chosen = eval(input("Stop observing? [y/N]: "))
# if chosen != "" and not (chosen == "n" or chosen == "N" or chosen == "y" or chosen == "Y"):
# print("Unrecognized choose.")
# continue
def client_callback_observe(response): # pragma: no cover
global client
print("Callback_observe")
check = True
while check:
        chosen = input("Stop observing? [y/N]: ")
if chosen != "" and not (chosen == "n" or chosen == "N" or chosen == "y" or chosen == "Y"):
print("Unrecognized choose.")
continue
elif chosen == "y" or chosen == "Y":
while True:
                rst = input("Send RST message? [Y/n]: ")
if rst != "" and not (rst == "n" or rst == "N" or rst == "y" or rst == "Y"):
print("Unrecognized choose.")
continue
elif rst == "" or rst == "y" or rst == "Y":
client.cancel_observing(response, True)
else:
client.cancel_observing(response, False)
check = False
break
else:
break
def execute_get(mypath, ct_value):
print ("---------------------------")
print ("execute_get: ", ct_value, mypath)
print (type(mypath))
if (mypath is None or len(mypath) < 5):
return
if mypath.startswith("coap://") == False:
print(" not executing: ", mypath);
return;
ct = {}
ct['accept'] = ct_value
host, port, path = parse_uri(mypath)
try:
tmp = socket.gethostbyname(host)
host = tmp
except socket.gaierror:
pass
nclient = HelperClient(server=(host, port))
response = nclient.get(path, None, None, **ct)
client_callback(response)
nclient.stop()
return response
def execute_del(mypath, ct_value):
print ("---------------------------")
print ("execute_del: ", ct_value, mypath)
do_exit = False
ct = {}
ct['accept'] = ct_value
ct['content_type'] = ct_value
if mypath.startswith("coap://") == False:
print(" not executing: ", mypath);
return;
host, port, path = parse_uri(mypath)
try:
tmp = socket.gethostbyname(host)
host = tmp
except socket.gaierror:
pass
nclient = HelperClient(server=(host, port))
nclientcheck = HelperClient(server=(host, port))
payload = 0
response = nclient.delete(path, None, None, **ct)
client_callback(response)
#nclient.stop()
#sys.exit(2)
print ("=======")
def execute_put(mypath, ct_value, accept, content):
print ("---------------------------")
print ("execute_put: ", ct_value, mypath)
do_exit = False
ct = {}
ct['accept'] = accept
ct['content_type'] = ct_value
if mypath.startswith("coap://") == False:
print(" not executing: ", mypath);
return
host, port, path = parse_uri(mypath)
try:
tmp = socket.gethostbyname(host)
host = tmp
except socket.gaierror:
pass
nclient = HelperClient(server=(host, port))
nclientcheck = HelperClient(server=(host, port))
payload = 0
if accept == 60:
payload = cbor.dumps(content)
else:
payload = content
print ("payload: ", payload)
response = nclient.put(path, payload, None, None , None, **ct)
client_callback(response)
nclient.stop()
def execute_post(mypath, ct_value, accept, content):
print ("---------------------------")
print ("execute_post: ", ct_value, mypath)
print (content)
print (" ---------------------")
do_exit = False
ct = {}
ct['accept'] = accept
ct['content_type'] = ct_value
if mypath.startswith("coap://") == False:
print(" not executing: ", mypath);
return
host, port, path = parse_uri(mypath)
try:
tmp = socket.gethostbyname(host)
host = tmp
except socket.gaierror:
pass
nclient = HelperClient(server=(host, port))
#nclientcheck = HelperClient(server=(host, port))
payload = 0
if accept == 60:
#print(" content :", content)
payload = cbor.dumps(content)
else:
payload = content
response = nclient.post(path, payload, None, None , None, **ct)
client_callback(response)
nclient.stop()
def main(): # pragma: no cover
global client
op = None
path = None
payload = None
content_type = None
#ct = {'content_type': defines.Content_types["application/link-format"]}
ct = {}
ct['accept'] = 40
try:
opts, args = getopt.getopt(sys.argv[1:], "ho:p:P:f:c:", ["help", "operation=", "path=", "payload=",
"payload_file=","content-type"])
except getopt.GetoptError as err:
# print help information and exit:
print((str(err))) # will print something like "option -a not recognized"
usage()
sys.exit(2)
for o, a in opts:
if o in ("-o", "--operation"):
op = a
elif o in ("-p", "--path"):
path = a
elif o in ("-P", "--payload"):
payload = a
elif o in ("-c", "--content-type"):
ct['accept'] = a
print ("content type request : ", ct)
elif o in ("-f", "--payload-file"):
with open(a, 'r') as f:
payload = f.read()
elif o in ("-h", "--help"):
usage()
sys.exit()
else:
usage()
sys.exit(2)
if op is None:
print("Operation must be specified")
usage()
sys.exit(2)
if path is None:
print("Path must be specified")
usage()
sys.exit(2)
if not path.startswith("coap://"):
print("Path must be conform to coap://host[:port]/path")
usage()
sys.exit(2)
host, port, path = parse_uri(path)
try:
tmp = socket.gethostbyname(host)
host = tmp
except socket.gaierror:
pass
client = HelperClient(server=(host, port))
if op == "GET":
if path is None:
print("Path cannot be empty for a GET request")
usage()
sys.exit(2)
response = client.get(path, None, None, **ct)
print((response.pretty_print()))
if response.content_type == defines.Content_types["application/json"]:
json_data = json.loads(response.payload)
json_string = json.dumps(json_data, indent=2, sort_keys=True)
print ("JSON ::")
print (json_string)
if response.content_type == defines.Content_types["application/cbor"]:
json_data = cbor.loads(response.payload)
json_string = json.dumps(json_data, indent=2, sort_keys=True)
print ("JSON ::")
print (json_string)
if response.content_type == defines.Content_types["application/link-format"]:
#json_data = cbor.loads(response.payload)
#json_string = json.dumps(json_data, indent=2, sort_keys=True)
#print ("JSON ::")
print (response.payload.decode())
print ("\n\n")
if response.content_type == defines.Content_types["application/vnd.ocf+cbor"]:
json_data = cbor.loads(response.payload)
json_string = json.dumps(json_data, indent=2, sort_keys=True)
print ("JSON ::")
print (json_string)
client.stop()
elif op == "GETNONE":
if path is None:
print("Path cannot be empty for a GET-None request")
usage()
sys.exit(2)
response = client.get_non(path, None, None, **ct)
print((response.pretty_print()))
if response.content_type == defines.Content_types["application/json"]:
json_data = json.loads(response.payload)
json_string = json.dumps(json_data, indent=2, sort_keys=True)
print ("JSON ::")
print (json_string)
if response.content_type == defines.Content_types["application/cbor"]:
json_data = cbor.loads(response.payload)
json_string = json.dumps(json_data, indent=2, sort_keys=True)
print ("JSON ::")
print (json_string)
if response.content_type == defines.Content_types["application/vnd.ocf+cbor"]:
json_data = cbor.loads(response.payload)
json_string = json.dumps(json_data, indent=2, sort_keys=True)
print ("JSON ::")
print (json_string)
client.stop()
elif op == "OBSERVE":
if path is None:
print("Path cannot be empty for a GET request")
usage()
sys.exit(2)
client.observe(path, client_callback_observe)
elif op == "DELETE":
if path is None:
print("Path cannot be empty for a DELETE request")
usage()
sys.exit(2)
response = client.delete(path)
print((response.pretty_print()))
client.stop()
elif op == "POST":
if path is None:
print("Path cannot be empty for a POST request")
usage()
sys.exit(2)
if payload is None:
print("Payload cannot be empty for a POST request")
usage()
sys.exit(2)
print ( "payload for POST (ascii):", payload )
print (ct['accept'] )
if ct['accept'] == str(defines.Content_types["application/cbor"]):
json_data = json.loads(payload)
cbor_data = cbor.dumps(json_data)
payload = bytes(cbor_data)
if ct['accept'] == str(defines.Content_types["application/vnd.ocf+cbor"]):
json_data = json.loads(payload)
            cbor_data = cbor.dumps(json_data)
payload = cbor_data
response = client.post(path, payload, None, None, **ct)
print((response.pretty_print()))
if response.content_type == defines.Content_types["application/cbor"]:
json_data = cbor.loads(response.payload)
json_string = json.dumps(json_data, indent=2, sort_keys=True)
print (json_string)
if response.content_type == defines.Content_types["application/vnd.ocf+cbor"]:
json_data = cbor.loads(response.payload)
json_string = json.dumps(json_data, indent=2, sort_keys=True)
print (json_string)
client.stop()
elif op == "PUT":
if path is None:
print("Path cannot be empty for a PUT request")
usage()
sys.exit(2)
if payload is None:
print("Payload cannot be empty for a PUT request")
usage()
sys.exit(2)
response = client.put(path, payload)
print((response.pretty_print()))
client.stop()
elif op == "DISCOVER":
#response = client.discover( path, client_callback, None, **ct)
response = client.discover( path, None, None, **ct)
if response is not None:
print(response.pretty_print())
if response.content_type == defines.Content_types["application/cbor"]:
json_data = cbor.loads(response.payload)
json_string = json.dumps(json_data, indent=2, sort_keys=True)
print (json_string)
if response.content_type == defines.Content_types["application/vnd.ocf+cbor"]:
json_data = cbor.loads(response.payload)
json_string = json.dumps(json_data, indent=2, sort_keys=True)
print (json_string)
if response.content_type == defines.Content_types["application/link-format"]:
#json_data = cbor.loads(response.payload)
#json_string = json.dumps(json_data, indent=2, sort_keys=True)
print (response.payload.decode())
# do_get(response.payload.decode(), client)
client_callback_discovery(response)
counter = 2
try:
while counter > 0:
time.sleep(1)
counter = counter - 1
#client.stop()
except KeyboardInterrupt:
print("Client Shutdown")
#client.stop()
#execute_list()
client.stop()
else:
print("Operation not recognized")
usage()
sys.exit(2)
if __name__ == '__main__': # pragma: no cover
main()
|
StarcoderdataPython
|
153777
|
<reponame>nhtri2003gmail/ctf-write-ups
#!/usr/bin/env python3
from pwn import *
binary = context.binary = ELF('./outbackdoor')
if args.REMOTE:
p = remote('pwn-2021.duc.tf', 31921)
else:
p = process(binary.path)
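# The payload below pads 0x18 bytes up to the saved return address and then
# overwrites it with outBackdoor+1; skipping the function's first push
# instruction is a common trick to keep the stack 16-byte aligned for an
# eventual call to system() (alignment rationale is an assumption, not stated
# in the original write-up).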
payload = b''
payload += 0x18 * b'A'
payload += p64(binary.sym.outBackdoor+1)
p.sendlineafter(b'song?\n', payload)
p.interactive()
|
StarcoderdataPython
|
3371274
|
import struct
import argparse
def write_spdz(input_folder, data):
f = open(input_folder + "/f0", 'w')
for d in data[0]:
sign = d < 0
output = struct.pack("?", sign)
f.write(output)
output = struct.pack("Q", abs(int(d)))
f.write(output)
f.close()
f = open(input_folder + "/f1", 'w')
for d in data[1]:
sign = d < 0
output = struct.pack("?", sign)
f.write(output)
output = struct.pack("Q", abs(int(d)))
f.write(output)
f.close()
def main():
parser = argparse.ArgumentParser(description="An MC2 input parser")
parser.add_argument("input_folder", type=str)
parser.add_argument("data_source", type=str)
args = parser.parse_args()
import importlib
data = importlib.import_module(args.data_source).data
write_spdz(args.input_folder, data)
main()
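# Hedged usage sketch (not part of the original script): the data_source module
# is expected to expose a module-level `data` with one sequence of numbers per
# party, e.g. a file my_data.py (placeholder name) containing
#     data = [[1.5, -2.0, 7.0], [3.0, 4.25]]
# which would be split into <input_folder>/f0 and /f1 via:
#     python parse_inputs.py inputs my_data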
|
StarcoderdataPython
|
135285
|
# File name: main.py
# Copyright 2017 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
from flask import Flask, current_app, request, jsonify
import io
import model
import base64
import logging
app = Flask(__name__)
@app.route('/', methods=['POST'])
def predict():
data = {}
try:
data = request.get_json()['data']
except Exception:
return jsonify(status_code='400', msg='Bad Request'), 400
data = base64.b64decode(data)
image = io.BytesIO(data)
predictions = model.predict(image)
current_app.logger.info('Predictions: %s', predictions)
return jsonify(predictions=predictions)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=True)
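# Hedged client-side sketch (not part of the original server): assumes the
# service is running on localhost:8080 and that cat.jpg exists locally.
#
#     import base64, requests
#     with open('cat.jpg', 'rb') as img:
#         encoded = base64.b64encode(img.read()).decode()
#     resp = requests.post('http://localhost:8080/', json={'data': encoded})
#     print(resp.json()['predictions'])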
|
StarcoderdataPython
|
3358979
|
from flask import Flask, render_template
app = Flask(__name__, template_folder='templates')
@app.route("/")
def index():
return render_template("index.html")
# otro
@app.route("/video")
def video():
return render_template("video.html")
# material
@app.route("/material")
def material():
return render_template("material.html")
@app.route("/historia")
def historia():
return render_template("historia.html")
|
StarcoderdataPython
|
3390342
|
<filename>t4.py<gh_stars>0
print("git session")
|
StarcoderdataPython
|
1644957
|
<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import modelcluster.fields
import wagtail.wagtailcore.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('v1', '0004_auto_20160712_1531'),
('jobmanager', '0002_auto_20160809_1619'),
]
operations = [
migrations.CreateModel(
name='EmailApplicationLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('address', models.EmailField(max_length=254)),
('label', models.CharField(max_length=255)),
('description', models.TextField(null=True, blank=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='JobListingPage',
fields=[
('cfgovpage_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='v1.CFGOVPage')),
('description', wagtail.wagtailcore.fields.RichTextField(verbose_name=b'Description')),
('open_date', models.DateField(verbose_name=b'Open date')),
('close_date', models.DateField(verbose_name=b'Close date')),
('salary_min', models.DecimalField(verbose_name=b'Minimum salary', max_digits=11, decimal_places=2)),
('salary_max', models.DecimalField(verbose_name=b'Maximum salary', max_digits=11, decimal_places=2)),
],
options={
'abstract': False,
},
bases=('v1.cfgovpage',),
),
migrations.CreateModel(
name='USAJobsApplicationLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('announcement_number', models.CharField(max_length=128)),
('url', models.URLField(max_length=255)),
('applicant_type', models.ForeignKey(related_name='usajobs_application_links', to='jobmanager.ApplicantType')),
('job_listing', modelcluster.fields.ParentalKey(related_name='usajobs_application_links', to='jobmanager.JobListingPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.AddField(
model_name='emailapplicationlink',
name='job_listing',
field=modelcluster.fields.ParentalKey(related_name='email_application_links', to='jobmanager.JobListingPage'),
),
]
|
StarcoderdataPython
|
3204349
|
<gh_stars>1-10
from .builder import build_loss
from .kld_loss import KLDLoss
__all__ = [
'build_loss',
'KLDLoss'
]
|
StarcoderdataPython
|
1783221
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for model_search.single_trainer."""
import os
from absl import flags
from absl.testing import absltest
from model_search import constants
from model_search import single_trainer
from model_search.data import csv_data
FLAGS = flags.FLAGS
class SingleTrainerTest(absltest.TestCase):
def test_try_models(self):
    # Test assumes the source code is deployed under FLAGS.test_srcdir.
spec_path = os.path.join(FLAGS.test_srcdir, constants.DEFAULT_DNN)
trainer = single_trainer.SingleTrainer(
data=csv_data.Provider(
label_index=0,
logits_dimension=2,
record_defaults=[0, 0, 0, 0],
filename=os.path.join(
FLAGS.test_srcdir,
"model_search/model_search/data/testdata/"
"csv_random_data.csv")),
spec=spec_path)
trainer.try_models(
number_models=7,
train_steps=10,
eval_steps=10,
root_dir=FLAGS.test_tmpdir,
batch_size=2,
experiment_name="test",
experiment_owner="test")
if __name__ == "__main__":
absltest.main()
|
StarcoderdataPython
|
79933
|
<gh_stars>1-10
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.bots.bluechip_bridge_uncontested_bidding."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from open_spiel.python.bots import bluechip_bridge
import pyspiel
class BluechipBridgeWrapperTest(absltest.TestCase):
def test_complete_deal_east(self):
# Plays a complete deal, with the mock external bot playing East.
# The deal is as follows:
#
# Vul: None
# S AKJ8
# H 4
# D JT9532
# C 32
# S 3 S Q9
# H KQJ8762 H AT5
# D K4 D A87
# C KQ4 C AJT96
# S T76542
# H 93
# D Q6
# C 875
#
# West North East South
# Pass 1N Pass
# 2D Pass 2H Pass
# 3S Dbl 4C Pass
# 4D Pass 4N Pass
# 5D Pass 6H Pass
# Pass Pass
#
# N E S W N E S
# S7 S3 SK S9
# DJ D8 D6 DK
# H2 H4 HT H9
# H5 H3 H6 C3
# C4 C2 CT C5
# C6 C7 CQ D2
# CK D3 CJ C8
# D4 D5 DA DQ
# C9 S2 H7 S8
# HK SJ HA S4
# CA S5 H8 D9
# HQ DT D7 S6
# HJ SA SQ ST
#
# Declarer tricks: 12
game = pyspiel.load_game('bridge(use_double_dummy_result=false)')
mock_client = absltest.mock.Mock(
**{
'read_line.side_effect': [
'Connecting "WBridge5" as ANYPL using protocol version 18',
'EAST ready for teams',
'EAST ready to start',
'EAST ready for deal',
'EAST ready for cards',
"EAST ready for NORTH's bid",
'EAST bids 1NT',
"EAST ready for SOUTH's bid",
"EAST ready for WEST's bid",
"EAST ready for NORTH's bid",
'EAST bids 2H',
"EAST ready for SOUTH's bid",
"EAST ready for WEST's bid",
"EAST ready for NORTH's bid",
'EAST bids 4C Alert.',
"EAST ready for SOUTH's bid",
"EAST ready for WEST's bid",
"EAST ready for NORTH's bid",
'EAST bids 4NT',
"EAST ready for SOUTH's bid",
"EAST ready for WEST's bid",
"EAST ready for NORTH's bid",
'EAST bids 6H',
"EAST ready for SOUTH's bid",
"EAST ready for WEST's bid",
"EAST ready for NORTH's bid",
"EAST ready for SOUTH's card to trick 1",
'EAST ready for dummy',
'WEST plays 3s',
"EAST ready for NORTH's card to trick 1",
'EAST plays 9s',
"EAST ready for NORTH's card to trick 2",
'EAST plays 8d',
"EAST ready for SOUTH's card to trick 2",
'WEST plays kd',
'WEST plays 2h',
"EAST ready for NORTH's card to trick 3",
'EAST plays th',
"EAST ready for SOUTH's card to trick 3",
'EAST plays 5h',
"EAST ready for SOUTH's card to trick 4",
'WEST plays 6h',
"EAST ready for NORTH's card to trick 4",
'WEST plays 4c',
"EAST ready for NORTH's card to trick 5",
'EAST plays tc',
"EAST ready for SOUTH's card to trick 5",
'EAST plays 6c',
"EAST ready for SOUTH's card to trick 6",
'WEST plays qc',
"EAST ready for NORTH's card to trick 6",
'WEST plays kc',
"EAST ready for NORTH's card to trick 7",
'EAST plays jc',
"EAST ready for SOUTH's card to trick 7",
'WEST plays 4d',
"EAST ready for NORTH's card to trick 8",
'EAST plays ad',
"EAST ready for SOUTH's card to trick 8",
'EAST plays 9c',
"EAST ready for SOUTH's card to trick 9",
'WEST plays 7h',
"EAST ready for NORTH's card to trick 9",
'WEST plays kh',
"EAST ready for NORTH's card to trick 10",
'EAST plays ah',
"EAST ready for SOUTH's card to trick 10",
'EAST plays ac',
"EAST ready for SOUTH's card to trick 11",
'WEST plays 8h',
"EAST ready for NORTH's card to trick 11",
'WEST plays qh',
"EAST ready for NORTH's card to trick 12",
'EAST plays 7d',
"EAST ready for SOUTH's card to trick 12",
'WEST plays jh',
"EAST ready for NORTH's card to trick 13",
'EAST plays qs',
]
})
bot = bluechip_bridge.BlueChipBridgeBot(game, 1, mock_client)
state = game.new_initial_state()
history = [
33, 25, 3, 44, 47, 28, 23, 46, 1, 43, 30, 26, 29, 48, 24, 42, 13, 21,
17, 8, 5, 34, 6, 7, 37, 49, 11, 38, 51, 32, 20, 9, 0, 14, 35, 22, 10,
50, 15, 45, 39, 16, 12, 18, 27, 31, 41, 40, 4, 36, 19, 2, 52, 59, 52,
61, 52, 62, 52, 68, 53, 70, 52, 71, 52, 74, 52, 76, 52, 82, 52, 52, 52,
23, 7, 47, 31, 37, 25, 17, 45, 2, 10, 34, 30, 14, 6, 18, 4, 8, 0, 32,
12, 16, 20, 40, 1, 44, 5, 36, 24, 9, 13, 49, 41, 28, 3, 22, 27, 46, 39,
50, 11, 48, 15, 26, 29, 42, 33, 21, 19, 38, 51, 43, 35
]
# Check the bot provides the expected actions
for action in history:
if state.current_player() == 1:
bot_action = bot.step(state)
self.assertEqual(action, bot_action)
state.apply_action(action)
# Check the session went as expected; send_line calls are us sending
# data to the (mock) external bot.
mock_client.assert_has_calls([
absltest.mock.call.start(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('EAST ("WBridge5") seated'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line(
'Teams: N/S "north-south" E/W "east-west"'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('start of board'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line(
'Board number 1. Dealer NORTH. Neither vulnerable.'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line(
"EAST's cards: C A J T 9 6. D A 8 7. H A T 5. S Q 9."),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH PASSES'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH PASSES'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('WEST bids 2D'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH PASSES'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH PASSES'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('WEST bids 3S'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH DOUBLES'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH PASSES'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('WEST bids 4D'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH PASSES'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH PASSES'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('WEST bids 5D'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH PASSES'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH PASSES'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('WEST PASSES'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH PASSES'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH plays 7s'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line(
"Dummy's cards: C K Q 4. D K 4. H K Q J 8 7 6 2. S 3."),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH plays ks'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH plays jd'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH plays 6d'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('EAST to lead'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH plays 4h'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH plays 9h'),
absltest.mock.call.send_line('EAST to lead'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH plays 3h'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH plays 3c'),
absltest.mock.call.send_line('EAST to lead'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH plays 2c'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH plays 5c'),
absltest.mock.call.send_line('EAST to lead'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH plays 7c'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH plays 2d'),
absltest.mock.call.send_line('EAST to lead'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH plays 3d'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH plays 8c'),
absltest.mock.call.send_line('EAST to lead'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH plays 5d'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH plays qd'),
absltest.mock.call.send_line('EAST to lead'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH plays 2s'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH plays 8s'),
absltest.mock.call.send_line('EAST to lead'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH plays js'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH plays 4s'),
absltest.mock.call.send_line('EAST to lead'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH plays 5s'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH plays 9d'),
absltest.mock.call.send_line('EAST to lead'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH plays td'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH plays 6s'),
absltest.mock.call.send_line('EAST to lead'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH plays as'),
absltest.mock.call.read_line(),
])
if __name__ == '__main__':
absltest.main()
|
StarcoderdataPython
|
178231
|
( ( ( nu*(15*nu/2 - 5/2)*r(t)**3/c )*nHat*nHat*nHat*SigmaVec )
+( ( -5*delta*nu*r(t)**3/(2*c) )*nHat*nHat*nHat*S ) )
|
StarcoderdataPython
|
3227257
|
#!/usr/bin/env python3
# Copyright (C) 2021 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This tool checks that every create (table|view) is prefixed by
# drop (table|view).
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def check(path):
with open(path) as f:
lines = [l.strip() for l in f.readlines()]
# Check that CREATE VIEW/TABLE has a matching DROP VIEW/TABLE before it.
errors = 0
d_type, d_name = None, None
for line in lines:
m = re.match(r'^DROP (TABLE|VIEW) IF EXISTS (.*);$', line)
if m is not None:
d_type, d_name = m.group(1), m.group(2)
continue
m = re.match(r'^CREATE (?:VIRTUAL )?(TABLE|VIEW) (.*) (?:AS|USING).*', line)
if m is None:
continue
type, name = m.group(1), m.group(2)
if type != d_type or name != d_name:
sys.stderr.write(
('Missing DROP %s before CREATE %s\n') % (type, type))
sys.stderr.write(('%s:\n"%s" vs %s %s\n') % (path, line, d_type, d_name))
errors += 1
d_type, d_name = None, None
# Ban the use of LIKE in non-comment lines.
for line in lines:
if line.startswith('--'):
continue
if 'like' in line.casefold():
sys.stderr.write(
'LIKE is banned in trace processor metrics. Prefer GLOB instead.\n')
sys.stderr.write('Offending file: %s\n' % path)
errors += 1
return errors
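# Illustrative example (not taken from the repo) of the pattern check() enforces
# in metric SQL files: every CREATE TABLE/VIEW must be preceded by a matching
# DROP ... IF EXISTS, and LIKE is banned outside comments (use GLOB instead):
#
#   DROP VIEW IF EXISTS slice_count;
#   CREATE VIEW slice_count AS
#   SELECT COUNT(*) AS cnt FROM slice WHERE name GLOB 'launch*';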
def main():
errors = 0
metrics_sources = os.path.join(ROOT_DIR, 'src', 'trace_processor', 'metrics',
'sql')
for root, _, files in os.walk(metrics_sources, topdown=True):
for f in files:
path = os.path.join(root, f)
if path.endswith('.sql'):
errors += check(path)
return 0 if errors == 0 else 1
if __name__ == '__main__':
sys.exit(main())
|
StarcoderdataPython
|
1605951
|
<filename>itb-backup.py
#!/usr/bin/env python3
# Into The Breach saves backuper/restorer
#
# MAYBE: do not save again the same save (calc files hashes)
# TODO: remove external dependency (on pick)
# TODO: add save name dialogue (save save's meta info like date)
# TODO: config file/env vars support (where to save saves & etc)
# TODO: do not override settings by default
import argparse
import datetime as dt
import os
import pathlib
import shutil
import sys
# external dependency
from pick import pick
# TODO: pass cmd args
homedir = os.environ['HOME']
game_save_dir = "Library/Application Support/IntoTheBreach"
game_save_dir = os.path.join(homedir, game_save_dir)
save_baks_dir = "Documents/GameSaves/IntoTheBreach"
save_baks_dir = os.path.join(homedir, save_baks_dir)
#/20181208_1
def copy_subdirs(srcdir, dstdir):
src = os.listdir(srcdir)
os.mkdir(dstdir)
for f in src:
src = os.path.join(srcdir, f)
# dst = dstdir
dst = os.path.join(dstdir, f)
print("{} -> {}".format(src, dst))
if os.path.isfile(src):
# print("copy {} {}".format(src, dst))
shutil.copy(src, dstdir)
else:
# print("copy -r {} {}".format(src, dst))
shutil.copytree(src, dst)
def backup_cmd(args):
dtime = dt.datetime.now()
dtime_bak_subdir = dtime.strftime("%Y%m%d_%H%M")
print("new backup:", dtime_bak_subdir)
# return
src = game_save_dir
dst = os.path.join(save_baks_dir, dtime_bak_subdir)
# print("{} -> {}".format(src, dst))
# TODO: check is dst dir exists
copy_subdirs(src, dst)
def _get_saves():
return sorted([
f for f in os.listdir(save_baks_dir)
if os.path.isdir(os.path.join(save_baks_dir, f))
], key=str.lower, reverse=True)
def restore_cmd(args):
saves = _get_saves()
print("{}: {}".format(save_baks_dir, saves))
option, index = pick(["<< quit >>"] + saves, "available-saves")
if index == 0:
return
restore_dir = os.path.join(save_baks_dir, option)
print("restore save from dir:", restore_dir)
orig_bak_dir = game_save_dir + ".bak"
# print(f"shutil.rmtree({orig_bak_dir})", )
# print(f"shutil.move({game_save_dir}, {orig_bak_dir})")
shutil.rmtree(orig_bak_dir, ignore_errors=True)
shutil.move(game_save_dir, orig_bak_dir)
try:
# print(f"copy_subdirs({restore_dir}, {game_save_dir})")
copy_subdirs(restore_dir, game_save_dir)
except Exception as ex:
print("failed to restore save dir")
print(ex)
print("try to restore removed original save")
print(f"shutil.rmtree({game_save_dir})")
print(f"shutil.move({orig_bak_dir}, {game_save_dir})")
shutil.rmtree(game_save_dir, ignore_errors=True)
shutil.move(orig_bak_dir, game_save_dir)
def list_cmd(args):
saves = _get_saves()
print(f"Saves in {game_save_dir}:")
for s in saves:
print(s)
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
parser_backup = subparsers.add_parser(
"save",
help="Save current ITB progress")
parser_backup.set_defaults(func=backup_cmd)
parser_restore = subparsers.add_parser(
"load",
help="Restore ITB save")
parser_restore.set_defaults(func=restore_cmd)
parser_list = subparsers.add_parser(
"show",
help="Show ITB saves")
parser_list.set_defaults(func=list_cmd)
args = parser.parse_args()
args.func(args)
main()
# https://docs.python.org/3/library/datetime.html
# https://docs.python.org/3/library/argparse.html
|
StarcoderdataPython
|
199074
|
"""Test the factory for classifiers"""
import pytest
from src.classification.classifier_factory import ClassifierFactory
from src.classification.image import (
CrossAttentionNetworkClassifier,
MultiTaskEfficientNetB2Classifier,
VGG16Classifier,
)
from src.classification.text import (
BertClassifier,
DistilBertClassifier,
NRCLexTextClassifier,
)
def test_text_factory():
classifier = ClassifierFactory.get("text", "nrclex", {})
assert isinstance(classifier, NRCLexTextClassifier)
classifier = ClassifierFactory.get("text", "bert", {"model_name": "123"})
assert isinstance(classifier, BertClassifier)
assert classifier.model_name == "123"
classifier = ClassifierFactory.get("text", "distilbert", {})
assert isinstance(classifier, DistilBertClassifier)
with pytest.raises(ValueError):
_ = ClassifierFactory.get("wrong", "bert", {})
with pytest.raises(ValueError):
_ = ClassifierFactory.get("text", "wrong", {})
def test_image_factory():
classifier = ClassifierFactory.get("image", "vgg16", {})
assert isinstance(classifier, VGG16Classifier)
classifier = ClassifierFactory.get(
"image", "cross_attention", {"model_name": "123"}
)
assert isinstance(classifier, CrossAttentionNetworkClassifier)
assert classifier.parameters["model_name"] == "123"
classifier = ClassifierFactory.get("image", "efficientnet", {})
assert isinstance(classifier, MultiTaskEfficientNetB2Classifier)
with pytest.raises(ValueError):
_ = ClassifierFactory.get("wrong", "efficientnet", {})
with pytest.raises(ValueError):
_ = ClassifierFactory.get("image", "wrong", {})
|
StarcoderdataPython
|
3398356
|
<gh_stars>0
from .converter import FeatureConverter, MapFeatureConverter
from .dataset import Dataset, MapDataset, map_dataset
__all__ = []
__all__ += ['FeatureConverter', 'MapFeatureConverter']
__all__ += ['Dataset', 'MapDataset', 'map_dataset']
|
StarcoderdataPython
|
167977
|
"""
Title | Project
Author: <NAME>
Contact: <<EMAIL>>
Created:
Updated:
License: MIT License <https://github.com/cannlytics/cannlytics-ai/blob/main/LICENSE>
"""
# Initialize a Socrata client.
# app_token = os.environ.get('APP_TOKEN', None)
# client = Socrata('opendata.mass-cannabis-control.com', app_token)
# # Get sales by product type.
# products = client.get('xwf2-j7g9', limit=2000)
# products_data = pd.DataFrame.from_records(products)
# # Get licensees.
# licensees = client.get("hmwt-yiqy", limit=2000)
# licensees_data = pd.DataFrame.from_records(licensees)
# # Get the monthly average price per ounce.
# avg_price = client.get("rqtv-uenj", limit=2000)
# avg_price_data = pd.DataFrame.from_records(avg_price)
# # Get production stats (total employees, total plants, etc.)
# production = client.get("j3q7-3usu", limit=2000, order='saledate DESC')
# production_data = pd.DataFrame.from_records(production)
|
StarcoderdataPython
|
4829894
|
<filename>tools/nntool/quantization/multiplicative/quantizers/lstm_mult_ne16.py
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
import math
from copy import deepcopy
import numpy as np
from graph.types import LSTMParameters
from quantization.multiplicative.quantizers.rnn_mult_ne16 import (
calculatate_weight_q, limit_input_precision, roundup)
from quantization.multiplicative.scaling_qtypes import MultMulBiasScaleQType
from quantization.new_qrec import QRec
from quantization.qtype import QType
from quantization.quantizer_options import *
from quantization.unified_quantization_handler import (in_qs_constraint,
option_constraint,
options,
out_qs_constraint,
params_type)
from utils.stats_funcs import calc_bits
from .rnn_mult_ne16 import NE16RNNMultQuantizionHandler, calc_bias_offset, calc_weight_q
LOG = logging.getLogger('nntool.' + __name__)
def get_maxq_val(stats, scale):
return np.ceil(np.maximum(np.abs(stats['min']), np.abs(stats['max'])) / scale)
def get_max(stat):
return np.maximum(np.abs(stat['min']), np.abs(stat['max']))
def get_max_or_one(stat):
gate_max = np.maximum(np.abs(stat['min']), np.abs(stat['max']))
return np.where(gate_max == 0, 1.0, gate_max)
@options(
NE16_WEIGHT_BITS_OPTION,
FORCE_EXTERNAL_SIZE_OPTION,
NARROW_WEIGHTS_OPTION,
USE_NE16_OPTION,
NARROW_STATE_OPTION,
MAX_PRECISION_LIMIT_OPTION
)
class LSTMMultMultNE16Base(NE16RNNMultQuantizionHandler):
@classmethod
def _quantize_lstm(cls, params, in_qs, stats, input_bits, **kwargs):
force_out_qs, out_dtype = cls.get_mult_opts(**kwargs)
force_out_q = force_out_qs and force_out_qs[0]
if force_out_qs and any(force_out_q is not None for force_out_q in force_out_qs):
return None
opts = kwargs.get('opts', {})
if input_bits == 16:
in_out_dtype = np.uint16
else:
in_out_dtype = np.uint8
if in_qs is None:
return None
in_qs = deepcopy(in_qs)
G = kwargs['G']
in_q = in_qs[0]
# if in_q.dtype != in_out_dtype:
# cls.check_valid_ranges(params, stats, idx=0, dirs='in')
# in_q = in_qs[0] = QType.from_min_max_sq(
# min_val=stats['range_in'][0]['min'],
# max_val=stats['range_in'][0]['max'],
# dtype=in_out_dtype,
# asymmetric=True)
cls.check_valid_ranges(params, stats, idx=0, dirs='out')
in_edges = G.indexed_in_edges(params.name)
names = {val: idx for idx, val in enumerate(
LSTMParameters.INPUT_NAMES)}
o_q = in_qs[names['i_state']] = QType.from_min_max_sq(
min_val=stats['range_out'][0]['min'],
max_val=stats['range_out'][0]['max'],
dtype=in_out_dtype,
narrow_range=opts['narrow_state'])
cell_range = stats.get('range_cell')
if cell_range is None:
raise ValueError(
f'cell range not present in stats for {params.name}')
# cell range in minimum 1.0
cell_stat = max(1.0, *[abs(cell_range[var])
for var in ['min', 'max']])
if params.cell_clip and not params.quant_c_state_with_stat:
cell_max = params.cell_clip
ratio_c = cell_max / cell_stat
if not (ratio_c > 0.9 and ratio_c < 1.1):
msg = (f"C state is forced to a range [-{cell_max}:{cell_max}] different to the one calulated "
f"from the inference statistic [-{cell_stat}:{cell_stat}], consider using nodeoption {params.name} "
"QUANT_C_STATE_WITH_STAT 1 to force it to be the one calculated")
LOG.warning('%s', msg)
else:
cell_max = cell_stat
# this limit is driven by the c_in * f + c * i calculation
# c * i will be in Q24 and we want c_in * f to be scaled to the same
# abs(f) will be <=1 so the cell int bits cannot exceed 31 - 1 (overflow) - 24 = 6
cell_limit = pow(2, 6)
if cell_max > cell_limit:
LOG.warning(
'Cell state exceeds %s and will be clipped', cell_limit)
cell_max = cell_limit
cell_int_bits = calc_bits(cell_max)
# cell stays signed since it is used in a haddamard with the int32 streamout
# in NE16
in_qs[names['c_state']] = QType.from_min_max_sq(
-cell_max, cell_max, dtype=np.int16 if input_bits == 16 else np.int8)
LOG.debug("cell bits %d max %d cell range %d",
cell_int_bits,
cell_max,
in_qs[names['c_state']].range)
# set weight qtypes
int_num_inp = roundup(params.n_inputs, input_bits == 16)
int_num_states = roundup(params.n_states, input_bits == 16)
for gate in ['i', 'o', 'c', 'f']:
i_idx = names[f'i_2_{gate}_w']
r_idx = names[f'r_2_{gate}_w']
in_qs[i_idx] = calc_weight_q(
in_edges[i_idx].from_node,
(params.n_states, params.n_inputs),
(params.n_states, int_num_inp),
opts['weight_bits'],
opts.get('narrow_weights'))
in_qs[r_idx] = calc_weight_q(
in_edges[r_idx].from_node,
(params.n_states, params.n_states),
(params.n_states, int_num_states),
opts['weight_bits'],
opts.get('narrow_weights'))
in_q = limit_input_precision(
params,
input_bits,
in_q,
int_num_inp,
opts['narrow_weights'],
opts['weight_bits'],
opts.get('max_precision_limit', MAX_PRECISION_LIMIT_OPTION['default']),
w_qs=[in_qs[names[f'i_2_{gate}_w']] for gate in ['i', 'o', 'c', 'f']],
out_ranges=[stats.get(f'range_{gate}_gate_i') for gate in ['i', 'o', 'c', 'f']])
o_q = limit_input_precision(
params,
input_bits,
o_q,
int_num_states,
opts['narrow_weights'],
opts['weight_bits'],
opts.get('max_precision_limit',
MAX_PRECISION_LIMIT_OPTION['default']),
extra_correction=-1 if opts.get('narrow_state') else 0,
w_qs=[in_qs[names[f'r_2_{gate}_w']] for gate in ['i', 'o', 'c', 'f']],
out_ranges=[stats.get(f'range_{gate}_gate_r') for gate in ['i', 'o', 'c', 'f']])
# setup zero offset bias adjustment
woffs = {}
for gate in ['i', 'o', 'c', 'f']:
i_idx = names[f'i_2_{gate}_w']
r_idx = names[f'r_2_{gate}_w']
woffs[gate] = [
calc_bias_offset(in_edges[i_idx].from_node, in_qs[i_idx], in_q.zero_point),
calc_bias_offset(in_edges[r_idx].from_node, in_qs[r_idx], o_q.zero_point),
]
# get weight scales
scale_pairs = {chan: ('i_2_%s_w' % chan, 'r_2_%s_w' % chan)
for chan in ['i', 'o', 'c', 'f']}
w_scales = [(in_qs[names[namei]].scale, in_qs[names[namer]].scale)
for k, (namei, namer) in scale_pairs.items()]
gate_sum_max = [
(get_max_or_one(stats[f'range_{gate}_gate_i']),
get_max_or_one(stats[f'range_{gate}_gate_r']))
for gate in ['i', 'o', 'c', 'f']
]
gate_sum_max_bits = [
(np.ceil(np.log2(gsm_i / (in_qs[0].scale * i_w))),
np.ceil(np.log2(gsm_r / (o_q.scale * r_w))))
for (gsm_i, gsm_r), (i_w, r_w) in zip(gate_sum_max, w_scales)]
for gate, (max_i, max_r) in zip(['i', 'o', 'c', 'f'], gate_sum_max_bits):
if np.max(max_i) > 30:
LOG.warning(
'max bits in accumulation input %s gate %s - there may be errors',
max_i, gate)
if np.max(max_r) > 30:
LOG.warning(
'max bits in accumulation state %s gate %s - there may be errors',
max_i, gate)
# LUT activations Q12 -> Q15
act_in_q = 12
act_out_q = 15
int_scale = math.pow(2, -act_in_q)
out_tanh_sig_scale = math.pow(2, -act_out_q)
scale_qtypes = {}
r_pscales = {}
i_pscales = {}
scale_qtypes['r_pscales'] = r_pscales
scale_qtypes['i_pscales'] = i_pscales
for gate, w_scale, max_bits in zip(['i', 'o', 'c', 'f'], w_scales, gate_sum_max_bits):
weight_scale_ratio = w_scale[0]/w_scale[1]
# TODO - decide to scale weights equal
i_pscales[gate] = w_scale[0] * in_q.scale
r_pscales[gate] = w_scale[1] * o_q.scale
if input_bits == 16:
scale_qtypes[f"i_2_{gate}_q"] = qscale = MultMulBiasScaleQType(
scale=i_pscales[gate] / int_scale
)
else:
scale_qtypes[f"i_2_{gate}_q"] = qscale = MultMulBiasScaleQType(
scale=i_pscales[gate] / r_pscales[gate]
)
if input_bits == 16:
i_zp_b = woffs[gate][0]
else:
i_zp_b = woffs[gate][0] * qscale.qbiases.astype(
np.int32) + (1 << (qscale.qnorms.astype(np.int32) - 1))
scale_qtypes[f"r_2_{gate}_q"] = qscale = MultMulBiasScaleQType(
scale=r_pscales[gate] / int_scale
)
if input_bits == 16:
r_zp_b = woffs[gate][1]
in_qs[names[f'{gate}_b']] = QType(
dtype=np.int32,
scale=r_pscales[gate],
offset=r_zp_b,
interleaved_values=[i_zp_b],
quantized_dimension=0
)
else:
r_zp_b = woffs[gate][1] * qscale.qbiases.astype(
np.int32) + (1 << (qscale.qnorms.astype(np.int32) - 1))
in_qs[names[f'{gate}_b']] = QType(
dtype=np.int32,
scale=r_pscales[gate] / qscale.qbiases,
offset=r_zp_b,
interleaved_values=[i_zp_b],
quantized_dimension=0
)
# NOTE - for 16 bit pre-normalize the scales to give us room but make sure it isn't negative
if input_bits == 16:
gate_prenorm = min(np.min([
np.min(scale_qtypes[f"{inp}_2_{gate}_q"].qnorms) for gate in ['i', 'o', 'c', 'f'] for inp in ['i', 'r']
]), 8)
for gate in ['i', 'o', 'c', 'f']:
for inp in ['i', 'r']:
scale_qtypes[f"{inp}_2_{gate}_q"].pre_normalization = gate_prenorm
else:
gate_prenorm = 0
r_pscales['state_out_scale'] = o_q.scale
r_pscales['int_scale'] = int_scale
# ct = c_in * f + c * i
# c * i = Q15 * Q15 -> Q30 -> norm(18) -> Q12
# scale(c_in * f) = Qcell * Q15 (prenorm if 16bit) and scale -> Q12
# ((c_in * f) + (c * i)) in Q12
# scale -> cell_out
# tan(ct) -> Q15
# o * tan(ct) -> Q30
# prenorm and scale
# scale result of c_state_1 * f_gate -> Q15
cell_in_scale = (in_qs[names['c_state']].scale *
out_tanh_sig_scale / out_tanh_sig_scale)
# cell_out from Q15 -> Q7/Q15 scaled
cell_out_scale = out_tanh_sig_scale / in_qs[names['c_state']].scale
state_out_scale = out_tanh_sig_scale / o_q.scale
r_pscales['act_out_scale'] = out_tanh_sig_scale
r_pscales['c_before_scale'] = int_scale
scale_qtypes['cell_in_q'] = MultMulBiasScaleQType(scale=cell_in_scale)
# NOTE - for 16 bit pre-normalize the scales to give us room
if input_bits == 16:
scale_qtypes['cell_in_q'].pre_normalization = 8
scale_qtypes['cell_out_q'] = MultMulBiasScaleQType(
scale=cell_out_scale)
scale_qtypes['state_out_q'] = MultMulBiasScaleQType(
scale=state_out_scale)
scale_qtypes['i_qtype'] = QType(q=act_in_q, dtype=np.int32)
if params.lstm_output_c_state:
out_qs = [o_q, in_qs[names['c_state']]]
else:
out_qs = [o_q]
return QRec.scaled(
in_qs=in_qs,
out_qs=out_qs,
ne16=True,
gate_prenorm=gate_prenorm,
**scale_qtypes,
)
@params_type(LSTMParameters)
@in_qs_constraint({'dtype': np.uint8})
@out_qs_constraint({'dtype': np.uint8})
@option_constraint(force_external_size={8, None}, use_ne16=True)
class LSTMMultMultNE16UInt8(LSTMMultMultNE16Base):
@classmethod
def _quantize(cls, params, in_qs, stats, **kwargs):
return cls._quantize_lstm(params, in_qs, stats, 8, **kwargs)
@params_type(LSTMParameters)
@in_qs_constraint({'dtype': np.uint16})
@out_qs_constraint({'dtype': np.uint16})
@option_constraint(force_external_size=16, use_ne16=True)
class LSTMMultMultNE16UInt16(LSTMMultMultNE16Base):
@classmethod
def _quantize(cls, params, in_qs, stats, **kwargs):
return cls._quantize_lstm(params, in_qs, stats, 16, **kwargs)
|
StarcoderdataPython
|
102340
|
#!/usr/bin/env python
# coding: utf-8
import argparse
import tensorflow as tf
import logging
import os
from nnrecsys.data.yoochoose.input import get_feature_columns, train_input_fn
from nnrecsys.data.yoochoose import constants
from nnrecsys.models.rnn import model_fn
from nnrecsys.training.hooks import ValidationMetricHook
from nnrecsys.utils import file_len
logging.getLogger().setLevel(logging.INFO)
dir_path = os.path.dirname(os.path.realpath(__file__))
def get_estimator(config):
n_items = file_len(constants.VOCABULARY_FILE)
return tf.estimator.Estimator(
model_fn=model_fn,
model_dir=os.path.join(dir_path, '../experiments', config['experiment']),
params={
'feature_columns': get_feature_columns(config),
'k': 20,
'n_items': n_items,
**config
})
def fit(config: dict, reporter=None) -> float:
estimator = get_estimator(config)
os.makedirs(estimator.eval_dir(), exist_ok=True)
early_stopping = tf.contrib.estimator.stop_if_no_decrease_hook(
estimator,
metric_name='loss',
max_steps_without_decrease=10_000_000 / config['batch_size'],
min_steps=2_000_000 / config['batch_size'])
eval_hooks = [ValidationMetricHook(
estimator,
lambda global_step, metrics: reporter(recall_at_k=metrics['recall_at_k'],
neg_loss=-metrics['loss']))] if reporter else []
eval_result, export_result = tf.estimator.train_and_evaluate(
estimator,
train_spec=tf.estimator.TrainSpec(lambda: train_input_fn(constants.TRAIN_PATH,
constants.VOCABULARY_FILE,
config['batch_size']).repeat(),
hooks=[early_stopping]),
eval_spec=tf.estimator.EvalSpec(lambda: train_input_fn(constants.VAL_PATH,
constants.VOCABULARY_FILE,
config['batch_size']),
hooks=eval_hooks))
return eval_result
def evaluate(config):
estimator = get_estimator(config)
estimator.evaluate(lambda: train_input_fn(constants.TEST_PATH, constants.VOCABULARY_FILE, batch_size=config['batch_size']))
def parse_args():
parser = argparse.ArgumentParser(description='GRU4Rec args')
parser.add_argument('--experiment', required=True, type=str)
parser.add_argument('--evaluate', type=str)
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--rnn_layers', default=1, type=int)
parser.add_argument('--rnn_units', default=100, type=int)
parser.add_argument('--lr', default=0.01, type=float)
parser.add_argument('--hidden_activation', default='relu', type=str)
parser.add_argument('--dropout', default=1, type=float)
return parser.parse_args()
def main():
cli = parse_args()
config = {'experiment': cli.experiment,
'batch_size': cli.batch_size,
'rnn_layers': cli.rnn_layers,
'rnn_units': cli.rnn_units,
'lr': cli.lr,
'hidden_activation': cli.hidden_activation,
'dropout': cli.dropout}
if cli.evaluate:
logging.info('Evaluating')
evaluate(config)
else:
logging.info('Fitting')
fit(config)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1657521
|
# -*- coding: utf-8 -*-
def get_unique_method_id(view_method, request):
# todo: test me as UniqueMethodIdKeyBit
return "%s|%s" % (view_method.__name__, request.path.replace('/', '|'))
# return u'.'.join([
# view_instance.__module__,
# view_instance.__class__.__name__,
# view_method.__name__
# ])
def get_unique_method_id2(view_method, path):
# todo: test me as UniqueMethodIdKeyBit
return "%s|%s" % (view_method, path.replace('/', '|'))
class KeyBitBase(object):
def __init__(self, params=None):
self.params = params
def get_data(self, params, view_instance, view_method, request, args, kwargs):
"""
@rtype: dict
"""
raise NotImplementedError()
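# Minimal sketch (an assumption, not part of the original module) of a concrete
# key bit built on the helpers above, matching the "test me as
# UniqueMethodIdKeyBit" TODO:
class UniqueMethodIdKeyBit(KeyBitBase):
    def get_data(self, params, view_instance, view_method, request, args, kwargs):
        # Reuse the module-level helper to build a cache-key fragment.
        return {'unique_method_id': get_unique_method_id(view_method, request)}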
|
StarcoderdataPython
|
102455
|
from django import template
from django.db.models import Model
from django.http import Http404
from django.urls import reverse, resolve
from formfactory import models
register = template.Library()
@register.tag()
def render_form(parser, token):
"""{% render_form <form_slug> %}"""
tokens = token.split_contents()
if len(tokens) != 2:
raise template.TemplateSyntaxError(
"{% render_form <form_slug>/<object> %}"
)
return RenderFormNode(variable=tokens[1])
class RenderFormNode(template.Node):
def __init__(self, variable):
self.variable = template.Variable(variable)
def render(self, context):
try:
variable = self.variable.resolve(context)
except template.VariableDoesNotExist:
variable = self.variable.var
default_msg = "No FormFactory Form matches the given query. %s" % self.variable
# If the variable is a string type, attempt to find object based on
# slug field, otherwise pass the object along directly.
if not isinstance(variable, Model):
try:
form = models.Form.objects.get(slug=variable)
except models.Form.DoesNotExist:
raise Http404(default_msg)
elif isinstance(variable, models.Form):
form = variable
else:
raise Http404(default_msg)
url = form.absolute_url
view, args, kwargs = resolve(url)
request = context["request"]
# Store original request values.
original_method = request.method
original_path = request.path
original_info = request.path_info
# Assign new request values.
request.method = "GET"
request.path = url
request.path_info = url
# Call view to get result.
kwargs["inclusion_tag"] = True
result = view(request, *args, **kwargs)
# Replace request values with originals.
request.method = original_method
request.path = original_path
request.path_info = original_path
# This does not expect anything other than a TemplateResponse here.
result.render()
html = result.rendered_content
return html
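# Hypothetical template usage (the load name and variables below are
# placeholders, not confirmed by this file):
#   {% load formfactory_tags %}
#   {% render_form "contact-form" %}      slug lookup
#   {% render_form page.related_form %}   Form instance passed directly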
|
StarcoderdataPython
|
3221351
|
import numpy as np
import torch as th
from torch import Tensor
# Reference: https://github.com/aayushmnit/Deep_learning_explorations/blob/master/1_MLP_from_scratch/Building_neural_network_from_scratch.ipynb
class Layer:
"""
A building block. Each layer is capable of performing two things:
- Process input to get output: output = layer.forward(input)
- Propagate gradients through itself: grad_input = layer.backward(input, grad_output)
Some layers also have learnable parameters which they update during layer.backward.
"""
def __init__(self):
"""Here we can initialize layer parameters (if any) and auxiliary stuff."""
# A dummy layer does nothing
pass
def forward(self, input: Tensor) -> Tensor:
"""
Takes input data of shape [batch, input_units], returns output data [batch, output_units]
"""
# A dummy layer just returns whatever it gets as input.
return input
def backward(self, input: Tensor, grad_output: Tensor):
"""
Performs a backpropagation step through the layer, with respect to the given input.
To compute loss gradients w.r.t input, we need to apply chain rule (backprop):
d loss / d x = (d loss / d layer) * (d layer / d x)
Luckily, we already receive d loss / d layer as input, so you only need to multiply it by d layer / d x.
If our layer has parameters (e.g. dense layer), we also need to update them here using d loss / d layer
"""
# The gradient of a dummy layer is precisely grad_output, but we'll write it more explicitly
num_units = input.shape[1]
d_layer_d_input = th.eye(num_units)
return th.matmul(grad_output, d_layer_d_input) # chain rule (th.dot only accepts 1-D tensors)
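# A minimal sketch (not in the original notebook) of a concrete layer that
# follows the interface above: ReLU has no learnable parameters, so backward
# only pushes grad_output through the elementwise derivative.
class ReLU(Layer):
    def forward(self, input: Tensor) -> Tensor:
        # Zero out negative activations.
        return th.clamp(input, min=0.0)

    def backward(self, input: Tensor, grad_output: Tensor):
        # d relu / d x is 1 where input > 0, else 0 (chain rule).
        relu_grad = (input > 0).to(grad_output.dtype)
        return grad_output * relu_grad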
|
StarcoderdataPython
|
1723273
|
<filename>Chaos/logistic.py
from manimlib.imports import *
def logistic(r,x):
return r*x*(1-x)
r = 3.5
class logistic_map(MovingCameraScene):
def func(self,t):
return np.array([t,logistic(r,t),0])
def construct(self):
s = Square(color=YELLOW)
s.scale(0.5);s.move_to(np.array([0.5,0.5,0]))
T = Title("Logistic Map");T.set_color(GREEN)
self.play(Write(T))
self.wait(2)
self.play(FadeOut(T))
x_axis = Line(np.array([-3,0.1,0]),np.array([3,0.1,0]))
x_axis.set_color(GREEN);x_axis.set_stroke(None,3.1)
self.play(self.camera_frame.set_width,s.get_width()*1.5,self.camera_frame.move_to,s)
self.camera_frame.shift(0.015*UP)
func=ParametricFunction(self.func, t_min=0,t_max=1, fill_opacity=0)
func.set_color(RED)
self.play(Write(x_axis))
self.play(Write(func))
X = []; Y = []; x0 = 0.1;
for i in range(60):
x = x0
y = logistic(r,x0)
lin = Line(np.array([x,x,0]),np.array([x,y,0]),color=BLUE)
lin1 = Line(np.array([x,y,0]),np.array([y,y,0]),color=BLUE)
lin.set_stroke(None,1.8); lin1.set_stroke(None,1.8)
D = Dot(radius=0.006,color=YELLOW)
D.move_to(np.array([x,y,0]))
self.play(Write(lin),run_time=0.2)
self.play(Write(lin1),run_time=0.2)
self.add(D)
X.append(x);Y.append(y)
x0 = y
class bifurcation(MovingCameraScene):
def construct(self):
iterations=200000
r = np.linspace(2.5,4.0,200000)
N = NumberPlane();self.add(N)
s = Polygon(np.array([2.5,0,0]),np.array([4,0,0]),np.array([4,1,0]),np.array([2.5,1,0]))
T = Title("Bifurcation Diagram");T.set_color(GREEN)
self.play(Write(T))
self.wait(2)
self.play(FadeOut(T))
self.play(self.camera_frame.set_width,s.get_width()*1.5,self.camera_frame.move_to,s)
self.wait(); x = 0.00001; X=[];dot=[]
for i in range(iterations):
x = logistic(r[i],x)
X.append(x)
c = Dot(radius=0.00055,color=YELLOW)
c.move_to(np.array([r[i],x,0]))
dot.append(c)
#for j in range(5000):
# self.play(Write(dot[i]),run_time=0.005)
q = VGroup(*[dot[i] for i in range(len(dot))])
self.add(q)
self.wait(3)
|
StarcoderdataPython
|
1719001
|
#!/usr/bin/env python3
# <NAME> (bithack)
# 111601008
# Question No. 2a
# Here we are again. This time no bunny is floating in the middle of the ocean.
# And no agent want to find the dirt. This time we will write a script to count the
# number of the words in a file.
import sys
import os.path
def main(argv):
if len(argv) != 2:
print("Usage: python3 counting_words.py filepath")
sys.exit()
else:
try:
word_count = 0
current_state = 0
filepath = sys.argv[1]
if os.path.isfile(filepath):
# if the filepath exists and is a file
with open(filepath) as f:
while True:
ch = f.read(1)
print(ch)
if not ch:
break
if current_state==0:
if ch != '\n' and ch != ' ' and ch !='\t':
current_state=1
# print("incrementing")
word_count+=1
else:
if ch == '\n' or ch == ' ' or ch == '\t':
current_state=0
print("Total Words: " , word_count)
except:
print("Something went wrong.")
sys.exit()
if __name__ == "__main__":
main(sys.argv)
|
StarcoderdataPython
|
1772136
|
<filename>notebooks/data_generator.py<gh_stars>0
# Request Deps ----->
import pandas as pd
import requests
import json
import csv
import time
import datetime as dt
import praw
from psaw import PushshiftAPI
# Request Deps ----->
api = PushshiftAPI()
def grabdata(iterations,outputs):
runge = range(1,iterations)
for i in runge:
start_epoch = int(dt.datetime(2017, 1, 1).timestamp())
hello = list(api.search_submissions(after=start_epoch,
# Dearest Eric: Uncomment this two to put in a "specific sub":
# subreddit='secret',
filter=['url', 'title', 'subreddit','selftext','score'],
limit=outputs))
df = pd.DataFrame(hello)
out = "Data/final_data" + str(i)
print("Data Completed:")
print(out)
df.to_csv(out)
print("output at:",out)
grabdata(50,25000)
|
StarcoderdataPython
|
31294
|
from typing import Any, List, Mapping, Sequence
import jsonschema
from dataclasses import dataclass, field
from sqlalchemy.orm import scoped_session
from vardb.datamodel.jsonschemas.load_schema import load_schema
from vardb.datamodel import annotation
@dataclass
class ConverterConfig:
elements: Sequence[Mapping[str, Any]]
@dataclass(init=False)
class AnnotationImportConfig:
name: str
converter_config: ConverterConfig
def __init__(self, name: str, converter_config: Mapping[str, Any]) -> None:
self.name = name
self.converter_config = ConverterConfig(**converter_config)
@dataclass(init=False)
class AnnotationConfig:
deposit: Sequence[AnnotationImportConfig]
view: List = field(default_factory=list)
def __init__(self, deposit: Sequence[Mapping[str, Any]], view: List) -> None:
self.view = view
self.deposit = list()
for sub_conf in deposit:
self.deposit.append(AnnotationImportConfig(**sub_conf))
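# Illustrative shape of the config these dataclasses accept (all values are
# placeholders):
#   AnnotationConfig(
#       deposit=[{"name": "vep", "converter_config": {"elements": []}}],
#       view=[],
#   )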
def deposit_annotationconfig(
session: scoped_session, annotationconfig: Mapping[str, Any]
) -> annotation.AnnotationConfig:
schema = load_schema("annotationconfig.json")
jsonschema.validate(annotationconfig, schema)
active_annotationconfig = (
session.query(annotation.AnnotationConfig)
.order_by(annotation.AnnotationConfig.id.desc())
.limit(1)
.one_or_none()
)
# Check if annotation config is equal. Note that for deposit, we do not care about order or duplicity
# Since the deposit is a list of dicts, we can not check set-equality (dicts are not hashable),
# so we check that all items in incoming are in active, and vice versa.
if (
active_annotationconfig
and all(x in active_annotationconfig.deposit for x in annotationconfig["deposit"])
and all(x in annotationconfig["deposit"] for x in active_annotationconfig.deposit)
and active_annotationconfig.view == annotationconfig["view"]
):
raise RuntimeError("The annotation config matches the current active annotation config.")
ac_obj = annotation.AnnotationConfig(
deposit=annotationconfig["deposit"], view=annotationconfig["view"]
)
session.add(ac_obj)
session.flush()
return ac_obj
|
StarcoderdataPython
|
3212865
|
<filename>ex25.py
def break_words(stuff):
"""This function will break up words for us."""
words=stuff.split(' ')
return words
def sort_words(words):
"""Sorts the words."""
return sorted(words)
def print_first_word(words):
"""Prints the first word after popping it off."""
word=words.pop(0)
print(word)
def print_last_word(words):
"""prints the last word after popping it off."""
word=words.pop(-1)
print(word)
def sort_sentence(sentence):
"""Takes in a full sentence and returns the sorted words."""
words=break_words(sentence)
return sort_words(words)
def print_first_and_last(sentence):
"""Prints the first and last words of the sentence."""
words=break_words(sentence)
print_first_word(words)
print_last_word(words)
def print_first_and_last_sorted(sentence):
"""Sorts the words then prints the first and last one."""
words=sort_sentence(sentence)
print_first_word(words)
print_last_word(words)
import ex25
sentence="All good things come to those who wait."
words=ex25.break_words(sentence)
words
sorted_words=ex25.sort_words(words)
sorted_words
ex25.print_first_word(words)
ex25.print_last_word(words)
words
ex25.print_first_word(sorted_words)
ex25.print_last_word(sorted_words)
sorted_words
sorted_words=ex25.sort_sentence(sentence)
sorted_words
ex25.print_first_and_last(sentence)
ex25.print_first_and_last_sorted(sentence)
|
StarcoderdataPython
|
3346249
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import ElasticsearchTestCase
class TestUnicode(ElasticsearchTestCase):
def test_indices_analyze(self):
self.client.indices.analyze(body='{"text": "привет"}')
|
StarcoderdataPython
|
56667
|
<gh_stars>0
import gin
import librosa
import numpy as np
import tensorflow as tf
import pretty_midi
PHONEMES = ['sil', 'b','d','f','g','h','j','k','l','m','n','p','r','s','t','v','w','z','zh','ch','sh','th','dh','ng','y','ae','ei','e','ii','i','ai','a','ou','u','ao','uu','oi','au','eo','er','oo']
PHONEME2ID={}
for i,p in enumerate(PHONEMES):
PHONEME2ID[p] = i
def load_midi(filename):
midi_data = pretty_midi.PrettyMIDI(filename)
if len(midi_data.instruments) > 1:
raise AssertionError(f"More than 1 track detected in {filename}")
return midi_data.instruments[0].notes
def load_text(filename):
text = open(filename).read()
text = ' '.join(text.split())
graph = text.split(' ')
if graph[-1] == '':
graph = graph[:-1]
return graph
@gin.register
def annotate_f0_and_phoneme(audio, sample_rate, midi_filename, phoneme_filename, frame_rate):
"""Parse Fundamental frequency (f0) annotations from midi_filename once per frame_rate,
Also parse phoneme annotations from phoneme_filename once per frame_rate
Args:
audio: Numpy ndarray of single audio (16kHz) example. Shape [audio_length,].
sample_rate: Sample rate in Hz.
midi_filename: path to MIDI file containing pitch annotations for audio.
phoneme_filename: text file of syllables corresponding to notes in midi_filename
frame_rate: Rate of f0 frames in Hz.
Returns:
f0_hz: Fundamental frequency in Hz. Shape [n_frames,].
phoneme_frames: Index of phonemes. Shape [n_frames,].
"""
audio_len_sec = audio.shape[-1] / float(sample_rate)
num_frames = int(audio_len_sec * frame_rate)
f0_hz = np.zeros(num_frames, dtype=np.float32)
phoneme_frames = np.zeros(num_frames, dtype=np.int64)
midi_data = load_midi(midi_filename)
phoneme_data = load_text(phoneme_filename)
for i,m in enumerate(midi_data):
start_frame = int(m.start * frame_rate)
end_frame = int(m.end * frame_rate)
f0_hz[start_frame:end_frame] = librosa.midi_to_hz(m.pitch)
phonemes = phoneme_data[i].split("_")
num_phonemes = len(phonemes)
frames_per_phoneme = int((end_frame - start_frame) / num_phonemes)
phoneme_frames[start_frame:end_frame] = PHONEME2ID[phonemes[-1]] #default to last phoneme to deal with rounding
for p in range(num_phonemes):
a = start_frame + (p * frames_per_phoneme)
b = a + frames_per_phoneme
phoneme_frames[a:b] = PHONEME2ID[phonemes[p]]
return f0_hz, phoneme_frames
|
StarcoderdataPython
|
3352836
|
<filename>exampleread.py
##############################################################################
## This file is part of 'smurftestapps'.
## It is subject to the license terms in the LICENSE.txt file found in the
## top-level directory of this distribution and at:
## https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
## No part of 'smurftestapps', including this file,
## may be copied, modified, propagated, or distributed except according to
## the terms contained in the LICENSE.txt file.
##############################################################################
# example file read for data
import numpy
import matplotlib.pyplot as plt
import sys
if len(sys.argv) < 2:
fn = '/data/smurf_stream/testout.txt'
else:
fn = sys.argv[1]
dat = numpy.loadtxt(fn)
print(dat.shape)
tm = dat[:,0]
for i in range(40,50):
plt.figure()
plt.plot(tm, dat[:,i], 'r-')
plt.title('%i' % (i))
plt.show()
plt.show()
|
StarcoderdataPython
|
1681725
|
"""
Nomad Regions Endpoint Access
"""
from nomad.api import base
class Regions(base.Endpoint):
"""Get information about the regions nomad knows about. This are
generally very low level, and not really useful for clients.
"""
def list(self):
"""Returns the known region names
:rtype: list
"""
return self._get_list()
|
StarcoderdataPython
|
3354686
|
import os
import logging
import log_wrapper
import sys
FILE_NAME = "./attributes.json"
STRING_TO_FIND = '"x-id":'
BASE = 140
OFFSET = 150
def renumber(argv):
"""
Read file and re-number all parameters/attributes.
This should only be used during initial development.
Once 'released' the x-id cannot change.
Does not support commands being in the same file.
"""
logger = logging.getLogger('renumber')
lst = []
count = OFFSET
with open(FILE_NAME, 'r') as fin:
logger.debug("Opened file " + FILE_NAME)
for line in fin:
default_append = True
if STRING_TO_FIND in line:
try:
x_id, number = line.split()
# Leave room for another (possibly incomplete) attribute list
number = number.strip(',')
if int(number) >= BASE:
s = x_id + ' ' + str(count) + ',\n'
lst.append(s)
default_append = False
count += 1
except:
logger.debug(f"Couldn't parse: {line.strip()}")
if default_append:
lst.append(line)
if len(lst) > 0:
with open(FILE_NAME, 'w') as fout:
fout.writelines(lst)
logger.debug("Wrote file " + FILE_NAME)
logger.info(f'{count - OFFSET} total attributes')
if __name__ == "__main__":
log_wrapper.setup(__file__, console_level=logging.DEBUG, file_mode='a')
renumber(sys.argv)
|
StarcoderdataPython
|
62390
|
from Statistics.ZScore import zscore
from Statistics.Mean import mean
from Statistics.StandardDeviation import standard_deviation
from Calculator.Subtraction import subtraction
from Calculator.Division import division
from Calculator.Multiplication import multiplication
from Calculator.Addition import addition
def population_correlation_coefficient(numbers, numbers1):
m = zscore(numbers)
n = zscore(numbers1)
value = list(map(lambda a, b: a * b, m, n))
p = division(len(value), sum(value))
return p
"""
x = mean(numbers)
y = mean(numbers1)
m = []
n = []
t = 0
for i in numbers:
zn = division(standard_deviation(numbers), subtraction(x, i))
m.append(zn)
for i in numbers1:
zm = division(standard_deviation(numbers1), subtraction(y, i))
n.append(zm)
for i in range(len(numbers)):
jk = multiplication(m[i], n[i])
t = addition(t, jk)
res = division(subtraction(1, len(numbers), t))
return res
"""
|
StarcoderdataPython
|
192821
|
<reponame>ecoo-app/ecoo-backend
# Generated by Django 3.1 on 2020-09-10 21:18
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("profiles", "0005_userprofile_place_of_origin"),
]
operations = [
migrations.AddField(
model_name="companyprofile",
name="phone_number",
field=models.CharField(
blank=True,
max_length=128,
validators=[
django.core.validators.RegexValidator(
"/(\\b(0041|0)|\\B\\+41)(\\s?\\(0\\))?(\\s)?[1-9]{2}(\\s)?[0-9]{3}(\\s)?[0-9]{2}(\\s)?[0-9]{2}\\b/",
"No valid swiss phone number",
)
],
),
),
]
|
StarcoderdataPython
|
3313808
|
class Module:
name = ""
info = ""
options = dict()
def run():
pass
|
StarcoderdataPython
|
3354732
|
<filename>vyperlogix/gds/man2c.py
'''
Converts a text file into a C function called manpage that prints
the indicated text to a stream.
Copyright (C) 2002 GDS Software
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
MA 02111-1307 USA
See http://www.gnu.org/licenses/licenses.html for more details.
'''
import sys, string
__version__ = "$Id: man2c.py,v 1.4 2002/08/21 17:37:39 donp Exp $"
def GetLines():
ifp = open(sys.argv[1])
lines = ifp.readlines()
ifp.close()
TransformLines(lines)
return lines
def PrintHeader():
print '''/* Automatically generated by man2c.py */\n
#include <stdio.h>
void manpage(FILE *ofp)
{
fprintf(ofp,
'''
def PrintLines(lines):
for line in lines:
print line
def PrintTrailer():
print " );"
print "\n}\n"
def TransformLines(lines):
'''Escape the characters so that they printf OK. This includes
double quotes, backslashes, tabs, and % characters.'''
for ix in xrange(len(lines)):
line = string.rstrip(lines[ix])
line = string.replace(line, "\\", r"\\")
line = string.replace(line, "\"", r"\"")
line = string.replace(line, "%", "%%")
line = string.replace(line, "\t", r"\t")
line = "\"" + line + "\\n\""
lines[ix] = line
def main():
if (len(sys.argv) != 2):
sys.stderr.write("Usage: man2c file\n")
sys.exit(1)
lines = GetLines()
PrintHeader()
PrintLines(lines)
PrintTrailer()
main()
|
StarcoderdataPython
|
1740368
|
<gh_stars>0
import socket
import select
import time
from threading import Thread, Timer
from typing import Dict, List, Tuple
import packet
from packet import MAX_PACKET_SIZE, Packet, SEQ_LIM, FINISHER_DATA
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
# itertools recipe: grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
# this slicing variant does not pad with fillvalue; see the example below
# from itertools import zip_longest
# args = [iter(iterable)] * n
# return (''.join(chr(i) for i in d if i is not None).encode()
# for d in zip_longest(*args, fillvalue=fillvalue))
for i in range((len(iterable) + n - 1) // n):
start = i * n
yield iterable[start:start + n]
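# Example of the slicing behaviour (the fillvalue argument is unused in this
# variant): list(grouper(b'hello', 2)) -> [b'he', b'll', b'o']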
class SirSocket:
"""Represents a socket for the SIR protocol."""
BUFFER_SIZE = 8192
TIMEOUT = 0.5
def __init__(self, address, start_seq):
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._sock.connect(address)
self.send_buf: Dict[int, bytes] = {}
self.recv_buf: Dict[int, bytes] = {}
self.acked_pkts = set()
self.timers = {}
self.next_seq = start_seq
print(self._sock.getpeername())
print(self._sock.getblocking())
def _start_timer(self, seq_num):
timer = Timer(self.TIMEOUT, self._handle_timeout, (seq_num, ))
timer.start()
self.timers[seq_num] = timer
def _handle_timeout(self, seq_num):
print(f"Timed out for {seq_num=}")
try:
self._sock.send(self.send_buf[seq_num])
except KeyError:
print(f"{seq_num=} not in send buffer during timeout.")
except (ConnectionRefusedError, ConnectionAbortedError):
print(f"Connection closed {seq_num=}.")
self.send_buf.clear()
else:
self._start_timer(seq_num)
def _recv(self):
while True:
print("reading...")
try:
pack = self._sock.recv(MAX_PACKET_SIZE, socket.MSG_DONTWAIT)
self._handle_pkt(pack)
print("read 1 pack")
except BlockingIOError:
print("Breaking...")
break
except (ConnectionRefusedError, ConnectionAbortedError, OSError):
print("Connection closed.")
self.send_buf.clear()
def _send_ack(self, seq_num):
print(f"Sending ack for {seq_num=}")
self._sock.send(packet.Packet(seq_num, True, False, b"").into_buf())
def _handle_pkt(self, pkt):
try:
packet = Packet.from_buf(pkt)
print(f"Received: seq_no = {packet.seq_no} data = {packet.data}")
except AssertionError:
print("Corrupted packet received")
return
if packet.ack: # packet is an ACK
try:
self.timers[packet.seq_no].cancel()
del self.timers[packet.seq_no]
except KeyError:
print("Got result")
if packet.data == FINISHER_DATA:
for timer in self.timers.values():
timer.cancel()
return
self.timers[(packet.seq_no - 1) % SEQ_LIM].cancel()
del self.timers[(packet.seq_no - 1) % SEQ_LIM]
self.recv_buf[packet.seq_no] = packet.data
elif packet.nak:
if packet.seq_no in self.timers:
self.timers[packet.seq_no].cancel()
self._sock.send(self.send_buf[packet.seq_no])
self._start_timer(packet.seq_no)
elif len(self.recv_buf) < self.BUFFER_SIZE:
self._send_ack(packet.seq_no)
self.recv_buf[packet.seq_no] = packet.data
print(f"Received new packet with {packet.seq_no=}.")
else:
print(f"Buffer Full: {packet.seq_no=}")
def read(self) -> List[Tuple[int, bytes]]:
"""Read the data sent via datagram by the connected socket.
Max size of data will be 65841 bytes.
If no data is available, it returns an empty string.
"""
self._recv()
items = self.recv_buf.items()
self.recv_buf.clear()
return sorted(items)
def write(self, data):
"""Send the data (of type bytes) to the connected socket.
The data should be less than 65481 bytes long.
"""
left_space = packet.DATA_SIZE * (self.BUFFER_SIZE - len(self.send_buf))
print("in write", data)
if left_space < len(data):
print("in write1")
raise ValueError(("Too much data.", left_space))
for data in grouper(data, packet.DATA_SIZE):
print("in write2")
pkt = packet.Packet(self.next_seq, False, False, data).into_buf()
print('Sending packet: ', pkt)
self.send_buf[self.next_seq] = pkt
self._sock.sendall(pkt)
self._start_timer(self.next_seq)
self.next_seq = (self.next_seq + 1) % (packet.SEQ_LIM)
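# Hypothetical usage sketch (address, port and payload are placeholders):
#   sock = SirSocket(('127.0.0.1', 9000), start_seq=0)
#   sock.write(b'hello world')        # packetised, sent, retried on timeout
#   for seq_no, data in sock.read():  # drains whatever has arrived, in order
#       print(seq_no, data)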
|
StarcoderdataPython
|
1742798
|
import datetime
import enum
import attr
import six
from dbnd._core.utils.timezone import make_aware, utcnow
RESULT_PARAM = "result"
CURRENT_DATETIME = utcnow()
CURRENT_TIME_STR = CURRENT_DATETIME.strftime("%Y%m%d_%H%M%S")
CURRENT_DATE = CURRENT_DATETIME.date()
class EnvLabel(object): # env label
dev = "dev"
test = "test"
staging = "stage"
qa = "qa"
prod = "prod"
class CloudType(object):
local = "local"
gcp = "gcp"
aws = "aws"
azure = "azure"
class TaskExecutorType(object):
local = "local"
class OutputMode(object):
regular = "regular"
prod_immutable = "prod_immutable"
class _ConfigParamContainer(object):
_type_config = True
@classmethod
def is_type_config(self, cls):
# we can't use issubclass as there are "generic" types that will fail this check
return getattr(cls, "_type_config", False)
class _TaskParamContainer(object):
pass
class DescribeFormat(object):
short = "short"
long = "long"
verbose = "verbose"
# Compute Types
class EnumWithAll(enum.Enum):
@classmethod
def all(cls):
return list(cls)
@classmethod
def all_values(cls):
return [x.value for x in cls]
@classmethod
def all_names(cls):
return [x.name for x in cls]
class SparkClusters(EnumWithAll):
local = "local"
dataproc = "dataproc"
databricks = "databricks"
emr = "emr"
qubole = "qubole"
class ApacheBeamClusterType(object):
local = "local"
dataflow = "dataflow"
class ClusterPolicy(object):
NONE = "none"
CREATE = "create"
KILL = "kill"
EPHERMAL = "ephermal"
ALL = [NONE, CREATE, KILL, EPHERMAL]
class EmrClient(object):
LIVY = "livy"
STEP = "step"
class TaskType(object):
pipeline = "pipeline"
python = "python"
spark = "spark"
pyspark = "pyspark"
dataflow = "dataflow"
docker = "docker"
class TaskRunState(EnumWithAll):
SCHEDULED = "scheduled"
QUEUED = "queued"
RUNNING = "running"
SUCCESS = "success"
FAILED = "failed"
CANCELLED = "cancelled"
SHUTDOWN = "shutdown"
UPSTREAM_FAILED = "upstream_failed"
SKIPPED = "skipped"
UP_FOR_RETRY = "up_for_retry"
@staticmethod
def final_states():
return TaskRunState.finished_states() | {
TaskRunState.UPSTREAM_FAILED,
TaskRunState.SKIPPED,
}
@staticmethod
def final_states_str():
return [s.value for s in TaskRunState.final_states()]
@staticmethod
def finished_states():
return {TaskRunState.SUCCESS, TaskRunState.FAILED, TaskRunState.CANCELLED}
@staticmethod
def direct_fail_states():
return {TaskRunState.FAILED, TaskRunState.CANCELLED}
@staticmethod
def fail_states():
return {
TaskRunState.FAILED,
TaskRunState.CANCELLED,
TaskRunState.UPSTREAM_FAILED,
}
@staticmethod
def states_lower_case():
return [state.name.lower() for state in TaskRunState]
REUSED = "reused"
class RunState(EnumWithAll):
RUNNING = "running"
SUCCESS = "success"
FAILED = "failed"
SHUTDOWN = "shutdown"
CANCELLED = "cancelled"
@staticmethod
def finished_states_str():
return [
TaskRunState.SUCCESS.value,
TaskRunState.FAILED.value,
TaskRunState.CANCELLED.value,
]
class AlertStatus(EnumWithAll):
TRIGGERED = "TRIGGERED"
RESOLVED = "RESOLVED"
ACKNOWLEDGED = "ACKNOWLEDGED"
class AlertErrorPolicy(object):
none = ""
all = "all"
task_only = "task_only"
class SystemTaskName(object):
driver_submit = "dbnd_driver_submit"
driver = "dbnd_driver"
task_submit = "dbnd_task_submit"
driver_and_submitter = {driver_submit, driver}
@attr.s
class _DbndDataClass(object):
def asdict(self, filter=None):
return attr.asdict(self, recurse=False, filter=filter)
HEARTBEAT_DISABLED = make_aware(datetime.datetime.fromtimestamp(0))
class ParamValidation(EnumWithAll):
warn = "warn"
error = "error"
disabled = "disabled"
class DbndTargetOperationType(EnumWithAll):
init = "init"
read = "read"
write = "write"
reuse = "reuse"
log = "log"
log_hist = "log_hist"
delete = "delete"
class DbndTargetOperationStatus(EnumWithAll):
OK = "OK"
NOK = "NOK"
class SystemMetrics(EnumWithAll):
Duration = "Duration"
TotalCpuTime = "Total CPU Time"
TotalWallTime = "Total Wall Time"
ColdTotalCpuTime = "Cold Total CPU Time"
ColdTotalWallTime = "Cold Total Wall Time"
@staticmethod
def duration_metrics():
"""Used to select metrics for removal during metrics re-generation"""
return [
s.value
for s in [
SystemMetrics.Duration,
SystemMetrics.TotalCpuTime,
SystemMetrics.TotalWallTime,
SystemMetrics.ColdTotalCpuTime,
SystemMetrics.ColdTotalWallTime,
]
]
class UpdateSource(EnumWithAll):
dbnd = "dbnd"
airflow_monitor = "airflow_monitor"
airflow_tracking = "airflow_tracking"
azkaban_tracking = "azkaban_tracking"
def __eq__(self, other):
if isinstance(other, UpdateSource):
return self.value == other.value
elif isinstance(other, six.string_types):
return str(self) == other or str(self.value) == other
return False
@classmethod
def is_tracking(cls, source):
return source in [UpdateSource.airflow_tracking, UpdateSource.azkaban_tracking]
class MetricSource(object):
user = "user"
system = "system"
histograms = "histograms"
spark = "spark"
@classmethod
def all(cls):
return [cls.user, cls.system, cls.histograms, cls.spark]
@classmethod
def default_sources(cls):
return [cls.user, cls.system, cls.histograms, cls.spark]
@classmethod
def default_sources_str(cls):
return ",".join(cls.default_sources())
AD_HOC_DAG_PREFIX = "DBND_RUN."
class AlertSeverity(object):
CRITICAL = "CRITICAL"
HIGH = "HIGH"
MEDIUM = "MEDIUM"
LOW = "LOW"
@classmethod
def values(cls):
return [cls.CRITICAL, cls.HIGH, cls.MEDIUM, cls.LOW]
TASK_ESSENCE_ATTR = "task_essence"
class TaskEssence(enum.Enum):
ORCHESTRATION = "orchestration"
TRACKING = "tracking"
CONFIG = "config"
@classmethod
def is_task_cls(self, cls):
return (
hasattr(cls, TASK_ESSENCE_ATTR)
and getattr(cls, TASK_ESSENCE_ATTR) != self.CONFIG
)
def is_instance(self, obj):
"""
Checks if the object is include in the essence group.
>>> TaskEssence.TRACKING.is_instance(obj)
"""
return (
hasattr(obj, TASK_ESSENCE_ATTR) and getattr(obj, TASK_ESSENCE_ATTR) == self
)
|
StarcoderdataPython
|
1656937
|
PRIMITIVE_TYPES = (int, str, float, bool)
SEQUENCE_TYPES = (tuple, set, list)
MAPPING_TYPES = dict
ComponentName = str
TargetName = str
|
StarcoderdataPython
|
1773400
|
<reponame>mgorzkowski/abn
#!/usr/bin/env python
from ctypes import *
# Type definitions
# _abn_unit must be the same type like abn_unit in abn/include/abn.h file one
_abn_unit = c_uint32
# main type of ABN library
class _abn_t(Structure):
_fields_ = [("chain", POINTER(_abn_unit)),
("volume", c_uint)]
# pointers
_abn_t_p = POINTER(_abn_t)
_abn_unit_p = POINTER(_abn_unit)
# ABN Wrapper
class ABN:
"""This class wraps ABN library"""
def __init__(self, file):
# Library loading
self.lib = CDLL(file)
self.abn_unit = _abn_unit
self.abn_unit_p = _abn_unit_p
self.abn_t = _abn_t
self.abn_t_p = _abn_t_p
self.completion_code = c_int
self.completion_code_dictionary = {'SUCCESS': 0, 'ERROR': 0x80, 'ERROR_ARGUMENT_INVALID': 0x81}
self.size_of_abn_unit = sizeof(_abn_unit)
# Basic operations type settings
self.lib.abn_create.argtypes = [c_uint]
self.lib.abn_create.restype = self.abn_t_p
self.lib.abn_create_copy.argtypes = [self.abn_t_p]
self.lib.abn_create_copy.restype = self.abn_t_p
self.lib.abn_create_from_string.argtypes = [c_char_p]
self.lib.abn_create_from_string.restype = self.abn_t_p
self.lib.abn_create_empty.argtypes = None
self.lib.abn_create_empty.restype = self.abn_t_p
self.lib.abn_free.argtypes = [self.abn_t_p]
self.lib.abn_free.restype = None
self.lib.abn_reset.argtypes = [self.abn_t_p]
self.lib.abn_reset.restype = None
self.lib.abn_clone.argtypes = [self.abn_t_p, self.abn_t_p]
self.lib.abn_clone.restype = None
self.lib.abn_copy.argtypes = [self.abn_t_p, self.abn_t_p]
self.lib.abn_copy.restype = None
self.lib.abn_is_empty.argtypes = [self.abn_t_p]
self.lib.abn_is_empty.restype = c_bool
self.lib.abn_are_equal.argtypes = [self.abn_t_p, self.abn_t_p]
self.lib.abn_are_equal.restype = c_bool
self.lib.abn_get_byte.argtypes = [self.abn_t_p, c_uint]
self.lib.abn_get_byte.restype = c_byte
self.lib.abn_set_byte.argtypes = [self.abn_t_p, c_byte, c_uint]
self.lib.abn_set_byte.restype = None
self.lib.abn_is_negative.argtypes = [self.abn_t_p]
self.lib.abn_is_negative.restype = c_bool
self.lib.abn_is_positive.argtypes = [self.abn_t_p]
self.lib.abn_is_positive.restype = c_bool
self.lib.abn_is_zero.argtypes = [self.abn_t_p]
self.lib.abn_is_zero.restype = c_bool
self.lib.abn_is_greater.argtypes = [self.abn_t_p, self.abn_t_p]
self.lib.abn_is_greater.restype = c_bool
self.lib.abn_is_less.argtypes = [self.abn_t_p, self.abn_t_p]
self.lib.abn_is_less.restype = c_bool
self.lib.abn_to_string.argtypes = [self.abn_t_p]
self.lib.abn_to_string.restype = c_char_p
self.lib.abn_unit_to_string.argtypes = [self.abn_unit]
self.lib.abn_unit_to_string.restype = c_char_p
# Arithmetic operations type settings
self.lib.abn_add.argtypes = [self.abn_t_p, self.abn_t_p]
self.lib.abn_add.restype = None
self.lib.abn_adu.argtypes = [self.abn_t_p, self.abn_unit]
self.lib.abn_adu.restype = None
self.lib.abn_sub.argtypes = [self.abn_t_p, self.abn_t_p]
self.lib.abn_sub.restype = None
self.lib.abn_subu.argtypes = [self.abn_t_p, self.abn_unit]
self.lib.abn_subu.restype = None
self.lib.abn_inc.argtypes = [self.abn_t_p]
self.lib.abn_inc.restype = None
self.lib.abn_dec.argtypes = [self.abn_t_p]
self.lib.abn_dec.restype = None
self.lib.abn_neg.argtypes = [self.abn_t_p]
self.lib.abn_neg.restype = None
self.lib.abn_mul.argtypes = [self.abn_t_p, self.abn_t_p, self.abn_t_p]
self.lib.abn_mul.restype = None
self.lib.abn_mulu.argtypes = [self.abn_t_p, self.abn_t_p, self.abn_unit]
self.lib.abn_mulu.restype = None
self.lib.abn_smul.argtypes = [self.abn_t_p, self.abn_t_p, self.abn_t_p]
self.lib.abn_smul.restype = None
self.lib.abn_abs.argtypes = [self.abn_t_p]
self.lib.abn_abs.restype = c_bool
# Bit operations type settings
self.lib.abn_not.argtypes = [self.abn_t_p]
self.lib.abn_not.restype = None
self.lib.abn_and.argtypes = [self.abn_t_p, self.abn_t_p, self.abn_t_p]
self.lib.abn_and.restype = None
self.lib.abn_or.argtypes = [self.abn_t_p, self.abn_t_p, self.abn_t_p]
self.lib.abn_or.restype = None
self.lib.abn_xor.argtypes = [self.abn_t_p, self.abn_t_p, self.abn_t_p]
self.lib.abn_xor.restype = None
self.lib.abn_nand.argtypes = [self.abn_t_p, self.abn_t_p, self.abn_t_p]
self.lib.abn_nand.restype = None
self.lib.abn_nor.argtypes = [self.abn_t_p, self.abn_t_p, self.abn_t_p]
self.lib.abn_nor.restype = None
self.lib.abn_shift_left.argtypes = [self.abn_t_p, c_uint]
self.lib.abn_shift_left.restype = None
self.lib.abn_shift_right.argtypes = [self.abn_t_p, c_uint]
self.lib.abn_shift_right.restype = None
self.lib.abn_rotate_left.argtypes = [self.abn_t_p, c_uint]
self.lib.abn_rotate_left.restype = None
self.lib.abn_rotate_right.argtypes = [self.abn_t_p, c_uint]
self.lib.abn_rotate_right.restype = None
# Wrap functions of basic operations
def create(self, *params):
return self.lib.abn_create(*params)
def create_copy(self, *params):
return self.lib.abn_create_copy(*params)
def create_from_string(self, *params):
newParams = (params[0].encode('ascii'),)
return self.lib.abn_create_from_string(*newParams)
def create_empty(self, *params):
return self.lib.abn_create_empty(*params)
def free(self, *params):
return self.lib.abn_free(*params)
def reset(self, *params):
return self.lib.abn_reset(*params)
def clone(self, *params):
return self.lib.abn_clone(*params)
def copy(self, *params):
return self.lib.abn_copy(*params)
def is_empty(self, *params):
return self.lib.abn_is_empty(*params)
def are_equal(self, *params):
return self.lib.abn_are_equal(*params)
def get_byte(self, *params):
return self.lib.abn_get_byte(*params)
def set_byte(self, *params):
return self.lib.abn_set_byte(*params)
def to_string(self, *params):
retval = ""
for byte in self.lib.abn_to_string(*params):
retval += chr(byte)
return retval
    def unit_to_string(self, *params):
        # abn_unit_to_string returns bytes (c_char_p); decode it instead of calling chr() on the whole buffer
        return self.lib.abn_unit_to_string(*params).decode('ascii')
def is_negative(self, *params):
return self.lib.abn_is_negative(*params)
def is_positive(self, *params):
return self.lib.abn_is_positive(*params)
def is_zero(self, *params):
return self.lib.abn_is_zero(*params)
def is_greater(self, *params):
return self.lib.abn_is_greater(*params)
def is_less(self, *params):
return self.lib.abn_is_less(*params)
# Wrap functions of arithmetic operations
def add(self, *params):
return self.lib.abn_add(*params)
def adu(self, *params):
return self.lib.abn_adu(*params)
def sub(self, *params):
return self.lib.abn_sub(*params)
def subu(self, *params):
return self.lib.abn_subu(*params)
def inc(self, *params):
return self.lib.abn_inc(*params)
def dec(self, *params):
return self.lib.abn_dec(*params)
def neg(self, *params):
return self.lib.abn_neg(*params)
def mul(self, *params):
return self.lib.abn_mul(*params)
def mulu(self, *params):
return self.lib.abn_mulu(*params)
def smul(self, *params):
return self.lib.abn_smul(*params)
def abs(self, *params):
return self.lib.abn_abs(*params)
# Wrap functions of bit operations
def bit_not(self, *params):
return self.lib.abn_not(*params)
def bit_and(self, *params):
return self.lib.abn_and(*params)
def bit_or(self, *params):
return self.lib.abn_or(*params)
def bit_xor(self, *params):
return self.lib.abn_xor(*params)
def bit_nand(self, *params):
return self.lib.abn_nand(*params)
def bit_nor(self, *params):
return self.lib.abn_nor(*params)
def shift_left(self, *params):
return self.lib.abn_shift_left(*params)
def shift_right(self, *params):
return self.lib.abn_shift_right(*params)
def rotate_left(self, *params):
return self.lib.abn_rotate_left(*params)
def rotate_right(self, *params):
return self.lib.abn_rotate_right(*params)
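# Usage sketch (not part of the original wrapper): the shared-library path and the string
# passed to create_from_string are assumptions about how the C library is built and called.
def _abn_example():
    abn = ABN("./libabn.so")                  # assumed path to a compiled ABN shared library
    a = abn.create_from_string("000000FF")    # assumed hex-string input format
    b = abn.create_copy(a)
    abn.add(a, b)                             # assumed in-place semantics: a += b
    print(abn.to_string(a))
    abn.free(a)
    abn.free(b)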
|
StarcoderdataPython
|
76801
|
import os
import sys
import collections
import typing
import re
from json import JSONDecoder
from typing import List
import pandas as pd
import requests
from d3m.primitive_interfaces.transformer import TransformerPrimitiveBase
from d3m.primitive_interfaces.base import CallResult
from d3m import container, utils
from d3m.metadata import hyperparams, base as metadata_base
from d3m.container import DataFrame as d3m_DataFrame
from d3m.container import List as d3m_List
from ..utils.geocoding import check_geocoding_server
__author__ = "Distil"
__version__ = "1.0.8"
__contact__ = "mailto:<EMAIL>"
Inputs = container.pandas.DataFrame
Outputs = container.pandas.DataFrame
# LRU Cache helper class
class LRUCache:
def __init__(self, capacity):
self.capacity = capacity
self.cache = collections.OrderedDict()
def get(self, key):
try:
value = self.cache.pop(key)
self.cache[key] = value
return value
except KeyError:
return -1
def set(self, key, value):
try:
self.cache.pop(key)
except KeyError:
if len(self.cache) >= self.capacity:
self.cache.popitem(last=False)
self.cache[key] = value
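# Small illustration of the LRU eviction behaviour of the cache above (not called anywhere;
# the city names and coordinates are made up).
def _lru_cache_example():
    cache = LRUCache(2)
    cache.set("austin", "[-97.74, 30.27]")
    cache.set("boston", "[-71.06, 42.36]")
    cache.get("austin")                       # marks "austin" as most recently used
    cache.set("denver", "[-104.99, 39.74]")   # capacity exceeded: evicts "boston"
    assert cache.get("boston") == -1          # a miss returns -1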
class Hyperparams(hyperparams.Hyperparams):
rampup_timeout = hyperparams.UniformInt(
lower=1,
upper=sys.maxsize,
default=100,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/TuningParameter"
],
description="timeout, how much time to give elastic search database to startup, \
may vary based on infrastructure",
)
target_columns = hyperparams.Set(
elements=hyperparams.Hyperparameter[int](-1),
default=(),
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="indices of column with geolocation formatted as text that should be converted \
to lat,lon pairs",
)
cache_size = hyperparams.UniformInt(
lower=1,
upper=sys.maxsize,
default=2000,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/TuningParameter"
],
description="LRU cache size",
)
class GoatForwardPrimitive(TransformerPrimitiveBase[Inputs, Outputs, Hyperparams]):
"""
This primitive geocodes location names in specified columns into longitude/latitude coordinates.
"""
metadata = metadata_base.PrimitiveMetadata(
{
"id": "c7c61da3-cf57-354e-8841-664853370106",
"version": __version__,
"name": "Goat_forward",
"keywords": ["Geocoder"],
"source": {
"name": __author__,
"contact": __contact__,
"uris": ["https://github.com/kungfuai/d3m-primitives"],
},
"installation": [
{"type": "PIP", "package": "cython", "version": "0.29.16"},
{
"type": metadata_base.PrimitiveInstallationType.PIP,
"package_uri": "git+https://github.com/kungfuai/d3m-primitives.git@{git_commit}#egg=kf-d3m-primitives".format(
git_commit=utils.current_git_commit(os.path.dirname(__file__)),
),
},
{
"type": "UBUNTU",
"package": "default-jre-headless",
"version": "2:1.8-56ubuntu2",
},
{
"type": "TGZ",
"key": "photon-db-latest",
"file_uri": "http://public.datadrivendiscovery.org/photon.tar.gz",
"file_digest": "d7e3d5c6ae795b5f53d31faa3a9af63a9691070782fa962dfcd0edf13e8f1eab",
},
],
"python_path": "d3m.primitives.data_cleaning.geocoding.Goat_forward",
"algorithm_types": [metadata_base.PrimitiveAlgorithmType.NUMERICAL_METHOD],
"primitive_family": metadata_base.PrimitiveFamily.DATA_CLEANING,
}
)
def __init__(
self,
*,
hyperparams: Hyperparams,
random_seed: int = 0,
volumes: typing.Dict[str, str] = None,
) -> None:
super().__init__(
hyperparams=hyperparams,
random_seed=random_seed,
volumes=volumes,
)
self._decoder = JSONDecoder()
self.volumes = volumes
self.goat_cache = LRUCache(self.hyperparams["cache_size"])
def _is_geocoded(self, geocode_result) -> bool:
# check if geocoding was successful or not
if (
geocode_result["features"] and len(geocode_result["features"]) > 0
): # make sure (sub-)dictionaries are non-empty
if geocode_result["features"][0]["geometry"]:
if geocode_result["features"][0]["geometry"]["coordinates"]:
return True
return False
def produce(
self, *, inputs: Inputs, timeout: float = None, iterations: int = None
) -> CallResult[Outputs]:
"""
        Accepts a set of location strings, processes them, and returns a set of longitude/latitude coordinates.
Parameters
----------
inputs: D3M dataframe containing strings representing some geographic locations -
(name, address, etc) - one location per row in the specified target column
Returns
----------
Outputs:
D3M dataframe, with a pair of 2 float columns -- [longitude, latitude] -- per
original row/location column
"""
# confirm that server is responding before proceeding
address = "http://localhost:2322/"
PopenObj = check_geocoding_server(
address, self.volumes, self.hyperparams["rampup_timeout"]
)
# target columns are columns with location tag
target_column_idxs = self.hyperparams["target_columns"]
target_columns = [list(inputs)[idx] for idx in target_column_idxs]
target_columns_long_lat = [
target_columns[i // 2] for i in range(len(target_columns) * 2)
]
outputs = inputs.remove_columns(target_column_idxs)
# geocode each requested location
output_data = []
for i, ith_column in enumerate(target_columns):
j = 0
target_columns_long_lat[2 * i] = (
target_columns_long_lat[2 * i] + "_longitude"
)
target_columns_long_lat[2 * i + 1] = (
target_columns_long_lat[2 * i + 1] + "_latitude"
)
# remove ampersand from strings
inputs_cleaned = inputs[ith_column].apply(
lambda val: re.sub(r"\s*&\s*", r" and ", val)
)
for location in inputs_cleaned:
cache_ret = self.goat_cache.get(location)
row_data = []
if cache_ret == -1:
r = requests.get(address + "api?q=" + location)
tmp = self._decoder.decode(r.text)
if self._is_geocoded(tmp):
row_data = tmp["features"][0]["geometry"]["coordinates"]
self.goat_cache.set(location, str(row_data))
else:
self.goat_cache.set(location, "[float('nan'), float('nan')]")
else:
# cache_ret is [longitude, latitude]
row_data = [eval(cache_ret)[0], eval(cache_ret)[1]]
if len(output_data) <= j:
output_data.append(row_data)
else:
output_data[j] = output_data[j] + row_data
j = j + 1
# need to cleanup by closing the server when done...
PopenObj.kill()
# Build d3m-type dataframe
out_df = pd.DataFrame(
output_data, index=range(inputs.shape[0]), columns=target_columns_long_lat
)
d3m_df = d3m_DataFrame(out_df)
for i, ith_column in enumerate(target_columns_long_lat):
# for every column
col_dict = dict(d3m_df.metadata.query((metadata_base.ALL_ELEMENTS, i)))
col_dict["structural_type"] = type(0.0)
col_dict["semantic_types"] = (
"http://schema.org/Float",
"https://metadata.datadrivendiscovery.org/types/Attribute",
)
col_dict["name"] = target_columns_long_lat[i]
d3m_df.metadata = d3m_df.metadata.update(
(metadata_base.ALL_ELEMENTS, i), col_dict
)
df_dict = dict(d3m_df.metadata.query((metadata_base.ALL_ELEMENTS,)))
df_dict_1 = dict(d3m_df.metadata.query((metadata_base.ALL_ELEMENTS,)))
df_dict["dimension"] = df_dict_1
df_dict_1["name"] = "columns"
df_dict_1["semantic_types"] = (
"https://metadata.datadrivendiscovery.org/types/TabularColumn",
)
df_dict_1["length"] = d3m_df.shape[1]
d3m_df.metadata = d3m_df.metadata.update((metadata_base.ALL_ELEMENTS,), df_dict)
return CallResult(outputs.append_columns(d3m_df))
|
StarcoderdataPython
|
3850
|
<filename>main.py
import tensorflow as tf
import os.path
import warnings
from distutils.version import LooseVersion
import glob
import helper
import project_tests as tests
#--------------------------
# USER-SPECIFIED DATA
#--------------------------
# Tune these parameters
NUMBER_OF_CLASSES = 2
IMAGE_SHAPE = (160, 576)
EPOCHS = 20
BATCH_SIZE = 1
LEARNING_RATE = 0.0001
DROPOUT = 0.75
# Specify these directory paths
DATA_DIRECTORY = './data'
RUNS_DIRECTORY = './runs'
TRAINING_DATA_DIRECTORY ='./data/data_road/training'
NUMBER_OF_IMAGES = len(glob.glob('./data/data_road/training/calib/*.*'))
VGG_PATH = './data/vgg'
all_training_losses = [] # Used for plotting to visualize if our training is going well given parameters
#--------------------------
# DEPENDENCY CHECK
#--------------------------
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
#--------------------------
# PLACEHOLDER TENSORS
#--------------------------
correct_label = tf.placeholder(tf.float32, [None, IMAGE_SHAPE[0], IMAGE_SHAPE[1], NUMBER_OF_CLASSES])
learning_rate = tf.placeholder(tf.float32)
keep_prob = tf.placeholder(tf.float32)
#--------------------------
# FUNCTIONS
#--------------------------
def load_vgg(sess, vgg_path):
"""
Load Pretrained VGG Model into TensorFlow.
sess: TensorFlow Session
vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3, layer4, layer7)
"""
# load the model and weights
model = tf.saved_model.loader.load(sess, ['vgg16'], vgg_path)
# Get Tensors to be returned from graph
graph = tf.get_default_graph()
image_input = graph.get_tensor_by_name('image_input:0')
keep_prob = graph.get_tensor_by_name('keep_prob:0')
layer3 = graph.get_tensor_by_name('layer3_out:0')
layer4 = graph.get_tensor_by_name('layer4_out:0')
layer7 = graph.get_tensor_by_name('layer7_out:0')
return image_input, keep_prob, layer3, layer4, layer7
def conv_1x1(layer, layer_name):
""" Return the output of a 1x1 convolution of a layer """
return tf.layers.conv2d(inputs = layer,
filters = NUMBER_OF_CLASSES,
kernel_size = (1, 1),
strides = (1, 1),
name = layer_name)
def upsample(layer, k, s, layer_name):
""" Return the output of transpose convolution given kernel_size k and strides s """
return tf.layers.conv2d_transpose(inputs = layer,
filters = NUMBER_OF_CLASSES,
kernel_size = (k, k),
strides = (s, s),
padding = 'same',
name = layer_name)
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes = NUMBER_OF_CLASSES):
"""
Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.
vgg_layerX_out: TF Tensor for VGG Layer X output
num_classes: Number of classes to classify
return: The Tensor for the last layer of output
"""
# Use a shorter variable name for simplicity
layer3, layer4, layer7 = vgg_layer3_out, vgg_layer4_out, vgg_layer7_out
# Apply a 1x1 convolution to encoder layers
layer3x = conv_1x1(layer = layer3, layer_name = "layer3conv1x1")
layer4x = conv_1x1(layer = layer4, layer_name = "layer4conv1x1")
layer7x = conv_1x1(layer = layer7, layer_name = "layer7conv1x1")
# Add decoder layers to the network with skip connections and upsampling
# Note: the kernel size and strides are the same as the example in Udacity Lectures
# Semantic Segmentation Scene Understanding Lesson 10-9: FCN-8 - Decoder
decoderlayer1 = upsample(layer = layer7x, k = 4, s = 2, layer_name = "decoderlayer1")
decoderlayer2 = tf.add(decoderlayer1, layer4x, name = "decoderlayer2")
decoderlayer3 = upsample(layer = decoderlayer2, k = 4, s = 2, layer_name = "decoderlayer3")
decoderlayer4 = tf.add(decoderlayer3, layer3x, name = "decoderlayer4")
decoderlayer_output = upsample(layer = decoderlayer4, k = 16, s = 8, layer_name = "decoderlayer_output")
return decoderlayer_output
def optimize(nn_last_layer, correct_label, learning_rate, num_classes = NUMBER_OF_CLASSES):
"""
Build the TensorFLow loss and optimizer operations.
nn_last_layer: TF Tensor of the last layer in the neural network
correct_label: TF Placeholder for the correct label image
learning_rate: TF Placeholder for the learning rate
num_classes: Number of classes to classify
return: Tuple of (logits, train_op, cross_entropy_loss)
"""
# Reshape 4D tensors to 2D, each row represents a pixel, each column a class
logits = tf.reshape(nn_last_layer, (-1, num_classes))
class_labels = tf.reshape(correct_label, (-1, num_classes))
# The cross_entropy_loss is the cost which we are trying to minimize to yield higher accuracy
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits = logits, labels = class_labels)
cross_entropy_loss = tf.reduce_mean(cross_entropy)
# The model implements this operation to find the weights/parameters that would yield correct pixel labels
train_op = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy_loss)
return logits, train_op, cross_entropy_loss
def train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
cross_entropy_loss, input_image,
correct_label, keep_prob, learning_rate):
"""
Train neural network and print out the loss during training.
sess: TF Session
epochs: Number of epochs
batch_size: Batch size
get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)
train_op: TF Operation to train the neural network
cross_entropy_loss: TF Tensor for the amount of loss
input_image: TF Placeholder for input images
correct_label: TF Placeholder for label images
keep_prob: TF Placeholder for dropout keep probability
learning_rate: TF Placeholder for learning rate
"""
for epoch in range(EPOCHS):
losses, i = [], 0
for images, labels in get_batches_fn(BATCH_SIZE):
i += 1
feed = { input_image: images,
correct_label: labels,
keep_prob: DROPOUT,
learning_rate: LEARNING_RATE }
_, partial_loss = sess.run([train_op, cross_entropy_loss], feed_dict = feed)
print("---> iteration: ", i, " partial loss:", partial_loss)
losses.append(partial_loss)
training_loss = sum(losses) / len(losses)
all_training_losses.append(training_loss)
print("------------------")
print("epoch: ", epoch + 1, " of ", EPOCHS, "training loss: ", training_loss)
print("------------------")
def run_tests():
tests.test_layers(layers)
tests.test_optimize(optimize)
tests.test_for_kitti_dataset(DATA_DIRECTORY)
tests.test_train_nn(train_nn)
def run():
""" Run a train a model and save output images resulting from the test image fed on the trained model """
# Get vgg model if we can't find it where it should be
helper.maybe_download_pretrained_vgg(DATA_DIRECTORY)
# A function to get batches
get_batches_fn = helper.gen_batch_function(TRAINING_DATA_DIRECTORY, IMAGE_SHAPE)
with tf.Session() as session:
# Returns the three layers, keep probability and input layer from the vgg architecture
image_input, keep_prob, layer3, layer4, layer7 = load_vgg(session, VGG_PATH)
# The resulting network architecture from adding a decoder on top of the given vgg model
model_output = layers(layer3, layer4, layer7, NUMBER_OF_CLASSES)
# Returns the output logits, training operation and cost operation to be used
# - logits: each row represents a pixel, each column a class
# - train_op: function used to get the right parameters to the model to correctly label the pixels
# - cross_entropy_loss: function outputting the cost which we are minimizing, lower cost should yield higher accuracy
logits, train_op, cross_entropy_loss = optimize(model_output, correct_label, learning_rate, NUMBER_OF_CLASSES)
# Initialize all variables
session.run(tf.global_variables_initializer())
session.run(tf.local_variables_initializer())
# Train the neural network
train_nn(session, EPOCHS, BATCH_SIZE, get_batches_fn,
train_op, cross_entropy_loss, image_input,
correct_label, keep_prob, learning_rate)
# Run the model with the test images and save each painted output image (roads painted green)
helper.save_inference_samples(RUNS_DIRECTORY, DATA_DIRECTORY, session, IMAGE_SHAPE, logits, keep_prob, image_input)
#--------------------------
# MAIN
#--------------------------
if __name__ == "__main__":
run_tests()
    run() # Train a model and save the output images produced by running the trained model on the test images
print(all_training_losses)
|
StarcoderdataPython
|
58615
|
import pathlib
import typing
import urllib.parse
_PLUGIN_DIR = pathlib.Path(__file__).parent
PLUGIN_DIR = str(_PLUGIN_DIR)
CONFIGS_DIR = str(_PLUGIN_DIR.joinpath('configs'))
SCRIPTS_DIR = str(_PLUGIN_DIR.joinpath('scripts'))
def scan_sql_directory(root: str) -> typing.List[pathlib.Path]:
return [
path
for path in sorted(pathlib.Path(root).iterdir())
if path.is_file() and path.suffix == '.sql'
]
def connstr_replace_dbname(connstr: str, dbname: str) -> str:
"""Replace dbname in existing connection string."""
if connstr.endswith(' dbname='):
return connstr + dbname
if connstr.startswith('postgresql://'):
url = urllib.parse.urlparse(connstr)
url = url._replace(path=dbname) # pylint: disable=protected-access
return url.geturl()
raise RuntimeError(
f'Unsupported PostgreSQL connection string format {connstr!r}',
)
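# Quick illustration of connstr_replace_dbname for both supported formats (not called anywhere;
# the connection strings are made up).
def _connstr_replace_dbname_example():
    assert (
        connstr_replace_dbname('host=localhost port=5432 user=test dbname=', 'orders')
        == 'host=localhost port=5432 user=test dbname=orders'
    )
    assert (
        connstr_replace_dbname('postgresql://test@localhost:5432/template1', 'orders')
        == 'postgresql://test@localhost:5432/orders'
    )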
|
StarcoderdataPython
|
1742095
|
#!/usr/bin/env python
# stdlib imports
import os.path
# third party libraries
from configobj import ConfigObj
from validate import Validator, VdtTypeError
def __getCustomValidator():
'''
Return a Validator object with the custom types we have defined here.
Returns:
Validator object with custom types embedded.
'''
fdict = {
'file_type': __file_type,
'path_type': __path_type,
}
validator = Validator(fdict)
return validator
def __file_type(value):
'''
Describes a file_type from the ShakeMap config spec.
A file_type object is simply a string that must be a valid file on the
system.
Args:
value (str): Path to a file on the local system.
Returns:
str: Input string, if a valid file name.
'''
if not os.path.isfile(value):
raise VdtTypeError(value)
return value
def __path_type(value):
'''
Describes a path_type from the groundfailure config spec.
A path_type object is simply a string that must be a valid file OR
directory on the system.
Args:
value (str): Path to a file or directory on the local system.
Returns:
str: Input string, if a valid file/directory name.
'''
if not os.path.isfile(value) and not os.path.isdir(value):
raise VdtTypeError(value)
return value
def __filterResults(result):
# TODO: this function has a problem where some error messages are
# duplicated...?
errormsg = ''
for key, value in result.items():
if isinstance(value, dict):
tmpmsg = __filterResults(value)
errormsg += tmpmsg
else:
if not isinstance(value, bool):
errormsg += ("Parameter %s failed with error '%s'\n"
% (key, value.args))
else:
if not value:
errormsg += ("Parameter %s was not specified correctly.\n"
% (key))
return errormsg
def correct_config_filepaths(input_path, config):
"""
    Takes an input file path and prepends it to all file locations within
    the config file, so individual entries in the config do not need to
    spell out the full path for each layer. Works by looping over the config
    dictionary and its subdictionaries to find entries named 'file'.
Args:
input_path (str): Path that needs to be appended to the front of all
the file names/paths in config.
config (ConfigObj): Object defining the model and its inputs.
Returns:
config dictionary with complete file paths.
"""
# Pull all other filepaths that need editing
for keys in config.keys():
outer_loop = keys
for keys in config[outer_loop].keys():
second_loop = keys
if hasattr(config[outer_loop][second_loop], 'keys') is False:
if second_loop == 'slopefile' or second_loop == 'file':
path_to_correct = config[outer_loop][second_loop]
config[outer_loop][second_loop] = \
os.path.join(input_path, path_to_correct)
else:
for keys in config[outer_loop][second_loop].keys():
third_loop = keys
if hasattr(config[outer_loop][second_loop][third_loop],
'keys') is False:
if third_loop == 'file' or third_loop == 'filepath':
path_to_correct = \
config[outer_loop][second_loop][third_loop]
config[outer_loop][second_loop][third_loop] = \
os.path.join(input_path, path_to_correct)
else:
for keys in config[outer_loop][second_loop][third_loop].keys():
fourth_loop = keys
if hasattr(config[outer_loop][second_loop][third_loop][fourth_loop], 'keys') is False:
if fourth_loop == 'file' or fourth_loop == 'filepath':
path_to_correct = config[outer_loop][second_loop][third_loop][fourth_loop]
config[outer_loop][second_loop][third_loop][fourth_loop] = os.path.join(
input_path, path_to_correct)
else:
for keys in config[outer_loop][second_loop][third_loop][fourth_loop].keys():
fifth_loop = keys
if fifth_loop == 'file' or fifth_loop == 'filepath':
path_to_correct = config[outer_loop][second_loop][third_loop][fourth_loop][fifth_loop]
config[outer_loop][second_loop][third_loop][fourth_loop][fifth_loop] = os.path.join(
input_path, path_to_correct)
return config
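# Illustration of correct_config_filepaths on a small nested config (not called anywhere;
# the layer names are hypothetical). Only keys named 'file', 'filepath' or 'slopefile'
# get the input path prepended.
def _correct_config_filepaths_example():
    demo = {'model_a': {'slopefile': 'slopes.grd',
                        'layers': {'friction': {'file': 'friction.grd'}}}}
    demo = correct_config_filepaths('/data/inputs', demo)
    assert demo['model_a']['slopefile'] == '/data/inputs/slopes.grd'
    assert demo['model_a']['layers']['friction']['file'] == '/data/inputs/friction.grd'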
def validate(configfile, inputfilepath=None):
'''
Return a validated config object.
Args:
configfile (str): Config file to validate.
inputfilepath (str): Path to input file.
Returns:
A validated ConfigObj object or a dictionary of which
section/parameters failed validation.
'''
thispath = os.path.dirname(os.path.abspath(__file__))
configspec = os.path.join(thispath, 'configspec.ini')
config = ConfigObj(configfile, configspec=configspec)
if inputfilepath is not None:
        config = correct_config_filepaths(inputfilepath, config)
validator = __getCustomValidator()
result = config.validate(validator, preserve_errors=True)
if result is True:
return config
else:
errormsg = __filterResults(result)
raise VdtTypeError(errormsg)
return config
|
StarcoderdataPython
|
1765077
|
<reponame>optik/minicms
import time
from selenium import webdriver
from django.conf import settings
from django.test import LiveServerTestCase
class FrontendTestCase(LiveServerTestCase):
fixtures = ['fixtures/content.yaml',]
def setUp(self):
self.browser = webdriver.Firefox()
def tearDown(self):
self.browser.quit()
super().tearDown()
def test_pages_navigation(self):
# user navigates to site homepage
self.browser.get(self.live_server_url)
# user sees the proper page title
self.assertIn('MiniCMS Example Project', self.browser.title)
# user sees the homepage contents
main_content_element = self.browser.find_element_by_id('main_container')
self.assertIn('This is the content of the homepage', main_content_element.text)
# user clicks contact page link
contact_link = self.browser.find_element_by_id('contact_link')
contact_link.click()
# user notices URL change
new_url = self.browser.current_url
        self.assertEqual(new_url, self.live_server_url + '/content/contact/')
# user sees the proper page title
self.assertIn('MiniCMS Example Project | Contact', self.browser.title)
# user sees the homepage contents
main_content_element = self.browser.find_element_by_id('main_container')
self.assertIn('This is a page with some contact information', main_content_element.text)
|
StarcoderdataPython
|
3328731
|
<filename>env.py<gh_stars>1-10
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""App environment setup."""
import logging
import os
from google.appengine.api import app_identity
APP_NAME = app_identity.get_application_id()
SERVER_SOFTWARE = os.environ.get('SERVER_SOFTWARE', '')
DFP_API_VERSION = os.environ.get('DFP_API_VERSION', 'v201711')
DFP_APP_NAME = os.environ.get('DFP_APP_NAME', 'x5')
ASSET_SIZE_LIMIT = os.environ.get('ASSET_SIZE_LIMIT', 1000000)
DEBUG = False
logger = logging.getLogger('x5.env')
# Replace with your project id
if APP_NAME == 'replace-with-your-project-id':
# Put the client id for your production server here
CLIENT_ID = (
'replace-with-your-production-client-id.apps.googleusercontent.com'
)
# In case you wish to enable Google Analytics usage tracking,
# uncomment the following line and use your GA code
# GA_UA = 'UA-XXXXXXXX-X'
elif SERVER_SOFTWARE.startswith('Development'):
# Put your development server credentials here
DEBUG = bool(os.environ.get('DEBUG', True))
CLIENT_ID = (
'replace-with-your-development-client-id.apps.googleusercontent.com'
)
CLIENT_SECRET = 'replace-with-your-development-client-secret'
else:
CLIENT_ID = None
logger.critical('Not in development and application id %s unknown.', APP_NAME)
|
StarcoderdataPython
|
71442
|
import numpy as np
from scipy.signal import hilbert
from PyEMD.compact import filt6, pade6
# Visualisation is an optional module. To minimise installation, `matplotlib` is not added
# by default. Please install extras with `pip install -r requirement-extra.txt`.
try:
import pylab as plt
except ImportError:
pass
class Visualisation(object):
"""Simple visualisation helper.
This class is for quick and simple result visualisation.
"""
PLOT_WIDTH = 6
PLOT_HEIGHT_PER_IMF = 1.5
def __init__(self, emd_instance=None):
self.emd_instance = emd_instance
self.imfs = None
self.residue = None
if emd_instance is not None:
self.imfs, self.residue = self.emd_instance.get_imfs_and_residue()
def _check_imfs(self, imfs, residue, include_residue):
"""Checks for passed imfs and residue."""
imfs = imfs if imfs is not None else self.imfs
residue = residue if residue is not None else self.residue
if imfs is None:
raise AttributeError("No imfs passed to plot")
if include_residue and residue is None:
raise AttributeError("Requested to plot residue but no residue provided")
return imfs, residue
def plot_imfs(self, imfs=None, residue=None, t=None, include_residue=True):
"""Plots and shows all IMFs.
All parameters are optional since the `emd` object could have been passed when instantiating this object.
The residual is an optional and can be excluded by setting `include_residue=False`.
"""
imfs, residue = self._check_imfs(imfs, residue, include_residue)
num_rows, t_length = imfs.shape
num_rows += include_residue is True
t = t if t is not None else range(t_length)
fig, axes = plt.subplots(num_rows, 1, figsize=(self.PLOT_WIDTH, num_rows * self.PLOT_HEIGHT_PER_IMF))
if num_rows == 1:
            axes = [axes]  # a single Axes object is not iterable, so wrap it in a list
axes[0].set_title("Time series")
for num, imf in enumerate(imfs):
ax = axes[num]
ax.plot(t, imf)
ax.set_ylabel("IMF " + str(num + 1))
if include_residue:
ax = axes[-1]
ax.plot(t, residue)
ax.set_ylabel("Res")
# Making the layout a bit more pleasant to the eye
plt.tight_layout()
def plot_instant_freq(self, t, imfs=None, order=False, alpha=None):
"""Plots and shows instantaneous frequencies for all provided imfs.
The necessary parameter is `t` which is the time array used to compute the EMD.
One should pass `imfs` if no `emd` instances is passed when creating the Visualisation object.
Parameters
----------
order : bool (default: False)
Represents whether the finite difference scheme is
low-order (1st order forward scheme) or high-order (6th order
compact scheme). The default value is False (low-order)
alpha : float (default: None)
Filter intensity. Default value is None, which
is equivalent to `alpha` = 0.5, meaning that no filter is applied.
The `alpha` values must be in between -0.5 (fully active) and 0.5
(no filter).
"""
if alpha is not None:
assert -0.5 < alpha < 0.5, "`alpha` must be in between -0.5 and 0.5"
imfs, _ = self._check_imfs(imfs, None, False)
num_rows = imfs.shape[0]
imfs_inst_freqs = self._calc_inst_freq(imfs, t, order=order, alpha=alpha)
fig, axes = plt.subplots(num_rows, 1, figsize=(self.PLOT_WIDTH, num_rows * self.PLOT_HEIGHT_PER_IMF))
if num_rows == 1:
axes = fig.axes
axes[0].set_title("Instantaneous frequency")
for num, imf_inst_freq in enumerate(imfs_inst_freqs):
ax = axes[num]
ax.plot(t, imf_inst_freq)
ax.set_ylabel("IMF {} [Hz]".format(num + 1))
# Making the layout a bit more pleasant to the eye
plt.tight_layout()
def _calc_inst_phase(self, sig, alpha):
"""Extract analytical signal through the Hilbert Transform."""
analytic_signal = hilbert(sig) # Apply Hilbert transform to each row
if alpha is not None:
assert -0.5 < alpha < 0.5, "`alpha` must be in between -0.5 and 0.5"
real_part = np.array([filt6(row.real, alpha) for row in analytic_signal])
imag_part = np.array([filt6(row.imag, alpha) for row in analytic_signal])
analytic_signal = real_part + 1j * imag_part
phase = np.unwrap(np.angle(analytic_signal)) # Compute angle between img and real
if alpha is not None:
phase = np.array([filt6(row, alpha) for row in phase]) # Filter phase
return phase
def _calc_inst_freq(self, sig, t, order, alpha):
"""Extracts instantaneous frequency through the Hilbert Transform."""
inst_phase = self._calc_inst_phase(sig, alpha=alpha)
if order is False:
inst_freqs = np.diff(inst_phase) / (2 * np.pi * (t[1] - t[0]))
inst_freqs = np.concatenate((inst_freqs, inst_freqs[:, -1].reshape(inst_freqs[:, -1].shape[0], 1)), axis=1)
else:
inst_freqs = [pade6(row, t[1] - t[0]) / (2.0 * np.pi) for row in inst_phase]
if alpha is None:
return np.array(inst_freqs)
else:
return np.array([filt6(row, alpha) for row in inst_freqs]) # Filter freqs
def show(self):
plt.show()
if __name__ == "__main__":
from PyEMD import EMD
# Simple signal example
t = np.arange(0, 3, 0.01)
S = np.sin(13 * t + 0.2 * t ** 1.4) - np.cos(3 * t)
emd = EMD()
emd.emd(S)
imfs, res = emd.get_imfs_and_residue()
# Initiate visualisation with emd instance
vis = Visualisation(emd)
# Create a plot with all IMFs and residue
vis.plot_imfs(imfs=imfs, residue=res, t=t, include_residue=True)
# Create a plot with instantaneous frequency of all IMFs
vis.plot_instant_freq(t, imfs=imfs)
# Show both plots
vis.show()
|
StarcoderdataPython
|
1696859
|
<reponame>mmaaz60/ssl_for_fgvc
import sys
class Model:
"""
This class initiates the specified model.
"""
def __init__(self, config):
"""
Constructor, select the model specified in the configuration file
:param config: Configuration class object
"""
# Select the correct model
if config.cfg["model"]["name"] == "torchvision":
from model.torchvision import TorchVision as Model
elif config.cfg["model"]["name"] == "fgvc_resnet":
from model.fgvc_resnet import FGVCResnet as Model
elif config.cfg["model"]["name"] == "torchvision_ssl_rotation":
from model.torchvision_ssl_rotation import TorchvisionSSLRotation as Model
elif config.cfg["model"]["name"] == "fgvc_ssl_rotation":
from model.fgvc_ssl_rotation import FGVCSSLRotation as Model
elif config.cfg["model"]["name"] == "torchvision_ssl_pirl":
from model.torchvision_ssl_pirl import TorchVisionSSLPIRL as Model
elif config.cfg["model"]["name"] == "dcl":
from model.torchvision_ssl_dcl import TorchVisionSSLDCL as Model
else:
print(f"Please provide correct model to use in configuration. "
f"Available options are ['torchvision', 'fgvc_resnet', "
f"'torchvision_ssl_rotation', 'fgvc_ssl_rotation', 'torchvision_ssl_pirl', 'dcl']")
sys.exit(1)
# Initialize the selected DataLoader
self.model = Model(config)
def get_model(self):
"""
This function returns the selected model
"""
return self.model
|
StarcoderdataPython
|
1723178
|
DEFAULT_MOUNT_SHARE = "True"
MAX_SHARES_PER_FPG = 16
def create_metadata(backend, cpg, fpg, share_name, size,
readonly=False, nfs_options=None, comment='',
fsMode=None, fsOwner=None):
return {
'id': None,
'backend': backend,
'cpg': cpg,
'fpg': fpg,
'vfs': None,
'name': share_name,
'size': size,
'readonly': readonly,
'nfsOptions': nfs_options,
'protocol': 'nfs',
'clientIPs': [],
'comment': comment,
'fsMode': fsMode,
'fsOwner': fsOwner,
}
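# Example call for create_metadata (not called anywhere; the backend/CPG/FPG names are made up).
def _create_metadata_example():
    meta = create_metadata("3par_array", "demo_cpg", "demo_fpg", "demo_share", "10GiB",
                           fsOwner="1000:1000")
    assert meta["protocol"] == "nfs"
    assert meta["clientIPs"] == []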
|
StarcoderdataPython
|
1605869
|
# -*- coding: utf-8 -*-
from pathlib import Path
from setuptools import find_packages, setup
README = (Path(__file__).parent / "README.md").read_text()
REQUIRES = ["pluggy>=1.0,<1.1", "robotframework>=4.0,<5.0"]
EXTRAS_REQUIRE = {"test": ["pytest>=5.2.2,<6.3.0", "black==21.12b0"]}
def get_version():
global_vars = {}
exec(Path("src/rf_network/version.py").read_text(), global_vars)
return global_vars["__version__"]
setup(
name="rf-network",
keywords=["rf-network", "robotframework", "network", "testing"],
license="MIT license",
version=get_version(),
author="<NAME>",
author_email="<EMAIL>",
python_requires=">=3.7",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
description=(
"A pluggable multi-vendor network connection library for RobotFramework"
),
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/ttafsir/rf-network",
packages=find_packages("src", exclude=("tests",)),
package_dir={"": "src"},
install_requires=REQUIRES,
include_package_data=True,
extras_require=EXTRAS_REQUIRE,
tests_require=["rf-network[test]"],
)
|
StarcoderdataPython
|
3285115
|
<gh_stars>10-100
# Copyright 2011 SRI International
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Created on Feb 23, 2011
@author: jklo
'''
import unittest
import logging
from stemx.tests import Publish
class ObtainTest(Publish):
def __init__(self):
self._log = logging.getLogger("ObtainTest");
self._server = None
pass
def setUp(self):
super(ObtainTest, self).setUp()
def tearDown(self):
super(ObtainTest, self).tearDown()
def testObtain(self):
doc_ID = self.test_goodPublish()
self._res
if __name__ == "__main__":
logging.basicConfig()
#import sys;sys.argv = ['', 'ObtainTest.testObtain']
unittest.main()
|
StarcoderdataPython
|
53510
|
<reponame>fei-protocol/checkthechain
from __future__ import annotations
import math
import typing
if typing.TYPE_CHECKING:
import aiohttp
import toolstr
url_template = 'https://api.coingecko.com/api/v3/coins/markets?vs_currency=usd&order=market_cap_desc&per_page=100&page={page}&sparkline=true&price_change_percentage=1h%2C24h%2C7d'
token_url_template = 'https://www.coingecko.com/en/coins/{name}'
async def async_get_market_data(
n: int,
) -> typing.Sequence[typing.Mapping[typing.Any, typing.Any]]:
import asyncio
import aiohttp
n_per_page = 100
n_pages = math.ceil(n / n_per_page)
async with aiohttp.ClientSession() as session:
coroutines = [async_get_page(session, p) for p in range(n_pages)]
pages = await asyncio.gather(*coroutines)
items = [item for page in pages for item in page]
return items[:n]
async def async_get_page(
session: aiohttp.ClientSession, p: int
) -> typing.List[typing.Any]:
url = url_template.format(page=p + 1)
async with session.get(url) as response:
page = await response.json()
if not isinstance(page, list):
raise Exception('bad page format')
return page
def color_polarity(value: int | float | None) -> str:
if value is None:
return ''
if value > 0:
return '#4eaf0a'
elif value < 0:
return '#e15241'
else:
return 'gray'
def print_market_data(
data: typing.Sequence[typing.Any],
verbose: bool,
include_links: bool = False,
height: int | None = None,
width: int | None = None,
) -> None:
if width is None:
width = 8
# create labels
labels = ['token', 'price', 'Δ 1H', 'Δ 24H', 'Δ 7D', 'volume', 'mkt cap']
if verbose:
labels.append('7D chart')
# create rows
rows: list[typing.Sequence[typing.Any]] = []
for item in data:
row = []
row.append(item['symbol'].upper())
row.append(item['current_price'])
# add price change cells
for key in [
'price_change_percentage_1h_in_currency',
'price_change_percentage_24h_in_currency',
'price_change_percentage_7d_in_currency',
]:
change = item[key]
row.append(change)
row.append(item['total_volume'])
row.append(item['market_cap'])
# add sparkline
if verbose:
from toolstr.charts import braille
sparkline = braille.create_braille_sparkline(
data=item['sparkline_in_7d']['price'],
# width=20,
width=width,
height=height,
)
row.append(sparkline)
rows.append(row)
if height is None:
height = 1
def get_row_color(r: int) -> str:
if height is None:
raise Exception('height not set')
datum = data[int(r / height)]
diff = (
datum['sparkline_in_7d']['price'][-1]
- datum['sparkline_in_7d']['price'][0]
)
return color_polarity(diff)
# print table
toolstr.print_multiline_table(
rows,
labels=labels,
column_gap=1,
# compact=True,
add_row_index=True,
separate_all_rows=False,
# max_table_width=os.get_terminal_size().columns,
vertical_justify='center',
column_style={
'Δ 1H': lambda context: 'bold ' + color_polarity(context['cell']),
'Δ 24H': lambda context: 'bold ' + color_polarity(context['cell']),
'Δ 7D': lambda context: 'bold ' + color_polarity(context['cell']),
'7D chart': lambda context: 'bold ' + get_row_color(context['r']),
},
column_format={
'price': {'decimals': 2, 'trailing_zeros': True, 'prefix': '$'},
'Δ 1H': {
'scientific': False,
'postfix': '%',
'decimals': 2,
'trailing_zeros': True,
},
'Δ 24H': {
'scientific': False,
'postfix': '%',
'decimals': 2,
'trailing_zeros': True,
},
'Δ 7D': {
'scientific': False,
'postfix': '%',
'decimals': 2,
'trailing_zeros': True,
},
'volume': {
'decimals': 1,
'trailing_zeros': True,
'prefix': '$',
'order_of_magnitude': True,
},
'mkt cap': {
'decimals': 1,
'trailing_zeros': True,
'prefix': '$',
'order_of_magnitude': True,
},
},
)
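# Usage sketch (not called anywhere; requires live network access to the CoinGecko API).
def _market_data_example():
    import asyncio
    market_data = asyncio.run(async_get_market_data(10))
    print_market_data(market_data, verbose=False)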
|
StarcoderdataPython
|
1665336
|
import ChromaPy32 as Chroma # Import the Chroma Module
from time import sleep
Keyboard = Chroma.Keyboard() # Initialize a new Keyboard Instance
RED = (255, 0, 0) # Initialize a new color by RGB (RED,GREEN,BLUE)
GREEN = (0, 255, 0)
for y in range(0, Keyboard.MaxRow): # Use Keyboard.MaxRow as an iteration border in a for-loop
    Keyboard.setbyGrid(3, y, RED) # sets the whole fourth column to red
for x in range(0, Keyboard.MaxColumn): # Use Keyboard.MaxColumn as iteration border in a for-loop
Keyboard.setbyGrid(x, 2, GREEN) # sets the whole third row to green
Keyboard.applyGrid() # applies the Keyboard-Grid to the connected Keyboard
sleep(5)
|
StarcoderdataPython
|
57169
|
<reponame>BLannoo/Advent-of-Code-2021
from pathlib import Path
from day4.board import Board
def silver(input_file_path: Path) -> int:
number_sequence, boards = parse_input(input_file_path)
for number in number_sequence:
for board in boards:
board.mark(number)
if board.is_won():
return board.unmarked_sum() * number
def gold(input_file_path: Path) -> int:
number_sequence, boards = parse_input(input_file_path)
for number in number_sequence:
for board in boards.copy():
board.mark(number)
if board.is_won():
if len(boards) == 1:
return board.unmarked_sum() * number
boards.remove(board)
def parse_input(input_file_path):
split = input_file_path.read_text().split("\n\n")
number_sequence = [
int(number_str)
for number_str in split[0].split(",")
]
boards = [
Board.parse(board_description)
for board_description in split[1:]
]
return number_sequence, boards
|
StarcoderdataPython
|
1749776
|
from subprocess import Popen, PIPE, STDOUT
import sys
pomXml = """
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>de.zaunkoenigweg.eclipse</groupId>
<artifactId>mirror-eclipse-site</artifactId>
<version>1.0</version>
<packaging>pom</packaging>
<properties>
<tycho.version>1.0.0</tycho.version>
</properties>
<repositories>
<repository>
<id>oxygen</id>
<url>http://download.eclipse.org/releases/oxygen</url>
<layout>p2</layout>
</repository>
</repositories>
<build>
<plugins>
<plugin>
<groupId>org.eclipse.tycho.extras</groupId>
<artifactId>tycho-p2-extras-plugin</artifactId>
<version>${tycho.version}</version>
<executions>
<execution>
<id>mirror</id>
<phase>prepare-package</phase>
<goals>
<goal>mirror</goal>
</goals>
<configuration>
<source>
<repository>
<url>%(siteUrl)s</url>
<layout>p2</layout>
</repository>
</source>
<destination>${project.build.directory}/mirror/</destination>
<ius>
%(iuConfiguration)s </ius>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-assembly-plugin</artifactId>
<version>2.4.1</version>
<configuration>
<descriptors>
<descriptor>assembly.xml</descriptor>
</descriptors>
</configuration>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>
"""
assemblyXml="""<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2 http://maven.apache.org/xsd/assembly-1.1.2.xsd">
<id>mirror</id>
<formats>
<format>zip</format>
</formats>
<includeBaseDirectory>false</includeBaseDirectory>
<fileSets>
<fileSet>
<directory>${project.build.directory}/mirror</directory>
<outputDirectory>/</outputDirectory>
</fileSet>
</fileSets>
</assembly>"""
iusXml = """ <iu>
<id>%(iu)s</id>
<version>%(version)s</version>
</iu>
"""
def execute(command, directory='.', verbose=False):
p = Popen(command, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, cwd=directory, close_fds=True)
for c in iter(lambda: p.stdout.read(1), ''):
if verbose:
sys.stdout.write(c)
rc = p.poll()
if(rc and rc!=0):
sys.exit(rc)
def prompt(msg, allowEmpty=False):
text = raw_input(msg)
if(not allowEmpty):
while(not text.strip()):
text = raw_input(msg)
return text.strip()
print """
This mirroring tool helps you generate a zipped update site from a given site URL.
It dynamically creates a Maven project and uses a tycho plugin to mirror the update site.
You have to provide some params."""
print """
Site Name:
==========
The site name is used as the name for the zip file that contains the mirrored update site.
Example: "pmd"
"""
siteName = prompt("site name: ")
print """
update site URL:
================
The URL of the update site. You usually obtain it from the plugin's website.
Example: "https://dl.bintray.com/pmd/pmd-eclipse-plugin/updates/"
"""
siteUrl = prompt("update site URL: ")
print """
IUs (installable units):
========================
An update site can consist of multiple installable units (usually Eclipse features).
You can provide 0..n IU names. If you provide none, all available IUs will be mirrored,
otherwise the given subset.
Example: "net.sourceforge.pmd.eclipse.feature.group"
"""
ius = []
iu = prompt("IU # 1 (blank for all IUs): ", allowEmpty=True)
while(iu.strip()):
ius.append(iu)
iu = prompt("IU # %d (blank to end list): " % (len(ius)+1), allowEmpty=True)
versions = {}
for iu in ius:
version = prompt("Version for IU '%s' (blank for latest): " % iu, allowEmpty=True)
versions[iu]=version
print """
The script will now generate a Maven project in the subfolder mirror-eclipse-site
and execute it to mirror and assemble the site.
"""
raw_input("ENTER")
iuConfiguration = ""
for iu in ius:
iuConfiguration += (iusXml % {'iu':iu, 'version': versions[iu]})
execute("rm -rf mirror-eclipse-site/")
execute("mkdir mirror-eclipse-site")
data = {'siteName':siteName, 'siteUrl':siteUrl, 'iuConfiguration':iuConfiguration}
with open("mirror-eclipse-site/pom.xml", "w") as pomFile:
pomFile.write(pomXml % data)
with open("mirror-eclipse-site/assembly.xml", "w") as pomFile:
pomFile.write(assemblyXml)
execute("cd mirror-eclipse-site")
execute("mvn package", "mirror-eclipse-site", verbose=True)
execute("cp mirror-eclipse-site/target/mirror-eclipse-site-1.0-mirror.zip %s.zip" % siteName)
execute("rm -rf mirror-eclipse-site/")
print """
If the Maven project completed successfully, you will find the update site in %s.zip.
""" % siteName
|
StarcoderdataPython
|
3248198
|
<reponame>alexbarten/adventofcode2021<filename>src/december06/compute_fishschool_growth_smart.py
def main():
fishschool = fishschool_to_dict("src/december06/fish.txt")
grown_school = reduce_and_grow(fishschool, 256)
number_of_fish = 0
for value in grown_school.values():
number_of_fish += value
print("The number of fish is ", number_of_fish)
def fishschool_to_dict(file):
fishschoolfile = open(file, "r")
fishschool_line = fishschoolfile.readline().replace("\n", "")
fishschool = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0,
5: 0, 6: 0, 7: 0, 8: 0}
for fish in fishschool_line:
if fish != ",":
fishschool[int(fish)] += 1
return fishschool
def reduce_and_grow(fishschool, days):
for day in range(days):
new_babies = fishschool[0]
for i in range(8):
fishschool[i] = fishschool[i + 1]
fishschool[8] = new_babies
fishschool[6] += new_babies
return fishschool
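# Worked example for reduce_and_grow (not called anywhere): a single fish with timer 0,
# after one day, resets to 6 and spawns a new fish with timer 8.
def _reduce_and_grow_example():
    school = {0: 1, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0}
    school = reduce_and_grow(school, 1)
    assert school[6] == 1 and school[8] == 1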
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
127033
|
"""
--- Day 2: 1202 Program Alarm ---
-- Part One --
An Intcode program is a list of integers separated by commas (like 1,0,0,3,99).
To run one, start by looking at the first integer (called position 0).
Here, you will find an opcode - either 1, 2, or 99. The opcode indicates what to do;
for example, 99 means that the program is finished and should immediately halt.
Encountering an unknown opcode means something went wrong.
Opcode 1 adds together numbers read from two positions and stores the result
in a third position. The three integers immediately after the opcode tell you
these three positions - the first two indicate the positions from which you should
read the input values, and the third indicates the position at which the output
should be stored.
For example, if your Intcode computer encounters 1,10,20,30, it should read the
values at positions 10 and 20, add those values, and then overwrite the value at
position 30 with their sum.
Opcode 2 works exactly like opcode 1, except it multiplies the two inputs instead
of adding them. Again, the three integers after the opcode indicate where the inputs
and outputs are, not their values.
(Opcode 99 halts the program.)
Once you're done processing an opcode, move to the next one by stepping forward 4 positions.
Once you have a working computer, the first step is to restore the gravity assist program
(your puzzle input) to the "1202 program alarm" state it had just before the last computer
caught fire. To do this, before running the program, replace position 1 with the value 12 and
replace position 2 with the value 2. What value is left at position 0 after the program halts?
-- Part Two --
"With terminology out of the way, we're ready to proceed. To complete the gravity assist,
you need to determine what pair of inputs produces the output 19690720."
The inputs should still be provided to the program by replacing the values at addresses 1 and 2,
just like before. In this program, the value placed in address 1 is called the noun, and the
value placed in address 2 is called the verb. Each of the two input values will be between 0 and 99,
inclusive.
Once the program has halted, its output is available at address 0, also just like before.
Each time you try a pair of inputs, make sure you first reset the computer's memory to the values
in the program (your puzzle input) - in other words, don't reuse memory from a previous attempt.
Find the input noun and verb that cause the program to produce the output 19690720.
What is 100 * noun + verb? (For example, if noun=12 and verb=2, the answer would be 1202.)
"""
def get_init():
a = open("intcode.txt", "r")
b = (a.readline()).split(",")
a.close()
return b
def opcode_one(arr, i):
arr[int('%s'%(arr[i+3]))] = int(arr[int('%s'%(arr[i+1]))]) + int(arr[int('%s'%(arr[i+2]))])
def opcode_two(arr, i):
arr[int('%s'%(arr[i+3]))] = int(arr[int('%s'%(arr[i+1]))]) * int(arr[int('%s'%(arr[i+2]))])
def part_one():
ics = get_init()
i = 0
while i < (len(ics)):
if ics[i] == "1":
opcode_one(ics, i)
i += 4
elif ics[i] == "2":
opcode_two(ics, i)
i += 4
elif ics[i] == "99":
return ics[0]
def part_two():
for k in range(0,100):
for j in range(0, 100):
ics = get_init()
ics[1] = '%s'%(k)
ics[2] = '%s'%(j)
i = 0
while i < (len(ics)):
if ics[i] == "1":
opcode_one(ics, i)
i += 4
elif ics[i] == "2":
opcode_two(ics, i)
i += 4
elif ics[i] == "99":
break
if ics[0] == 19690720:
return(100*k + j)
if __name__ == '__main__':
print(part_one())
print(part_two())
|
StarcoderdataPython
|
80476
|
<reponame>yingcuhk/Distributed-and-Asynchronos-SGD<filename>src/LG.py
import gzip
import os
import sys
import numpy as np
import cPickle
import tensorflow as tf
class LogisticRegression(object):
def __init__(self):
self.X = tf.placeholder("float", [None, 784])
self.Y = tf.placeholder("float", [None, 10])
self.W = tf.Variable(tf.random_normal([28 * 28, 10], stddev=0.01))
self.b = tf.Variable(tf.zeros([10, ]))
self.model = self.create_model(self.X, self.W, self.b)
# logistic and cal error
self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(self.model, self.Y))
# gradient descent method to minimize error
self.train = tf.train.GradientDescentOptimizer(0.1).minimize(self.cost)
# calculate the max pos each row
self.predict = tf.argmax(self.model, 1)
def create_model(self, X, w, b):
# wx + b
return tf.add(tf.matmul(X, w), b)
    def load_data(self):
        # Load the pickled MNIST splits; the loading code was previously wrapped in a
        # docstring, which left train_set/valid_set/test_set undefined at the return
        f = gzip.open('mnist.pkl.gz', 'rb')
        train_set, valid_set, test_set = cPickle.load(f)
        f.close()
        return train_set, valid_set, test_set
def dense_to_one_hot(self, labels_dense, num_classes=10):
# ont hot copy from https://github.com/nlintz/TensorFlow-Tutorials
# also can use sklearn preprocessing OneHotEncoder()
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
def run(self):
train_set, valid_set, test_set = self.load_data()
train_X, train_Y = train_set
test_X, test_Y = test_set
train_Y = self.dense_to_one_hot(train_Y)
test_Y = self.dense_to_one_hot(test_Y)
sess = tf.Session()
init = tf.initialize_all_variables()
sess.run(init)
for i in range(100):
for start, end in zip(range(0, len(train_X), 128), range(128, len(train_X), 128)):
sess.run(self.train, feed_dict={self.X: train_X[start:end], self.Y: train_Y[start:end]})
print i, np.mean(np.argmax(test_Y, axis=1) == sess.run(self.predict, feed_dict={self.X: test_X, self.Y: test_Y}))
sess.close()
if __name__ == '__main__':
lr_model = LogisticRegression()
lr_model.run()
|
StarcoderdataPython
|
136232
|
import subprocess
import json
if __name__ == '__main__':
filename ="/tmp/test.json"
f = open(filename,"w")
result = subprocess.check_output(['kubectl','get','pods','-o','json']).rstrip()
f.write(result)
f.close()
#
f = open(filename,"r")
data = json.load(f)
#print(data)
print data['items'][0]['status']['conditions']
|
StarcoderdataPython
|
52234
|
<reponame>adger-me/you-get
#!/usr/bin/env python
''' WIP
def main():
script_main('you-get', any_download, any_download_playlist)
if __name__ == "__main__":
main()
'''
|
StarcoderdataPython
|
82746
|
from DesignPatterns.Factory.Sedan import Sedan
from DesignPatterns.Factory.SUV import SUV
from DesignPatterns.Factory.Hatchback import Hatchback
class VehicleFactory:
def generate(self, vehicle_type):
        if vehicle_type == 'Sedan':
            return Sedan().produce_vehicle()
        elif vehicle_type == 'SUV':
            return SUV().produce_vehicle()
        elif vehicle_type == 'Hatchback':
            return Hatchback().produce_vehicle()
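
# Hedged usage sketch, not part of the original module: it assumes the concrete
# classes' produce_vehicle() methods return something printable (a vehicle object
# or a description). Unknown types fall through and generate() returns None.
if __name__ == '__main__':
    factory = VehicleFactory()
    for kind in ('Sedan', 'SUV', 'Hatchback'):
        print(kind, '->', factory.generate(kind))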
|
StarcoderdataPython
|
4804390
|
<gh_stars>0
import data_io
import pandas as pd
import requests
from typing import List
from debug_tools import *
"""
Function that takes as input a job title and returns a dataframe
with the average salary by state for that job title.
"""
def scrape_salary_table_for_job_title(job_name: str) -> pd.DataFrame:
if(job_name):
query = job_name.replace(" ", "-")
else:
return False
job_url = "https://ziprecruiter.com/Salaries/What-Is-the-Average-" + query + "-Salary-by-State"
job_response = requests.get(job_url, timeout=10)
dprint(job_response)
if "ind=null" in job_response.url or "Moved Permanently" in job_response.url:
return False
elif job_response.status_code == 301:
return False
else:
job_text: List[pd.DataFrame] = pd.read_html(job_response.text)
if(len(job_text) > 0):
job_table: pd.DataFrame = pd.concat(job_text)
else:
return False
return job_table
"""
Static Function to pull Value of a Dollar table from patriotsoftware which
we join and multiply on the salary by state to find dollar adjusted jobs
* TODO: cache this somewhere
"""
def scrape_ppp_table() -> pd.DataFrame:
ppp_url = "https://www.patriotsoftware.com/blog/accounting/average-cost-living-by-state/"
ppp_response = requests.get(ppp_url, timeout=10)
ppp_text: List[pd.DataFrame] = pd.read_html(ppp_response.text, header=0)
ppp_table: pd.DataFrame = pd.concat(ppp_text)
return ppp_table
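
# Hedged usage sketch, not part of the original module: "Data Scientist" is an
# arbitrary example title. scrape_salary_table_for_job_title returns False on
# failure and a pandas DataFrame on success, so the result is checked before
# printing. No join with the cost-of-living table is attempted here because the
# scraped column names are not guaranteed.
if __name__ == '__main__':
    salary_table = scrape_salary_table_for_job_title("Data Scientist")
    if salary_table is not False:
        print(salary_table.head())
    ppp_table = scrape_ppp_table()
    print(ppp_table.head())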
|
StarcoderdataPython
|
54134
|
<filename>Chapter 08/Chap08_Example8.24.py
class Student:
def __init__(self):
self.name = 'Mohan'
self.age = 10
self.country = 'India'
def mydelete(self):
del self.age
myobj1 = Student()
print("Before deleting: ")
print(myobj1.__dict__)
del myobj1.country # deleting outside of the class
print("After deleting outside of the class: ")
print(myobj1.__dict__)
print("After deleting from inside of the class")
myobj1.mydelete()
print(myobj1.__dict__)
|
StarcoderdataPython
|
3228012
|
<filename>Pointnet/pointnet_segmentation.py<gh_stars>0
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
from Pointnet import PointNet
class PointNetSeg(nn.Module):
def __init__(self, k = 3,Feature_Transform = True ):
        super(PointNetSeg,self).__init__()
self.k = k
        self.pointnet = PointNet(global_feat = False, Feature_Transform= Feature_Transform) #bx1024
self.conv1 = torch.nn.Conv1d(1024,512,1)
self.conv2 = torch.nn.Conv1d(512,256,1)
self.conv3 = torch.nn.Conv1d(256,128,1)
self.conv4 = torch.nn.Conv1d(128,self.k,1)
#self.rl = nn.ReLU()
self.bn1= nn.BatchNorm1d(512)
self.bn2= nn.BatchNorm1d(256)
self.bn3= nn.BatchNorm1d(128)
self.dropout = nn.Dropout(p=0.3)
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self,x):
batchsize= x.size()[0]
no_of_points = x.size()[2]
x = self.pointnet(x)
x= F.relu(self.bn1(self.conv1(x)))
x= F.relu(self.bn2(self.conv2(x)))
x= F.relu(self.bn3(self.conv3(x)))
x= self.conv4(x)
x = x.transpose(2,1).contiguous()
x = F.log_softmax(x.view(-1,self.k), dim=-1)
x = x.view(batchsize, no_of_points, self.k)
return x
|
StarcoderdataPython
|
186223
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
from queue import PriorityQueue
class Solution:
def mergeKLists(self, lists: List[ListNode]) -> ListNode:
class Wrapper():
def __init__(self, node):
self.node = node
def __lt__(self, other):
return self.node.val < other.node.val
head = currentNode = ListNode(0)
priorityQueue = PriorityQueue()
for linkedList in lists:
if linkedList:
priorityQueue.put(Wrapper(linkedList))
while not priorityQueue.empty():
node = priorityQueue.get().node
currentNode.next = node
currentNode = currentNode.next
node = node.next
if node:
priorityQueue.put(Wrapper(node))
return head.next
|
StarcoderdataPython
|
4824734
|
from optparse import OptionParser
import pprint
import pymongo
def main():
parser = OptionParser()
parser.add_option("-s", "--seconds-running", dest="seconds", default=None,
help="search for current Ops where the secs_running is greater than or equal to this value")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
help="verbose output on each operation found")
parser.add_option("-q", "--quiet", action="store_true", dest="quiet", default=False,
help="quiet ouput. only prints operation ID values.")
parser.add_option("-o", "--opid", dest="opid", default=None,
help="specific operation to get details on.")
(options, args) = parser.parse_args()
seconds = 0
verbose = False
quiet = False
opid = None
if options.seconds is not None:
seconds = options.seconds
if options.verbose:
verbose = True
if options.quiet:
quiet = True
if options.opid is not None:
opid = options.opid
conn = pymongo.MongoClient()
all_ops = conn['admin']['$cmd.sys.inprog'].find_one('inprog')['inprog']
sync_ops = []
active_ops = []
for op in all_ops:
if op['op'] == "query":
if op['query'].has_key('writebacklisten'):
sync_ops.append(op)
elif op.has_key('secs_running'):
if op['ns'] != "local.oplog.rs":
if int(op['secs_running']) >= int(seconds):
if opid is not None:
if opid == op['opid']:
active_ops.append(op)
else:
active_ops.append(op)
if verbose:
print "SyncOps found: %d" % len(sync_ops)
print "Operations found: %d" % len(active_ops)
for op in active_ops:
if options.verbose:
pprint.pprint(op)
elif quiet:
print op['opid']
else:
print "ID: %s" % op['opid']
print "\tSeconds Running: %s" % op['secs_running']
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1704793
|
<gh_stars>0
""" graph -> InChI string conversion
"""
from ._to_inchi import with_atom_inchi_numbers
__all__ = ['with_atom_inchi_numbers']
|
StarcoderdataPython
|
1689236
|
import redis
class Config(object):
"""配置信息"""
# 随机加密盐值,随机即可
# SECRET_KEY = "<KEY>"
SECRET_KEY = ""
AUTH_SALT = "sYjLRzKQG4vra"
EXPIRES_IN = 3600
    # File upload path
UPLOAD_FOLDER = '/static'
    # Database
# SQLALCHEMY_DATABASE_URI = "mysql://root:[email protected]:3307/metaphysics"
SQLALCHEMY_DATABASE_URI = ""
SQLALCHEMY_TRACK_MODIFICATIONS = True
# redis
REDIS_HOST = "192.168.126.131"
REDIS_PORT = "6379"
#
# # flask_session
SESSION_TYPE = "redis"
SESSION_REDIS = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT)
    SESSION_USE_SIGNER = True  # whether to sign the session_id stored in the cookie
    PERMANENT_SESSION_LIFETIME = 86400  # session expiry time, in seconds
class DevelopmentConfig(Config):
"""开发模式的配置信息"""
DEBUG = True
class ProductionConfig(Config):
"""生产环境配置信息"""
DEBUG = False
config_map = {
'develop': DevelopmentConfig,
'product': ProductionConfig
}
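
# Hedged usage sketch, not part of the original module: a Flask application
# would normally select one of these classes through config_map. Flask is
# assumed to be installed; from_object only copies uppercase attributes.
if __name__ == '__main__':
    from flask import Flask
    app = Flask(__name__)
    app.config.from_object(config_map['develop'])
    print(app.config['SESSION_TYPE'], app.config['PERMANENT_SESSION_LIFETIME'])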
|
StarcoderdataPython
|
3296630
|
<reponame>RSB4760/apq8016_external_vixl<gh_stars>0
#!/usr/bin/env python2.7
# Copyright 2014, ARM Limited
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of ARM Limited nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import argparse
import re
import platform
import subprocess
import multiprocessing
import time
import util
from printer import EnsureNewLine, Print, UpdateProgress
def BuildOptions():
result = argparse.ArgumentParser(
description =
'''This tool runs each test reported by $TEST --list (and filtered as
specified). A summary will be printed, and detailed test output will be
stored in log/$TEST.''',
# Print default values.
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
result.add_argument('filters', metavar='filter', nargs='*',
help='Run tests matching all of the (regexp) filters.')
result.add_argument('--runner', action='store', required=True,
help='The test executable to run.')
result.add_argument('--coloured_trace', action='store_true',
help='''Pass --coloured_trace to the test runner. This
will put colour codes in the log files. The
coloured output can be viewed by "less -R", for
example.''')
result.add_argument('--debugger', action='store_true',
help='''Pass --debugger to test, so that the debugger is
used instead of the simulator. This has no effect
when running natively.''')
result.add_argument('--verbose', action='store_true',
help='Print verbose output.')
result.add_argument('--jobs', '-j', metavar='N', type=int, nargs='?',
default=1, const=multiprocessing.cpu_count(),
help='''Runs the tests using N jobs. If the option is set
but no value is provided, the script will use as many jobs
as it thinks useful.''')
sim_default = 'off' if platform.machine() == 'aarch64' else 'on'
result.add_argument('--simulator', action='store', choices=['on', 'off'],
default=sim_default,
help='Explicitly enable or disable the simulator.')
return result.parse_args()
def VerbosePrint(verbose, string):
if verbose:
Print(string)
# A class representing an individual test.
class Test:
def __init__(self, name, runner, debugger, coloured_trace, verbose):
self.name = name
self.runner = runner
self.debugger = debugger
self.coloured_trace = coloured_trace
self.verbose = verbose
self.logpath = os.path.join('log', os.path.basename(self.runner))
if self.debugger:
basename = name + '_debugger'
else:
basename = name
self.logout = os.path.join(self.logpath, basename + '.stdout')
self.logerr = os.path.join(self.logpath, basename + '.stderr')
if not os.path.exists(self.logpath): os.makedirs(self.logpath)
# Run the test.
# Use a thread to be able to control the test.
def Run(self):
command = \
[self.runner, '--trace_sim', '--trace_reg', '--trace_write', self.name]
if self.coloured_trace:
command.append('--coloured_trace')
if self.debugger:
command.append('--debugger')
VerbosePrint(self.verbose, '==== Running ' + self.name + '... ====')
sys.stdout.flush()
process = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Get the output and return status of the test.
stdout, stderr = process.communicate()
retcode = process.poll()
# Write stdout and stderr to the log.
with open(self.logout, 'w') as f: f.write(stdout)
with open(self.logerr, 'w') as f: f.write(stderr)
if retcode == 0:
# Success.
# We normally only print the command on failure, but with --verbose we
# should also print it on success.
VerbosePrint(self.verbose, 'COMMAND: ' + ' '.join(command))
VerbosePrint(self.verbose, 'LOG (stdout): ' + self.logout)
VerbosePrint(self.verbose, 'LOG (stderr): ' + self.logerr + '\n')
else:
# Failure.
Print('--- FAILED ' + self.name + ' ---')
Print('COMMAND: ' + ' '.join(command))
Print('LOG (stdout): ' + self.logout)
Print('LOG (stderr): ' + self.logerr + '\n')
return retcode
# Scan matching tests and return a test manifest.
def ReadManifest(runner, filters = [],
debugger = False, coloured_trace = False, verbose = False):
status, output = util.getstatusoutput(runner + ' --list')
if status != 0: util.abort('Failed to list all tests')
names = output.split()
for f in filters:
names = filter(re.compile(f).search, names)
return map(lambda x:
Test(x, runner, debugger, coloured_trace, verbose), names)
# Shared state for multiprocessing. Ideally the context should be passed with
# arguments, but constraints from the multiprocessing module prevent us from
# doing so: the shared variables (multiprocessing.Value) must be global, or no
# work is started. So we abstract some additional state into global variables to
# simplify the implementation.
# Read-write variables for the workers.
n_tests_passed = multiprocessing.Value('i', 0)
n_tests_failed = multiprocessing.Value('i', 0)
# Read-only for workers.
n_tests = None
start_time = None
verbose_test_run = None
test_suite_name = ''
def RunTest(test):
UpdateProgress(start_time, n_tests_passed.value, n_tests_failed.value,
n_tests, verbose_test_run, test.name, test_suite_name)
# Run the test and update the statistics.
retcode = test.Run()
if retcode == 0:
with n_tests_passed.get_lock(): n_tests_passed.value += 1
else:
with n_tests_failed.get_lock(): n_tests_failed.value += 1
# Run all tests in the manifest.
# This function won't run in parallel due to constraints from the
# multiprocessing module.
__run_tests_lock__ = multiprocessing.Lock()
def RunTests(manifest, jobs = 1, verbose = False, debugger = False,
progress_prefix = ''):
global n_tests
global start_time
global verbose_test_run
global test_suite_name
with __run_tests_lock__:
# Reset the counters.
n_tests_passed.value = 0
n_tests_failed.value = 0
verbose_test_run = verbose
test_suite_name = progress_prefix
n_tests = len(manifest)
if n_tests == 0:
Print('No tests to run.')
return 0
VerbosePrint(verbose, 'Running %d tests...' % (n_tests))
start_time = time.time()
pool = multiprocessing.Pool(jobs)
# The '.get(9999999)' is workaround to allow killing the test script with
# ctrl+C from the shell. This bug is documented at
# http://bugs.python.org/issue8296.
work = pool.map_async(RunTest, manifest).get(9999999)
done_message = '== Done =='
UpdateProgress(start_time, n_tests_passed.value, n_tests_failed.value,
n_tests, verbose, done_message, progress_prefix)
return n_tests_failed.value # 0 indicates success
if __name__ == '__main__':
# $ROOT/tools/test.py
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
# Parse the arguments.
args = BuildOptions()
# Find a valid path to args.runner (in case it doesn't begin with './').
args.runner = os.path.join('.', args.runner)
if not os.access(args.runner, os.X_OK):
print "'" + args.test + "' is not executable or does not exist."
sys.exit(1)
# List all matching tests.
manifest = ReadManifest(args.runner, args.filters,
args.debugger, args.coloured_trace, args.verbose)
# Run the tests.
status = RunTests(manifest, jobs=args.jobs,
verbose=args.verbose, debugger=args.debugger)
EnsureNewLine()
sys.exit(status)
|
StarcoderdataPython
|
1675011
|
"""
builtin_bracket.py
"""
from __future__ import print_function
from _devbuild.gen.id_kind_asdl import Id
from _devbuild.gen.runtime_asdl import value, quote_e, quote_t
from _devbuild.gen.syntax_asdl import (
word, word_e, word_t, word__String,
bool_expr,
)
from _devbuild.gen.types_asdl import lex_mode_e
from asdl import runtime
from core import error
from core.pyerror import p_die, log
from core import vm
from frontend import match
from osh import sh_expr_eval
from osh import bool_parse
from osh import word_parse
from osh import word_eval
_ = log
from typing import cast, TYPE_CHECKING
if TYPE_CHECKING:
from _devbuild.gen.runtime_asdl import cmd_value__Argv, value__Str
from _devbuild.gen.syntax_asdl import word__String, bool_expr_t
from _devbuild.gen.types_asdl import lex_mode_t
from core.ui import ErrorFormatter
from core import optview
from core import state
class _StringWordEmitter(word_parse.WordEmitter):
"""For test/[, we need a word parser that returns String.
The BoolParser calls word_.BoolId(w), and deals with Kind.BoolUnary,
Kind.BoolBinary, etc. This is instead of Compound/Token (as in the
[[ case.
"""
def __init__(self, cmd_val):
# type: (cmd_value__Argv) -> None
self.cmd_val = cmd_val
self.i = 0
self.n = len(cmd_val.argv)
def ReadWord(self, unused_lex_mode):
# type: (lex_mode_t) -> word__String
"""Interface for bool_parse.py.
TODO: This should probably be word_t
"""
if self.i == self.n:
# Does it make sense to define Eof_Argv or something?
# TODO: Add a way to show this location. Show 1 char past the right-most
# spid of the last word? But we only have the left-most spid.
w = word.String(Id.Eof_Real, '', runtime.NO_SPID)
return w
#log('ARGV %s i %d', self.argv, self.i)
s = self.cmd_val.argv[self.i]
left_spid = self.cmd_val.arg_spids[self.i]
self.i += 1
# default is an operand word
id_int = match.BracketUnary(s)
if id_int == Id.Undefined_Tok:
id_int = match.BracketBinary(s)
if id_int == Id.Undefined_Tok:
id_int = match.BracketOther(s)
id_ = Id.Word_Compound if id_int == -1 else id_int
# NOTE: We only have the left spid now. It might be useful to add the
# right one.
w = word.String(id_, s, left_spid)
return w
def Read(self):
# type: () -> word__String
"""Interface used for special cases below."""
return self.ReadWord(lex_mode_e.ShCommand)
def Peek(self, offset):
# type: (int) -> str
"""For special cases."""
return self.cmd_val.argv[self.i + offset]
def Rewind(self, offset):
# type: (int) -> None
"""For special cases."""
self.i -= offset
class _WordEvaluator(word_eval.StringWordEvaluator):
def EvalWordToString(self, w, quote_kind=quote_e.Default):
# type: (word_t, quote_t) -> value__Str
# do_fnmatch: for the [[ == ]] semantics which we don't have!
# I think I need another type of node
# Maybe it should be BuiltinEqual and BuiltinDEqual? Parse it into a
# different tree.
assert w.tag_() == word_e.String
string_word = cast(word__String, w)
return value.Str(string_word.s)
def _TwoArgs(w_parser):
# type: (_StringWordEmitter) -> bool_expr_t
"""Returns an expression tree to be evaluated."""
w0 = w_parser.Read()
w1 = w_parser.Read()
if w0.s == '!':
return bool_expr.LogicalNot(bool_expr.WordTest(w1))
unary_id = match.BracketUnary(w0.s)
if unary_id == Id.Undefined_Tok:
# TODO:
# - separate lookup by unary
p_die('Expected unary operator, got %r (2 args)', w0.s, word=w0)
return bool_expr.Unary(unary_id, w1)
def _ThreeArgs(w_parser):
# type: (_StringWordEmitter) -> bool_expr_t
"""Returns an expression tree to be evaluated."""
w0 = w_parser.Read()
w1 = w_parser.Read()
w2 = w_parser.Read()
# NOTE: Order is important here.
binary_id = match.BracketBinary(w1.s)
if binary_id != Id.Undefined_Tok:
return bool_expr.Binary(binary_id, w0, w2)
if w1.s == '-a':
return bool_expr.LogicalAnd(bool_expr.WordTest(w0), bool_expr.WordTest(w2))
if w1.s == '-o':
return bool_expr.LogicalOr(bool_expr.WordTest(w0), bool_expr.WordTest(w2))
if w0.s == '!':
w_parser.Rewind(2)
child = _TwoArgs(w_parser)
return bool_expr.LogicalNot(child)
if w0.s == '(' and w2.s == ')':
return bool_expr.WordTest(w1)
p_die('Expected binary operator, got %r (3 args)', w1.s, word=w1)
class Test(vm._Builtin):
def __init__(self, need_right_bracket, exec_opts, mem, errfmt):
# type: (bool, optview.Exec, state.Mem, ErrorFormatter) -> None
self.need_right_bracket = need_right_bracket
self.exec_opts = exec_opts
self.mem = mem
self.errfmt = errfmt
def Run(self, cmd_val):
# type: (cmd_value__Argv) -> int
"""The test/[ builtin.
The only difference between test and [ is that [ needs a matching ].
"""
if self.need_right_bracket: # Preprocess right bracket
strs = cmd_val.argv
if not strs or strs[-1] != ']':
self.errfmt.Print_('missing closing ]', span_id=cmd_val.arg_spids[0])
return 2
# Remove the right bracket
cmd_val.argv.pop()
cmd_val.arg_spids.pop()
w_parser = _StringWordEmitter(cmd_val)
w_parser.Read() # dummy: advance past argv[0]
b_parser = bool_parse.BoolParser(w_parser)
# There is a fundamental ambiguity due to poor language design, in cases like:
# [ -z ]
# [ -z -a ]
# [ -z -a ] ]
#
# See posixtest() in bash's test.c:
# "This is an implementation of a Posix.2 proposal by <NAME>."
# It dispatches on expressions of length 0, 1, 2, 3, 4, and N args. We do
# the same here.
#
# Another ambiguity:
# -a is both a unary prefix operator and an infix operator. How to fix this
# ambiguity?
bool_node = None # type: bool_expr_t
n = len(cmd_val.argv) - 1
try:
if n == 0:
return 1 # [ ] is False
elif n == 1:
w = w_parser.Read()
bool_node = bool_expr.WordTest(w)
elif n == 2:
bool_node = _TwoArgs(w_parser)
elif n == 3:
bool_node = _ThreeArgs(w_parser)
if n == 4:
a0 = w_parser.Peek(0)
if a0 == '!':
w_parser.Read() # skip !
child = _ThreeArgs(w_parser)
bool_node = bool_expr.LogicalNot(child)
elif a0 == '(' and w_parser.Peek(3) == ')':
w_parser.Read() # skip ')'
bool_node = _TwoArgs(w_parser)
else:
pass # fallthrough
if bool_node is None:
bool_node = b_parser.ParseForBuiltin()
except error.Parse as e:
self.errfmt.PrettyPrintError(e, prefix='(test) ')
return 2
# mem: Don't need it for BASH_REMATCH? Or I guess you could support it
word_ev = _WordEvaluator()
bool_ev = sh_expr_eval.BoolEvaluator(self.mem, self.exec_opts, None, self.errfmt)
# We want [ a -eq a ] to always be an error, unlike [[ a -eq a ]]. This is a
# weird case of [[ being less strict.
bool_ev.Init_AlwaysStrict()
bool_ev.word_ev = word_ev
bool_ev.CheckCircularDeps()
try:
b = bool_ev.EvalB(bool_node)
except error._ErrorWithLocation as e:
# We want to catch e_die() and e_strict(). Those are both FatalRuntime
# errors now, but it might not make sense later.
# NOTE: This doesn't seem to happen. We have location info for all
# errors that arise out of [.
#if not e.HasLocation():
# raise
self.errfmt.PrettyPrintError(e, prefix='(test) ')
return 2 # 1 means 'false', and this usage error is like a parse error.
status = 0 if b else 1
return status
|
StarcoderdataPython
|
1640553
|
<reponame>angad11121/red-eye-meme
from flask import Flask, request, render_template, url_for, redirect
from PIL import Image
import numpy as np
import os
import cv2
from gaze_tracking import GazeTracking
from werkzeug.utils import secure_filename
ALLOWED_EXTENSIONS = {'jpg', 'jpeg', 'png'}
DIRNAME = os.path.dirname(__file__)
UPLOAD_FOLDER = os.path.join(DIRNAME,"source_img")
# Flask instantiation
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
count = 0
def allowed_file(filename):
return '.' in filename and filename.rsplit('.',1)[1].lower() in ALLOWED_EXTENSIONS
def imagecov(photoname, relative_eye_size=1.5):
global count
    '''
    Keep the image in the source_img folder and
    pass its file name in photoname.
    '''
photoname = photoname
sourcename = DIRNAME + '/source_img/' + photoname
finalname = DIRNAME + '/static/' + str(count)+".jpg"
'''
You can change the relative eye size to optimize the image further
'''
# relative_eye_size = 1.5
gaze = GazeTracking()
frame = cv2.imread(sourcename)
# cv2.imshow("Demo1", frame)
gaze.refresh(frame)
frame = gaze.annotated_frame()
left_pupil = gaze.pupil_left_coords()
right_pupil = gaze.pupil_right_coords()
try:
distance = (left_pupil[0] - right_pupil[0]) * (left_pupil[0] - right_pupil[0]) + (left_pupil[1] - right_pupil[1]) * (left_pupil[1] - right_pupil[1])
except:
return False
distance = np.sqrt(distance)
print(distance)
face_image = Image.open(sourcename)
eye_image = Image.open(DIRNAME + '/source_img/redeye.png')
eye_image = eye_image.resize((int(distance*2*relative_eye_size),int(distance*relative_eye_size)))
eye_image = eye_image.rotate(15)
Image.Image.paste(face_image, eye_image,(left_pupil[0] - int(distance*relative_eye_size),left_pupil[1]-int(distance*relative_eye_size/2)), eye_image)
Image.Image.paste(face_image, eye_image,(right_pupil[0] - int(distance*relative_eye_size),right_pupil[1]-int(distance*relative_eye_size/2)), eye_image)
count+=1
# face_image.show()
face_image.save(finalname)
# eye_image.show()
return True
links = {}
# Driver code
@app.route("/failed")
def failure():
return 'Program failed to find any eyes :('
@app.route("/", methods=['GET','POST'])
def index():
global count
if request.method=="POST":
file = request.files['file']
if file:
filename=secure_filename(file.filename)
# print("hello "+os.path.join(app.config['UPLOAD_FOLDER'], filename))
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
print("hello "+ filename)
if(imagecov(filename)):
return redirect("static/"+str(count-1)+".jpg")
else:
return redirect(url_for('failure'))
return render_template('index.html')
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8000, debug=True)
|
StarcoderdataPython
|
1600014
|
# ______________________________________________________________________________
# ******************************************************************************
#
# The simplest robot task: Just go and reach a point
#
# ______________________________________________________________________________
# ******************************************************************************
#-----------------------------------------------------------------------------
#SET THE PATH TO THE URDF AND MESHES
#Define robotName, urdfpath and initialConfig
#Make sure talos_description is in the ROS_PACKAGE_PATH
#from rospkg import RosPack
#rospack = RosPack()
#urdfPath = rospack.get_path('talos_description')+"/robots/talos_full_collision.urdf"
#urdfDir = [rospack.get_path('talos_description')+"/../"]
URDFPATH = "~/git/pyrene/talos-data"+"/robots/talos_reduced.urdf"
URDFDIR = ["~/git/pyrene/talos-data"+"/../"]
MOTION_SEQUENCE = "~/git/pyrene/pyrene-motions/grabHandrail15/stairs_15cm_handrail_grab_actuated"
DISPLAY = True
dt = 1e-3
robotName = 'TALOS'
OperationalPointsMap = {'left-wrist' : 'arm_left_7_joint',
'right-wrist' : 'arm_right_7_joint',
'left-ankle' : 'leg_left_6_joint',
'right-ankle' : 'leg_right_6_joint',
'gaze' : 'head_2_joint',
'waist' : 'root_joint',
'chest' : 'torso_2_joint'}
halfSitting = (0.0, 0.0, 1.018213, 0.00 , 0.0, 0.0, #Free flyer
0.0, 0.0, -0.411354, 0.859395, -0.448041, -0.001708, #Left Leg
0.0, 0.0, -0.411354, 0.859395, -0.448041, -0.001708, #Right Leg
0.0 , 0.006761, #Chest
0.25847 , 0.173046, -0.0002, -0.525366, 0.0, -0.0, 0.1, 0.1, #Left Arm
-0.25847 , -0.173046, 0.0002 , -0.525366, 0.0, 0.0, 0.1, 0.1, #Right Arm
0., 0. #Head
)
#-----------------------------------------------------------------------------
#---- ROBOT SPECIFICATIONS----------------------------------------------------
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
#---- DYN --------------------------------------------------------------------
#-----------------------------------------------------------------------------
from pinocchio.robot_wrapper import RobotWrapper
import pinocchio as se3
from dynamic_graph.sot.dynamics_pinocchio import fromSotToPinocchio
pinocchioRobot = RobotWrapper(URDFPATH, URDFDIR, se3.JointModelFreeFlyer())
pinocchioRobot.initDisplay(loadModel=DISPLAY)
if DISPLAY:
pinocchioRobot.display(fromSotToPinocchio(halfSitting))
from dynamic_graph.sot.dynamics_pinocchio.humanoid_robot import HumanoidRobot
robot = HumanoidRobot(robotName, pinocchioRobot.model,
pinocchioRobot.data, halfSitting, OperationalPointsMap)
# ------------------------------------------------------------------------------
# ---- Kinematic Stack of Tasks (SoT) -----------------------------------------
# ------------------------------------------------------------------------------
from dynamic_graph import plug
from dynamic_graph.sot.core import SOT
sot = SOT('sot')
sot.setSize(robot.dynamic.getDimension())
plug(sot.control,robot.device.control)
# DEFINE POSTURE TASK
from dynamic_graph.sot.core import Task, FeatureGeneric, GainAdaptive
from dynamic_graph.sot.core.meta_tasks import setGain
from dynamic_graph.sot.core.matrix_util import matrixToTuple
from numpy import identity, hstack, zeros
task_name = "posture_task"
taskPosture = Task(task_name)
taskPosture.dyn = robot.dynamic
taskPosture.feature = FeatureGeneric('feature_'+task_name)
taskPosture.featureDes = FeatureGeneric('feature_des_'+task_name)
taskPosture.gain = GainAdaptive("gain_"+task_name)
robotDim = robot.dynamic.getDimension()
first_6 = zeros((32,6))
other_dof = identity(robotDim-6)
jacobian_posture = hstack([first_6, other_dof])
taskPosture.feature.jacobianIN.value = matrixToTuple( jacobian_posture )
taskPosture.feature.setReference(taskPosture.featureDes.name)
taskPosture.add(taskPosture.feature.name)
#DEFINE SEQUENCE PLAYER
from dynamic_graph.sot.tools import SimpleSeqPlay
seqplay = SimpleSeqPlay("seq_play")
seqplay.load(MOTION_SEQUENCE)
#MAKE CONNECTIONS
from dynamic_graph.sot.core import Selec_of_vector
plug(seqplay.posture, taskPosture.featureDes.errorIN)
getPostureValue = Selec_of_vector("current_posture")
getPostureValue.selec(6,robotDim)
plug(robot.dynamic.position, getPostureValue.sin)
plug(getPostureValue.sout, taskPosture.feature.errorIN)
plug(getPostureValue.sout, seqplay.currentPosture)
setGain(taskPosture.gain,(4.9,0.9,0.01,0.9))
plug(taskPosture.gain.gain, taskPosture.controlGain)
plug(taskPosture.error, taskPosture.gain.error)
#START SEQUENCE PLAYER
seqplay.start()
taskPosture.featureDes.errorIN.recompute(0)
#PUSH TO SOLVER
sot.push(taskPosture.name)
#-------------------------------------------------------------------------------
#----- MAIN LOOP ---------------------------------------------------------------
#-------------------------------------------------------------------------------
def runner(n):
for i in xrange(n):
robot.device.increment(dt)
pinocchioRobot.display(fromSotToPinocchio(robot.device.state.value))
runner(3000)
|
StarcoderdataPython
|
161926
|
import io
import aiohttp
from PIL import Image
from plugin_system import Plugin
plugin = Plugin('Mirror', usage=["отзеркаль <attached photo> - mirrors the attached photo"])
FAIL_MSG = 'Unfortunately, something went wrong :('
@plugin.on_command('отзеркаль')
async def mirror(msg, args):
photo = False
for k, v in msg.brief_attaches.items():
if '_type' in k and v == "photo":
photo = True
break
if not photo:
        return await msg.answer('You did not attach a photo!')
attach = (await msg.full_attaches)[0]
if not attach.link:
        return await msg.answer('You did not attach a photo!')
async with aiohttp.ClientSession() as sess:
async with sess.get(attach.link) as response:
img = Image.open(io.BytesIO(await response.read()))
if not img:
        return await msg.answer('Unfortunately, your photo has disappeared!')
w, h = img.size
part = img.crop((0, 0, w / 2, h))
part1 = part.transpose(Image.FLIP_LEFT_RIGHT)
img.paste(part1, (round(w / 2), 0))
buffer = io.BytesIO()
img.save(buffer, format='png')
buffer.seek(0)
result = await msg.vk.upload_photo(buffer)
    return await msg.answer('Here you go', attachment=str(result))
|
StarcoderdataPython
|
1650786
|
import sys
sys.path.append("..")
from models.network import *
from utils.utils import get_args
from utils.config import process_config
from utils.dirs import create_dirs
from data_loader.data_generator import *
from trainer.trainer import Trainer
from utils.logger import Logger
def main():
try:
args = get_args()
config = process_config(args.config)
except:
print("missing or invalid arguments")
exit(0)
create_dirs([config.summary_dir,config.checkpoint_dir])
dataset = DataGenerator(config)
sess = tf.Session()
logger = Logger(sess, config)
net = YOLO2(config)
trainer = Trainer(sess,net,dataset,config,logger)
trainer.train()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1740489
|
<reponame>DaleProctor/tscharts
#(C) Copyright <NAME> 2017-2020
#(C) Copyright Thousand Smiles Foundation 2017-2020
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
'''
Unit tests for the category API. Assumes django server is up
and running on the specified host and port
'''
import unittest
import getopt, sys
import json
from tschartslib.service.serviceapi import ServiceAPI
from tschartslib.tscharts.tscharts import Login, Logout
class CreateCategory(ServiceAPI):
def __init__(self, host, port, token, payload):
super(CreateCategory, self).__init__()
self.setHttpMethod("POST")
self.setHost(host)
self.setPort(port)
self.setToken(token)
self.setPayload(payload)
self.setURL("tscharts/v1/category/")
class GetCategory(ServiceAPI):
def makeURL(self):
hasQArgs = False
if not self._id == None:
base = "tscharts/v1/category/{}/".format(self._id)
else:
base = "tscharts/v1/category/"
if not self._name == None:
if not hasQArgs:
base += "?"
else:
base += "&"
base += "name={}".format(self._name)
hasQArgs = True
self.setURL(base)
def __init__(self, host, port, token):
super(GetCategory, self).__init__()
self.setHttpMethod("GET")
self.setHost(host)
self.setPort(port)
self.setToken(token)
self._name = None
self._id = None
self.makeURL();
def setId(self, id):
self._id = id;
self.makeURL()
def setName(self,val):
self._name = val
self.makeURL()
class DeleteCategory(ServiceAPI):
def __init__(self, host, port, token, id):
super(DeleteCategory, self).__init__()
self.setHttpMethod("DELETE")
self.setHost(host)
self.setPort(port)
self.setToken(token)
self.setURL("tscharts/v1/category/{}/".format(id))
class TestTSCategory(unittest.TestCase):
def setUp(self):
login = Login(host, port, username, password)
ret = login.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("token" in ret[1])
global token
token = ret[1]["token"]
def testCreateCategory(self):
data = {}
data["name"] = "Category 1"
x = CreateCategory(host, port, token, data)
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 200)
id = int(ret[1]["id"])
x = GetCategory(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
ret = ret[1]
self.assertEqual(ret['name'], "Category 1")
x = CreateCategory(host, port, token, data)
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 400) #bad request test uniqueness
x = DeleteCategory(host, port, token, id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = GetCategory(host, port, token)
x.setId(id)
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 404) # not found
data = {}
x = CreateCategory(host, port, token, data)
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 400) #bad request
data["names"] = "Category 1"
x = CreateCategory(host, port, token, data)
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 400) #bad request
data = {}
data["name"] = ""
x = CreateCategory(host, port, token, data)
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 400)
data = {}
data["name"] = 123
x = CreateCategory(host, port, token, data)
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 400)
def testDeleteCategory(self):
data = {}
data["name"] = "Category 1"
x = CreateCategory(host, port, token, data)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("id" in ret[1])
id = int(ret[1]["id"])
x = GetCategory(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
ret = ret[1]
self.assertEqual(ret["name"], "Category 1")
self.assertEqual(ret["id"], id)
x = DeleteCategory(host, port, token, id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = GetCategory(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404) # not found
x = DeleteCategory(host, port, token, id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404) # not found
def testGetCategory(self):
data = {}
data["name"] = "Category 1"
x = CreateCategory(host, port, token, data)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("id" in ret[1])
x = GetCategory(host, port, token); #test get a category by its id
x.setId(int(ret[1]["id"]))
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
ret = ret[1]
id = int(ret["id"])
self.assertTrue(ret["name"] == "Category 1")
x = DeleteCategory(host, port, token, id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = GetCategory(host, port, token)
x.setId(id)
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 404)
data = {}
data["name"] = "Category 2"
x = CreateCategory(host, port, token, data)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("id" in ret[1])
id = ret[1]["id"]
x = GetCategory(host, port, token)
x.setName("Category 2")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue(ret[1]["name"] == "Category 2")
x = GetCategory(host, port, token)
x.setName("aaaa")
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 404) #not found
x = DeleteCategory(host, port, token, id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
namelist = ['b','bbc','ad','ac','aac']
copynamelist = ['b','bbc','ad','ac','aac']
idlist = []
for x in namelist:
data = {}
data["name"] = x
x = CreateCategory(host, port, token, data)
ret = x.send(timeout = 30)
idlist.append(ret[1]["id"])
self.assertEqual(ret[0], 200)
x = GetCategory(host, port, token) #test get a list of categories
ret = x.send(timeout = 30)
for name in ret[1]:
self.assertTrue(name["name"] in namelist)
copynamelist.remove(name["name"])
self.assertEqual(copynamelist, [])
for id in idlist:
x = DeleteCategory(host, port, token, id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
for id in idlist:
x = GetCategory(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404) #not found
def usage():
print("category [-h host] [-p port] [-u username] [-w password]")
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "h:p:u:w:")
except getopt.GetoptError as err:
print(str(err))
usage()
sys.exit(2)
global host
host = "127.0.0.1"
global port
port = 8000
global username
username = None
global password
password = None
for o, a in opts:
if o == "-h":
host = a
elif o == "-p":
port = int(a)
elif o == "-u":
username = a
elif o == "-w":
password = a
else:
assert False, "unhandled option"
unittest.main(argv=[sys.argv[0]])
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
4840625
|
# -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
from org.o3project.odenos.core.component.dummy_driver import DummyDriver
from org.o3project.odenos.remoteobject.manager.system.component_connection_logic_and_network\
import ComponentConnectionLogicAndNetwork
from org.o3project.odenos.remoteobject.manager.system.component_connection\
import ComponentConnection
from org.o3project.odenos.core.util.system_manager_interface import (
SystemManagerInterface
)
from org.o3project.odenos.core.component.network.packet.out_packet_added\
import OutPacketAdded
from org.o3project.odenos.core.util.network_interface import NetworkInterface
from org.o3project.odenos.core.component.network.flow.flow import Flow
from org.o3project.odenos.remoteobject.message.event import Event
from mock import Mock, patch
import unittest
from contextlib import nested
class DummyDriverTest(unittest.TestCase):
Dispatcher = Mock()
Dispatcher.system_manager_id = "ObjectId1"
Object_id = "ObjectId1"
def setUp(self):
self.target = DummyDriver(self.Object_id, self.Dispatcher)
def tearDown(self):
self.target = None
def test_constructor(self):
self.assertEqual(self.target._super_type, "Driver")
self.assertEqual(self.target.dispatcher, self.Dispatcher)
self.assertEqual(self.target.object_id, self.Object_id)
self.assertEqual(self.target._DummyDriver__network_id, None)
def test__connection_changed_added_pre_True(self):
cclan = ComponentConnectionLogicAndNetwork("slicer1->network1",
"original",
"running",
self.Object_id,
"NetworkId")
msg = Mock()
msg.curr = cclan
self.result = self.target._connection_changed_added_pre(msg)
self.assertEqual(self.result, True)
def test__connection_changed_delete_pre_True(self):
cclan = ComponentConnectionLogicAndNetwork("slicer1->network1",
"original",
"running",
self.Object_id,
"NetworkId")
msg = Mock()
msg.prev = cclan
self.result = self.target._connection_changed_delete_pre(msg)
self.assertEqual(self.result, True)
def test__connection_changed_added_pre_NotType_False(self):
ComponentConnectioTmp = ComponentConnection("slicer1_network1",
"original",
"running")
msg = Mock()
msg.curr = ComponentConnectioTmp
self.result = self.target._connection_changed_added_pre(msg)
self.assertEqual(self.result, False)
def test__connection_changed_delete_pre_NotType_False(self):
ComponentConnectioTmp = ComponentConnection("slicer1_network1",
"original",
"running")
msg = Mock()
msg.prev = ComponentConnectioTmp
self.result = self.target._connection_changed_delete_pre(msg)
self.assertEqual(self.result, False)
def test__connection_changed_added_pre_Notlogicid_False(self):
cclan = ComponentConnectionLogicAndNetwork("slicer1->network1",
"original",
"running",
"Logicid1",
"NetworkId")
msg = Mock()
msg.curr = cclan
self.result = self.target._connection_changed_added_pre(msg)
self.assertEqual(self.result, False)
def test__connection_changed_delete_pre_Notlogicid_False(self):
cclan = ComponentConnectionLogicAndNetwork("slicer1->network1",
"original",
"running",
"Logicid1",
"NetworkId")
msg = Mock()
msg.prev = cclan
self.result = self.target._connection_changed_delete_pre(msg)
self.assertEqual(self.result, False)
def test__connection_changed_added_pre_NotNoneNetworkid_False(self):
cclan = ComponentConnectionLogicAndNetwork("slicer1->network1",
"original",
"running",
self.Object_id,
"NetworkId")
msg = Mock()
msg.curr = cclan
self.target._DummyDriver__network_id = "NetworkId"
with nested(
patch('org.o3project.odenos.core.util.system_manager_interface.'
'SystemManagerInterface.put_connection'
)) as m_put_connection:
self.result = self.target._connection_changed_added_pre(msg)
self.assertEqual(m_put_connection[0].call_count, 1)
m_put_connection[0].assert_any_call(cclan)
self.assertEqual(cclan.state, ComponentConnection.State.ERROR)
self.assertEqual(self.result, False)
def test_connection_changed_added(self):
cclan = ComponentConnectionLogicAndNetwork("slicer1->network1",
"original",
"running",
self.Object_id,
"NetworkId")
msg = Mock()
msg.curr = cclan
with nested(
patch('org.o3project.odenos.core.component.dummy_driver.'
'DummyDriver._DummyDriver__subscribe_network_component'
)) as m_subscribe_network_comp:
self.assertEqual(self.target._DummyDriver__network_id, None)
self.target._connection_changed_added(msg)
self.assertEqual(self.target._DummyDriver__network_id,
"NetworkId")
self.assertEqual(m_subscribe_network_comp[0].call_count, 1)
def test_connection_changed_delete(self):
cclan = ComponentConnectionLogicAndNetwork("slicer1->network1",
"original",
"running",
self.Object_id,
"NetworkId")
msg = Mock()
msg.prev = cclan
self.target._DummyDriver__network_id = "NetworkId"
with nested(
patch('org.o3project.odenos.core.util.system_manager_interface.'
'SystemManagerInterface.put_connection'
),
patch('org.o3project.odenos.core.component.dummy_driver.'
'DummyDriver._DummyDriver__unsubscribe_network_component'
)) as (m_put_connection, m_unsubscribe_network_comp):
self.assertEqual(self.target._DummyDriver__network_id,
"NetworkId")
self.target._connection_changed_delete(msg)
self.assertEqual(self.target._DummyDriver__network_id,
None)
self.assertEqual(m_unsubscribe_network_comp.call_count, 1)
self.assertEqual(m_put_connection.call_count, 2)
def test___subscribe_network_component(self):
self.target._DummyDriver__network_id = "NetworkId"
with nested(
patch('org.o3project.odenos.core.component.logic.Logic.'
'_add_event_subscription'),
patch('org.o3project.odenos.core.component.logic.Logic.'
'_update_event_subscription'),
patch('org.o3project.odenos.remoteobject.remote_object.'
'RemoteObject._apply_event_subscription'
)) as (m_add_event_subscription,
m_update_event_subscription,
m_apply_event_subscription):
self.target._DummyDriver__subscribe_network_component()
self.assertEqual(m_add_event_subscription.call_count, 2)
m_add_event_subscription.assert_any_call("FlowChanged",
"NetworkId")
m_add_event_subscription.assert_any_call("OutPacketAdded",
"NetworkId")
self.assertEqual(m_update_event_subscription.call_count, 1)
m_update_event_subscription.assert_any_call("FlowChanged",
"NetworkId",
attributes=[])
self.assertEqual(m_apply_event_subscription.call_count, 1)
def test___unsubscribe_network_component(self):
self.target._DummyDriver__network_id = "NetworkId"
with nested(
patch('org.o3project.odenos.core.component.logic.Logic.'
'_remove_event_subscription'),
patch('org.o3project.odenos.remoteobject.remote_object.'
'RemoteObject._apply_event_subscription'
)) as (m_remove_event_subscription,
m_apply_event_subscription):
self.target._DummyDriver__unsubscribe_network_component()
self.assertEqual(m_remove_event_subscription.call_count, 2)
m_remove_event_subscription.assert_any_call("FlowChanged",
"NetworkId")
m_remove_event_subscription.assert_any_call("OutPacketAdded",
"NetworkId")
self.assertEqual(m_apply_event_subscription.call_count, 1)
def test__on_flow_added_success(self):
Dispatcher = Mock()
Dispatcher.network_id = "NetworkId"
Network_id = "NetworkId"
flow_body1 = Flow("BasicFlow", "v01",
"FlowId1",
"Owner", True,
65535, "none", {})
flow_body2 = Flow("BasicFlow", "v01",
"FlowId1",
"Owner", True,
65535, "established", {})
self.target._network_interfaces["NetworkId"] =\
NetworkInterface(Dispatcher, Network_id)
with nested(
patch('org.o3project.odenos.core.util.network_interface.'
'NetworkInterface.get_flow', return_value=flow_body1),
patch('org.o3project.odenos.core.util.network_interface.'
'NetworkInterface.put_flow'
)) as (m_get_flow,
m_put_flow):
self.target._on_flow_added("NetworkId", flow_body2)
self.assertEqual(m_get_flow.call_count, 2)
m_get_flow.assert_any_call("FlowId1")
self.assertEqual(m_put_flow.call_count, 2)
m_put_flow.assert_any_call(flow_body1)
self.assertEqual(flow_body1.status, "established")
def test__on_flow_added_NotNetworkId(self):
Dispatcher = Mock()
Dispatcher.network_id = "NetworkId"
Network_id = "NetworkId"
flow_body1 = Flow("BasicFlow", "v01",
"FlowId1",
"Owner", True,
65535, "none", {})
flow_body2 = Flow("BasicFlow", "v01",
"FlowId1",
"Owner", True,
65535, "established", {})
self.target._network_interfaces["NetworkId"] =\
NetworkInterface(Dispatcher, Network_id)
with nested(
patch('org.o3project.odenos.core.util.network_interface.'
'NetworkInterface.get_flow', return_value=flow_body1),
patch('org.o3project.odenos.core.util.network_interface.'
'NetworkInterface.put_flow'
)) as (m_get_flow,
m_put_flow):
self.target._on_flow_added("NetworkId_dmy", flow_body2)
self.assertEqual(m_get_flow.call_count, 0)
self.assertEqual(m_put_flow.call_count, 0)
self.assertEqual(flow_body1.status, "none")
def test__on_flow_added_NoneTargetFlow(self):
Dispatcher = Mock()
Dispatcher.network_id = "NetworkId"
Network_id = "NetworkId"
flow_body1 = Flow("BasicFlow", "v01",
"FlowId1",
"Owner", True,
65535, "none", {})
flow_body2 = Flow("BasicFlow", "v01",
"FlowId1",
"Owner", True,
65535, "established", {})
self.target._network_interfaces["NetworkId"] =\
NetworkInterface(Dispatcher, Network_id)
with nested(
patch('org.o3project.odenos.core.util.network_interface.'
'NetworkInterface.get_flow', return_value=None),
patch('org.o3project.odenos.core.util.network_interface.'
'NetworkInterface.put_flow'
)) as (m_get_flow,
m_put_flow):
self.target._on_flow_added("NetworkId", flow_body2)
self.assertEqual(m_get_flow.call_count, 1)
m_get_flow.assert_any_call("FlowId1")
self.assertEqual(m_put_flow.call_count, 0)
self.assertEqual(flow_body1.status, "none")
def test__on_flow_added_status_NotNone(self):
Dispatcher = Mock()
Dispatcher.network_id = "NetworkId"
Network_id = "NetworkId"
flow_body1 = Flow("BasicFlow", "v01",
"FlowId1",
"Owner", True,
65535, "establishing", {})
flow_body2 = Flow("BasicFlow", "v01",
"FlowId1",
"Owner", True,
65535, "established", {})
self.target._network_interfaces["NetworkId"] =\
NetworkInterface(Dispatcher, Network_id)
with nested(
patch('org.o3project.odenos.core.util.network_interface.'
'NetworkInterface.get_flow', return_value=flow_body1),
patch('org.o3project.odenos.core.util.network_interface.'
'NetworkInterface.put_flow'
)) as (m_get_flow,
m_put_flow):
self.target._on_flow_added("NetworkId", flow_body2)
self.assertEqual(m_get_flow.call_count, 1)
m_get_flow.assert_any_call("FlowId1")
self.assertEqual(m_put_flow.call_count, 0)
self.assertEqual(flow_body1.status, "establishing")
def test__on_flow_added_enabled_False(self):
Dispatcher = Mock()
Dispatcher.network_id = "NetworkId"
Network_id = "NetworkId"
flow_body1 = Flow("BasicFlow", "v01",
"FlowId1",
"Owner", False,
65535, "none", {})
flow_body2 = Flow("BasicFlow", "v01",
"FlowId1",
"Owner", True,
65535, "established", {})
self.target._network_interfaces["NetworkId"] =\
NetworkInterface(Dispatcher, Network_id)
with nested(
patch('org.o3project.odenos.core.util.network_interface.'
'NetworkInterface.get_flow', return_value=flow_body1),
patch('org.o3project.odenos.core.util.network_interface.'
'NetworkInterface.put_flow'
)) as (m_get_flow,
m_put_flow):
self.target._on_flow_added("NetworkId", flow_body2)
self.assertEqual(m_get_flow.call_count, 1)
m_get_flow.assert_any_call("FlowId1")
self.assertEqual(m_put_flow.call_count, 0)
self.assertEqual(flow_body1.status, "none")
def test__on_flow_update(self):
Curr = Mock()
Prev = Mock()
Attrs = {}
with nested(
patch('org.o3project.odenos.core.component.dummy_driver.'
'DummyDriver._on_flow_added'
)) as m_on_flow_added:
self.target._on_flow_update("NetworkId", Prev, Curr, Attrs)
self.assertEqual(m_on_flow_added[0].call_count, 1)
m_on_flow_added[0].assert_any_call("NetworkId", Curr)
def test__on_flow_delete_success(self):
Dispatcher = Mock()
Dispatcher.network_id = "NetworkId"
Network_id = "NetworkId"
flow_body1 = Flow("BasicFlow", "v01",
"FlowId1",
"Owner", True,
65535, "established", {})
flow_body2 = Flow("BasicFlow", "v01",
"FlowId1",
"Owner", True,
65535, "established", {})
self.target._network_interfaces["NetworkId"] =\
NetworkInterface(Dispatcher, Network_id)
with nested(
patch('org.o3project.odenos.core.util.network_interface.'
'NetworkInterface.get_flow', return_value=flow_body1),
patch('org.o3project.odenos.core.util.network_interface.'
'NetworkInterface.put_flow'
)) as (m_get_flow,
m_put_flow):
self.target._on_flow_delete("NetworkId", flow_body2)
self.assertEqual(m_get_flow.call_count, 2)
m_get_flow.assert_any_call("FlowId1")
self.assertEqual(m_put_flow.call_count, 2)
m_put_flow.assert_any_call(flow_body1)
self.assertEqual(flow_body1.status, "none")
def test__on_flow_delete_NotNetworkId(self):
Dispatcher = Mock()
Dispatcher.network_id = "NetworkId"
Network_id = "NetworkId"
flow_body1 = Flow("BasicFlow", "v01",
"FlowId1",
"Owner", True,
65535, "established", {})
flow_body2 = Flow("BasicFlow", "v01",
"FlowId1",
"Owner", True,
65535, "established", {})
self.target._network_interfaces["NetworkId"] =\
NetworkInterface(Dispatcher, Network_id)
with nested(
patch('org.o3project.odenos.core.util.network_interface.'
'NetworkInterface.get_flow', return_value=flow_body1),
patch('org.o3project.odenos.core.util.network_interface.'
'NetworkInterface.put_flow'
)) as (m_get_flow,
m_put_flow):
self.target._on_flow_delete("NetworkId_dmy", flow_body2)
self.assertEqual(m_get_flow.call_count, 0)
self.assertEqual(m_put_flow.call_count, 0)
self.assertEqual(flow_body1.status, "established")
def test__on_flow_delete_NoneTargetFlow(self):
Dispatcher = Mock()
Dispatcher.network_id = "NetworkId"
Network_id = "NetworkId"
flow_body1 = Flow("BasicFlow", "v01",
"FlowId1",
"Owner", True,
65535, "established", {})
flow_body2 = Flow("BasicFlow", "v01",
"FlowId1",
"Owner", True,
65535, "established", {})
self.target._network_interfaces["NetworkId"] =\
NetworkInterface(Dispatcher, Network_id)
with nested(
patch('org.o3project.odenos.core.util.network_interface.'
'NetworkInterface.get_flow', return_value=None),
patch('org.o3project.odenos.core.util.network_interface.'
'NetworkInterface.put_flow'
)) as (m_get_flow,
m_put_flow):
self.target._on_flow_delete("NetworkId", flow_body2)
self.assertEqual(m_get_flow.call_count, 1)
m_get_flow.assert_any_call("FlowId1")
self.assertEqual(m_put_flow.call_count, 0)
self.assertEqual(flow_body1.status, "established")
def test__on_flow_delete_status_NotEstablished(self):
Dispatcher = Mock()
Dispatcher.network_id = "NetworkId"
Network_id = "NetworkId"
flow_body1 = Flow("BasicFlow", "v01",
"FlowId1",
"Owner", True,
65535, "none", {})
flow_body2 = Flow("BasicFlow", "v01",
"FlowId1",
"Owner", True,
65535, "established", {})
self.target._network_interfaces["NetworkId"] =\
NetworkInterface(Dispatcher, Network_id)
with nested(
patch('org.o3project.odenos.core.util.network_interface.'
'NetworkInterface.get_flow', return_value=flow_body1),
patch('org.o3project.odenos.core.util.network_interface.'
'NetworkInterface.put_flow'
)) as (m_get_flow,
m_put_flow):
self.target._on_flow_delete("NetworkId", flow_body2)
self.assertEqual(m_get_flow.call_count, 1)
m_get_flow.assert_any_call("FlowId1")
self.assertEqual(m_put_flow.call_count, 0)
self.assertEqual(flow_body1.status, "none")
def test__on_flow_delete_enabled_False(self):
Dispatcher = Mock()
Dispatcher.network_id = "NetworkId"
Network_id = "NetworkId"
flow_body1 = Flow("BasicFlow", "v01",
"FlowId1",
"Owner", False,
65535, "none", {})
flow_body2 = Flow("BasicFlow", "v01",
"FlowId1",
"Owner", True,
65535, "established", {})
self.target._network_interfaces["NetworkId"] =\
NetworkInterface(Dispatcher, Network_id)
with nested(
patch('org.o3project.odenos.core.util.network_interface.'
'NetworkInterface.get_flow', return_value=flow_body1),
patch('org.o3project.odenos.core.util.network_interface.'
'NetworkInterface.put_flow'
)) as (m_get_flow,
m_put_flow):
self.target._on_flow_delete("NetworkId", flow_body2)
self.assertEqual(m_get_flow.call_count, 1)
m_get_flow.assert_any_call("FlowId1")
self.assertEqual(m_put_flow.call_count, 0)
self.assertEqual(flow_body1.status, "none")
def test__do_event_outpacketadded_success(self):
Dispatcher = Mock()
Dispatcher.network_id = "NetworkId"
Network_id = "NetworkId"
Evbody = {"id": "OutPacketId1"}
Event1 = Event("NetworkId", "OutPacketAdded", Evbody)
Event_packed = OutPacketAdded.create_from_packed(Evbody)
self.target._network_interfaces["NetworkId"] =\
NetworkInterface(Dispatcher, Network_id)
with nested(
patch('org.o3project.odenos.core.component.network.packet.'
'out_packet_added.OutPacketAdded.create_from_packed',
return_value=Event_packed),
patch('org.o3project.odenos.core.util.network_interface.'
'NetworkInterface.del_out_packet'),
patch('logging.error'
)) as (m_create_from_packed,
m_del_out_packet,
m_logerror):
self.target._do_event_outpacketadded(Event1)
self.assertEqual(m_create_from_packed.call_count, 1)
m_create_from_packed.assert_any_call(Evbody)
self.assertEqual(m_del_out_packet.call_count, 1)
m_del_out_packet.assert_any_call("OutPacketId1")
self.assertEqual(m_logerror.call_count, 0)
def test__do_event_outpacketadded_create_from_packed_error(self):
Dispatcher = Mock()
Dispatcher.network_id = "NetworkId"
Network_id = "NetworkId"
Evbody = {"id": "OutPacketId1"}
Event1 = Event("NetworkId", "OutPacketAdded", Evbody)
self.target._network_interfaces["NetworkId"] =\
NetworkInterface(Dispatcher, Network_id)
debug_log = "Receive Invalid OutPacketAdded Message"\
+ " KeyError: " + "1"
with nested(
patch('org.o3project.odenos.core.component.network.packet.'
'out_packet_added.OutPacketAdded.create_from_packed',
side_effect=KeyError(1)),
patch('org.o3project.odenos.core.util.network_interface.'
'NetworkInterface.del_out_packet'),
patch('logging.error'
)) as (m_create_from_packed,
m_del_out_packet,
m_logerror):
self.target._do_event_outpacketadded(Event1)
self.assertEqual(m_create_from_packed.call_count, 1)
m_create_from_packed.assert_any_call(Evbody)
self.assertEqual(m_del_out_packet.call_count, 0)
self.assertEqual(m_logerror.call_count, 1)
m_logerror.assert_any_call(debug_log)
def test__do_event_outpacketadded_NotNetworkId(self):
Dispatcher = Mock()
Dispatcher.network_id = "NetworkId"
Network_id = "NetworkId"
Evbody = {"id": "OutPacketId1"}
Event1 = Event("NetworkId_dmy", "OutPacketAdded", Evbody)
Event_packed = OutPacketAdded.create_from_packed(Evbody)
self.target._network_interfaces[Network_id] =\
NetworkInterface(Dispatcher, Network_id)
with nested(
patch('org.o3project.odenos.core.component.network.packet.'
'out_packet_added.OutPacketAdded.create_from_packed',
return_value=Event_packed),
patch('org.o3project.odenos.core.util.network_interface.'
'NetworkInterface.del_out_packet'),
patch('logging.error'
)) as (m_create_from_packed,
m_del_out_packet,
m_logerror):
self.target._do_event_outpacketadded(Event1)
self.assertEqual(m_create_from_packed.call_count, 1)
m_create_from_packed.assert_any_call(Evbody)
self.assertEqual(m_del_out_packet.call_count, 0)
self.assertEqual(m_logerror.call_count, 0)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1791990
|
import smtplib
import src.irulez.log as log
import src.irulez.util as util
from abc import ABC, abstractmethod
logger = log.get_logger('mail_processor')
class MailProcessor(ABC):
@abstractmethod
def send_mail(self, json_object) -> None:
pass
class AuthenticateSMTPProcessor(MailProcessor):
def __init__(self, user: object, pwd: object, port: object, url: object):
self.gmail_user = user
        self.gmail_pwd = pwd
self._from = user
self.port = port
self.url = url
def send_mail(self, payload: str) -> None:
json_object = util.deserialize_json(payload)
recipient = json_object['mails']
to = recipient if type(recipient) is list else [recipient]
subject = json_object['subject']
body = json_object['message']
# Prepare actual message
message = """From: %s\nTo: %s\nSubject: %s\n\n%s
""" % (self._from, ", ".join(to), subject, body)
try:
server = smtplib.SMTP(self.url, self.port)
server.ehlo()
server.starttls()
server.login(self.gmail_user, self.gmail_pwd)
server.sendmail(self._from, to, message)
server.close()
logger.info(f"successfully sent the mail")
        except Exception as exc:
            logger.warning(f"failed to send mail: {exc}")
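# Hypothetical usage sketch: the SMTP host/port, credentials and recipient
# below are placeholders, and util.deserialize_json is assumed to accept a
# JSON string carrying the 'mails', 'subject' and 'message' keys read by
# send_mail() above.
if __name__ == '__main__':
    import json
    processor = AuthenticateSMTPProcessor(user='sender@example.com',
                                           pwd='app-password',
                                           port=587,
                                           url='smtp.gmail.com')
    payload = json.dumps({'mails': ['recipient@example.com'],
                          'subject': 'iRulez notification',
                          'message': 'An action was triggered.'})
    processor.send_mail(payload)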
|
StarcoderdataPython
|
64120
|
from math import trunc
numero = float(input('Type an integer: '))
print('The value entered was {} and its integer portion is {}'.format(numero, trunc(numero)))
'''
int(numero) => also works to extract an integer
'''
|
StarcoderdataPython
|
197820
|
<reponame>peanutbutter144/nixpkgs<gh_stars>100-1000
# Ugliest Python code I've ever written. -- aszlig
import sys
def get_plist(path):
in_pack = False
in_str = False
current_key = None
buf = ""
packages = {}
package_name = None
package_attrs = {}
with open(path, 'r') as setup:
for line in setup:
if in_str and line.rstrip().endswith('"'):
package_attrs[current_key] = buf + line.rstrip()[:-1]
in_str = False
continue
elif in_str:
buf += line
continue
if line.startswith('@'):
in_pack = True
package_name = line[1:].strip()
package_attrs = {}
elif in_pack and ':' in line:
key, value = line.split(':', 1)
if value.lstrip().startswith('"'):
if value.lstrip()[1:].rstrip().endswith('"'):
value = value.strip().strip('"')
else:
in_str = True
current_key = key.strip().lower()
buf = value.lstrip()[1:]
continue
package_attrs[key.strip().lower()] = value.strip()
elif in_pack:
in_pack = False
packages[package_name] = package_attrs
return packages
def main():
packages = get_plist(sys.argv[1])
to_include = set()
def traverse(package):
to_include.add(package)
attrs = packages.get(package, {})
deps = attrs.get('requires', '').split()
for new_dep in set(deps) - to_include:
traverse(new_dep)
map(traverse, sys.argv[2:])
sys.stdout.write('[\n')
for package, attrs in packages.iteritems():
if package not in to_include:
cats = [c.lower() for c in attrs.get('category', '').split()]
if 'base' not in cats:
continue
install_line = attrs.get('install')
if install_line is None:
continue
url, size, hash = install_line.split(' ', 2)
pack = [
' {',
' url = "{0}";'.format(url),
' hash = "{0}";'.format(hash),
' }',
];
sys.stdout.write('\n'.join(pack) + '\n')
sys.stdout.write(']\n')
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3275213
|
<reponame>lanargh/FaceMaskDetection<filename>utils.py<gh_stars>0
import os
import random
from os.path import basename
from shutil import copyfile
from zipfile import ZipFile
def genlabels(labels):
    # Write the label list with the two sentinel entries expected by labelme.
    with open("labels.txt", "w+") as f:
        f.write("__ignore__\r\n")
        f.write("_background_\r\n")
        for label in labels:
            f.write(label + "\r\n")
def splitdata(input_folder, output_folder, train_size = 0.8):
    # Define the output paths unconditionally so they exist even when the
    # directories were created on a previous run.
    train_dir = output_folder + '/train'
    if not os.path.exists(train_dir):
        os.makedirs(train_dir)
    val_dir = output_folder + '/validation'
    if not os.path.exists(val_dir):
        os.makedirs(val_dir)
train_counter = 0
validation_counter = 0
# Randomly assign an image to train or validation folder
for filename in os.listdir(input_folder):
if filename.endswith(".png"):
filetypes = ["json", "png"]
fileparts = filename.split('.')
if random.uniform(0, 1) <= train_size:
copyfile(os.path.join(input_folder, fileparts[0] + "." + filetypes[0]), os.path.join(train_dir, str(train_counter) + '.' + filetypes[0]))
copyfile(os.path.join(input_folder, filename), os.path.join(train_dir, str(train_counter) + '.' + filetypes[1]))
train_counter += 1
else:
copyfile(os.path.join(input_folder, fileparts[0] + "." + filetypes[0]), os.path.join(val_dir, str(validation_counter) + '.' + filetypes[0]))
copyfile(os.path.join(input_folder, filename), os.path.join(val_dir, str(validation_counter) + '.' + filetypes[1]))
validation_counter += 1
print('Copied ' + str(train_counter) + ' images to ' + train_dir)
print('Copied ' + str(validation_counter) + ' images to ' + val_dir)
def compress(folder_name, zip_name):
with ZipFile(zip_name + ".zip", "w") as zipObj:
for folderName, subfolders, filenames in os.walk(folder_name):
for filename in filenames:
filePath = os.path.join(folderName, filename)
zipObj.write(filePath, basename(filePath))
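# Hypothetical usage sketch: the folder and label names are placeholders;
# splitdata() expects labelme-style <name>.png / <name>.json pairs in the
# input folder, matching the filename handling above.
if __name__ == '__main__':
    genlabels(['with_mask', 'without_mask'])
    splitdata('annotated_images', 'split_data', train_size=0.8)
    compress('split_data', 'split_data')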
|
StarcoderdataPython
|
3202951
|
import os
import pytest
from intervaltree import Interval
from viridian_workflow import self_qc, primers
this_dir = os.path.dirname(os.path.abspath(__file__))
data_dir = os.path.join(this_dir, "data", "primers")
class StatsTest:
def __init__(self, fail):
self.fail = fail
self.log = []
self.config = self_qc.default_config
def check_for_failure(self, **kwargs):
return self.fail
def test_cigar_tuple_construction():
ref = "AAA"
query = "AAA"
cigar = [
(3, 0),
]
assert self_qc.cigar_to_alts(ref, query, cigar) == [(0, "A"), (1, "A"), (2, "A")]
ref = "AAA"
query = "ATTAA"
cigar = [(1, 0), (2, 1), (2, 0)]
assert self_qc.cigar_to_alts(ref, query, cigar) == [
(0, "A"),
# (1, "TT"),
(1, "A"),
(2, "A"),
]
ref = "ATTAA"
query = "AAA"
cigar = [(1, 0), (2, 2), (2, 0)]
assert self_qc.cigar_to_alts(ref, query, cigar) == [
(0, "A"),
(1, "-"),
(2, "-"),
(3, "A"),
(4, "A"),
]
ref = "ATTAA"
query = "AAA"
cigar = [(0, 1), (2, 2), (0, 2)]
assert self_qc.cigar_to_alts(ref, query, cigar, pysam=True) == [
(0, "A"),
(1, "-"),
(2, "-"),
(3, "A"),
(4, "A"),
]
ref = "AAAA"
query = "GGGAAAA"
cigar = [(3, 4), (4, 0)]
assert self_qc.cigar_to_alts(ref, query, cigar, q_pos=3) == [
(0, "A"),
(1, "A"),
(2, "A"),
(3, "A"),
]
def test_mappy_cigar_liftover():
amplicon = primers.Amplicon("test_amplicon")
seq = "CTTCAGGTGATGGCACAACAAGTCCTATTTGAACATAGACTCACGAGATTGCGGTTATACTTTCGAAAATGGGAATCTGGAGTAAAAGACTAAAGTTAGATACACAGTTGCTTCACTTCAGACTATTACCAGCTGTACTCAACTCAATTGAGTACAGACACTGGTGTTGAACATGTGCCATCTTCTTCATCTACAATAAAATTGTTGATGAGCCTGAAGAACATGGTCCAATTCACACAACGACGGTTCATCCGGAGTTGTTAATCCAGTAATGGAACCAATTTATGATGAACCGACGACGACTACTAGCGTGCCTTTGTGTTACTCAAGCTGATGAGTACGAACTTATGTACTCATTCGTTTCGGGAAGAGACAGGTACGTTAATAGTTAATAGCGTACTTCTTTTTCTTGCTTTCGT"
cigar = [
(4, 32),
(0, 29),
(2, 2),
(0, 7),
(1, 1),
(0, 4),
(1, 1),
(0, 8),
(2, 1),
(0, 11),
(1, 3),
(0, 1),
(2, 1),
(0, 26),
(1, 1),
(0, 8),
(2, 1),
(0, 76),
(1, 2),
(0, 46),
(1, 1),
(0, 4),
(2, 1),
(0, 11),
(2, 1),
(0, 77),
(1, 2),
(0, 5),
(2, 1),
(0, 40),
(1, 1),
(0, 54),
(4, 70),
]
self_qc.cigar_to_alts(seq, seq, cigar, pysam=True)
def test_bias_test():
return True # TODO resolve
assert not self_qc.test_bias(10, 100, threshold=0.3)
assert not self_qc.test_bias(90, 100, threshold=0.3)
assert self_qc.test_bias(40, 100, threshold=0.3)
assert self_qc.test_bias(60, 100, threshold=0.3)
def test_stat_evaluation():
return True # resolve
fwd = self_qc.BaseProfile(False, True, "test_amplicon1")
rev = self_qc.BaseProfile(False, False, "test_amplicon2")
# 20% alt alleles
pileup20 = ["A", "A", "C", "T", "A", "A", "A", "A", "A", "A"]
# 0% alt alleles
pileup0 = ["A", "A", "A", "A", "A", "A", "A", "A", "A", "A"]
# 100% alt alleles
pileup100 = ["T", "T", "T", "G", "G", "G", "T", "G", "C", "C"]
stats = self_qc.Stats()
for base in pileup20:
if base != "A":
stats.add_alt(fwd)
stats.add_alt(rev)
else:
stats.add_ref(fwd)
stats.add_ref(rev)
assert stats.check_for_failure(bias_threshold=0.3)
def test_masking():
fail = StatsTest(True)
succeed = StatsTest(False)
sequence = "ATCATC"
stats = {0: succeed, 4: fail}
masked, _ = self_qc.mask_sequence(sequence, stats)
assert masked == "ATCANC"
sequence = "ATCATC"
stats = {0: fail, 4: fail}
masked, _ = self_qc.mask_sequence(sequence, stats)
assert masked == "NTCANC"
|
StarcoderdataPython
|
3317368
|
This is file3.
|
StarcoderdataPython
|
109387
|
from django.core.management.base import BaseCommand
from account.utils import send_bulk_reminders
class Command(BaseCommand):
help = 'Send reminders to users who have not signed in today'
def handle(self, *args, **options):
self.stdout.write('Preparing to send reminder emails...')
send_bulk_reminders()
self.stdout.write('Successfully sent reminders')
|
StarcoderdataPython
|
1785362
|
<filename>projects/src/main/python/CodeJam/Y11R5P1/dennislissov/generated_py_bb93a4d1acf540c3b7c8dfd80359bd28.py
import sys
sys.path.append('/home/george2/Raise/ProgramRepair/CodeSeer/projects/src/main/python')
from CodeJam.Y11R5P1.dennislissov.A import *
def func_3ba6780ca418469a885da55554034725(y, x):
area += (x - px) * (y + py) / 2
px, py = x, y
return py
def func_16bfa03b301e43139f8f644e5e7cbce6(y, x):
area += (x - px) * (y + py) / 2
px, py = x, y
return area
def func_5726577868614ce7b084d034f2376af6(y, x):
area += (x - px) * (y + py) / 2
px, py = x, y
return px
def func_dabbc99813184aeea69a2f2d636dea55(ax, px, py):
y = py + (y - py) / (x - px) * (ax - px)
x = ax
return x
def func_d0b5903d9cd747d2b4b964585933d19f(ax, px, py):
y = py + (y - py) / (x - px) * (ax - px)
x = ax
return y
def func_5e4c1430509d4af995186639565f815e():
px = 0
py = 0
return px
def func_1ca08860d10b4cf8aedded11e9dffb1a():
px = 0
py = 0
return py
def func_95288f9c7bff4bf682e6cf6ba68a8ba9():
py = 0
area = 0
return py
def func_f0ab3dbecaef42e5be66cbfb734b7890():
py = 0
area = 0
return area
def func_f2b586b319aa4cb98bfb783ddd7addcb(Y, X, ax):
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return px
def func_23ca7fa984f94fb3bfd7a107f4210e78(Y, X, ax):
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return x
def func_214e65bd98004678b3eeba446027da4f(Y, X, ax):
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return y
def func_daf1c94df0584af692b5f4bcea188648(Y, X, ax):
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return area
def func_b759ad0d09e94f0ca1ec6405fa944e58(Y, X, ax):
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return py
def func_9ed2c74c0e6244e4ab8a4f0968336c03(area, ax, px, py):
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
return area
def func_970a7c8ba3704e9491127e3947e0f97b():
px = 0
py = 0
area = 0
return px
def func_92b324bab0d74577a806743dce7beb52():
px = 0
py = 0
area = 0
return area
def func_e87e2a34f8cf4dd4aac49c50a0e53958():
px = 0
py = 0
area = 0
return py
def func_0697bf4db790456998f4524105458519(Y, X, ax):
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return py
def func_6f78f82a300f490c91a7d12da591655e(Y, X, ax):
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return x
def func_9faf09f55d8547e697b03888eb6faad5(Y, X, ax):
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return y
def func_8d0048a94dd640f9ae614473ce1fa916(Y, X, ax):
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return area
def func_557dcb3fb31b4f728f8a7bdca9e3e339(Y, X, ax):
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return px
def func_b00db3e3d55f476597b206c0f54ca686(Y, X, ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return py
def func_9796002a7e484de685cf089d82775124(Y, X, ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return y
def func_25a25b6a067440b8abb0cda23274198e(Y, X, ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return area
def func_7b65d97fa3534bcdbedbcad451839552(Y, X, ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return x
def func_f36584730d6b46698e631ec2eaf1e0f6(Y, X, ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return px
def func_c8e909b1d8ef45bb81720100de8be90b(ncase, infile):
print('Case #{nc}'.format(nc=ncase + 1))
W, L, U, G = (int(s) for s in infile.readline().split())
return W
def func_9faea165cee94f6aa49cf54e20f95a12(ncase, infile):
print('Case #{nc}'.format(nc=ncase + 1))
W, L, U, G = (int(s) for s in infile.readline().split())
return G
def func_e80907eba6cf46c6a8137886b8e2c14c(ncase, infile):
print('Case #{nc}'.format(nc=ncase + 1))
W, L, U, G = (int(s) for s in infile.readline().split())
return L
def func_f75d55cd2b1b409db59d7afe7c13087f(ncase, infile):
print('Case #{nc}'.format(nc=ncase + 1))
W, L, U, G = (int(s) for s in infile.readline().split())
return s
def func_c9bb9573097a4c6aae46b4a2cc64276f(ncase, infile):
print('Case #{nc}'.format(nc=ncase + 1))
W, L, U, G = (int(s) for s in infile.readline().split())
return U
def func_9f3ec1b85566455a95126385cf4e1f6c(Lp, infile):
W, L, U, G = (int(s) for s in infile.readline().split())
Lp = [[int(s) for s in infile.readline().split()] for i in xrange(L)]
return i
def func_36e0888b67d74a9399f251b686416a94(Lp, infile):
W, L, U, G = (int(s) for s in infile.readline().split())
Lp = [[int(s) for s in infile.readline().split()] for i in xrange(L)]
return G
def func_d81dfa3ea87e4bd0993037f02057bfab(Lp, infile):
W, L, U, G = (int(s) for s in infile.readline().split())
Lp = [[int(s) for s in infile.readline().split()] for i in xrange(L)]
return U
def func_c2dec506230345dd97211ee64cb98359(Lp, infile):
W, L, U, G = (int(s) for s in infile.readline().split())
Lp = [[int(s) for s in infile.readline().split()] for i in xrange(L)]
return L
def func_7aac99e646d94133aa7e23ab7bf64166(Lp, infile):
W, L, U, G = (int(s) for s in infile.readline().split())
Lp = [[int(s) for s in infile.readline().split()] for i in xrange(L)]
return W
def func_b5c4f36135cf44ed877f852a51ed28df(Lp, infile):
W, L, U, G = (int(s) for s in infile.readline().split())
Lp = [[int(s) for s in infile.readline().split()] for i in xrange(L)]
return s
def func_3cf08fece0a440de9549151f95996078(Ly, Uy, X):
Y = Uy - Ly
area = sum((Y[1:] + Y[:-1]) / 2 * (X[1:] - X[:-1]))
return area
def func_2d7c562e9b0148198e7fde596a113ed2(Ly, Uy, X):
Y = Uy - Ly
area = sum((Y[1:] + Y[:-1]) / 2 * (X[1:] - X[:-1]))
return Y
def func_659622b0340743c88c95b497a41a557d(Y, X):
area = sum((Y[1:] + Y[:-1]) / 2 * (X[1:] - X[:-1]))
def cumul_area(ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return area
return area
def func_396e1081307e40ce8f745bd0f137bf1e(Y, X, G):
def cumul_area(ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return area
NAs = np.arange(1, G, dtype='float') * area / G
return NAs
def func_eb77d078893e4f8aa2e7bdbe0984a0d0(Y, X, G):
def cumul_area(ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return area
NAs = np.arange(1, G, dtype='float') * area / G
return area
def func_beb781dcbb554bf2aebc9aa2f7370eae(area, W, G):
NAs = np.arange(1, G, dtype='float') * area / G
points = [bisect(lambda x: cumul_area(x) - NA, 0, W) for NA in NAs]
return NA
def func_243ebc5672064a3f94074f635acf6021(area, W, G):
NAs = np.arange(1, G, dtype='float') * area / G
points = [bisect(lambda x: cumul_area(x) - NA, 0, W) for NA in NAs]
return NAs
def func_4df8196ffbfc4d768e65e55a8edab4eb(area, W, G):
NAs = np.arange(1, G, dtype='float') * area / G
points = [bisect(lambda x: cumul_area(x) - NA, 0, W) for NA in NAs]
return points
def func_f54dac81ea4543c2a1e561ea5ae647af(Lp, ncase, infile):
print('Case #{nc}'.format(nc=ncase + 1))
W, L, U, G = (int(s) for s in infile.readline().split())
Lp = [[int(s) for s in infile.readline().split()] for i in xrange(L)]
return W
def func_03f0a47b81f54349894ae80e3df364cc(Lp, ncase, infile):
print('Case #{nc}'.format(nc=ncase + 1))
W, L, U, G = (int(s) for s in infile.readline().split())
Lp = [[int(s) for s in infile.readline().split()] for i in xrange(L)]
return U
def func_c6a0c02b9aa94adcae8667d220d24ee3(Lp, ncase, infile):
print('Case #{nc}'.format(nc=ncase + 1))
W, L, U, G = (int(s) for s in infile.readline().split())
Lp = [[int(s) for s in infile.readline().split()] for i in xrange(L)]
return s
def func_2114f611135a404083fa23b8de1119d5(Lp, ncase, infile):
print('Case #{nc}'.format(nc=ncase + 1))
W, L, U, G = (int(s) for s in infile.readline().split())
Lp = [[int(s) for s in infile.readline().split()] for i in xrange(L)]
return L
def func_02054700d55544478b42e9b9e89ac0ca(Lp, ncase, infile):
print('Case #{nc}'.format(nc=ncase + 1))
W, L, U, G = (int(s) for s in infile.readline().split())
Lp = [[int(s) for s in infile.readline().split()] for i in xrange(L)]
return G
def func_947c0533be5744958f4bfe8f064a2a61(Lp, ncase, infile):
print('Case #{nc}'.format(nc=ncase + 1))
W, L, U, G = (int(s) for s in infile.readline().split())
Lp = [[int(s) for s in infile.readline().split()] for i in xrange(L)]
return i
def func_154098a41bec4dfdb03c4488e331d346(Ly, Uy, X):
Y = Uy - Ly
area = sum((Y[1:] + Y[:-1]) / 2 * (X[1:] - X[:-1]))
def cumul_area(ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return area
return area
def func_119cdf377a34446489b65815cb8fa1b0(Ly, Uy, X):
Y = Uy - Ly
area = sum((Y[1:] + Y[:-1]) / 2 * (X[1:] - X[:-1]))
def cumul_area(ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return area
return Y
def func_9e28664f45ad426ea5464abb2c10bdda(Y, X, G):
area = sum((Y[1:] + Y[:-1]) / 2 * (X[1:] - X[:-1]))
def cumul_area(ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return area
NAs = np.arange(1, G, dtype='float') * area / G
return NAs
def func_83c5c4d9fce24768b443d8707779be7a(Y, X, G):
area = sum((Y[1:] + Y[:-1]) / 2 * (X[1:] - X[:-1]))
def cumul_area(ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return area
NAs = np.arange(1, G, dtype='float') * area / G
return area
def func_b8d2303212d1411abe8f1b7731b8c215(Y, X, W, G):
def cumul_area(ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return area
NAs = np.arange(1, G, dtype='float') * area / G
points = [bisect(lambda x: cumul_area(x) - NA, 0, W) for NA in NAs]
return area
def func_786c6689b60a439982c75bc980c41591(Y, X, W, G):
def cumul_area(ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return area
NAs = np.arange(1, G, dtype='float') * area / G
points = [bisect(lambda x: cumul_area(x) - NA, 0, W) for NA in NAs]
return points
def func_2ca12634fb254c38be3999190921e11c(Y, X, W, G):
def cumul_area(ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return area
NAs = np.arange(1, G, dtype='float') * area / G
points = [bisect(lambda x: cumul_area(x) - NA, 0, W) for NA in NAs]
return NA
def func_933ba58dabc946cab8d749e628cb13f8(Y, X, W, G):
def cumul_area(ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return area
NAs = np.arange(1, G, dtype='float') * area / G
points = [bisect(lambda x: cumul_area(x) - NA, 0, W) for NA in NAs]
return NAs
def func_5032443b55fc49c386c19c4f27d8d31f(Ly, Uy, X, G):
Y = Uy - Ly
area = sum((Y[1:] + Y[:-1]) / 2 * (X[1:] - X[:-1]))
def cumul_area(ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return area
NAs = np.arange(1, G, dtype='float') * area / G
return Y
def func_56b85182ae2a432ebe43cc4137476bab(Ly, Uy, X, G):
Y = Uy - Ly
area = sum((Y[1:] + Y[:-1]) / 2 * (X[1:] - X[:-1]))
def cumul_area(ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return area
NAs = np.arange(1, G, dtype='float') * area / G
return area
def func_08eb2639638648dc8e6413685c237454(Ly, Uy, X, G):
Y = Uy - Ly
area = sum((Y[1:] + Y[:-1]) / 2 * (X[1:] - X[:-1]))
def cumul_area(ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return area
NAs = np.arange(1, G, dtype='float') * area / G
return NAs
def func_e1563650f1644cd8adc532a3cdb0e48c(Y, X, W, G):
area = sum((Y[1:] + Y[:-1]) / 2 * (X[1:] - X[:-1]))
def cumul_area(ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return area
NAs = np.arange(1, G, dtype='float') * area / G
points = [bisect(lambda x: cumul_area(x) - NA, 0, W) for NA in NAs]
return points
def func_16c3d286b53a4185a059acb7cdd941b7(Y, X, W, G):
area = sum((Y[1:] + Y[:-1]) / 2 * (X[1:] - X[:-1]))
def cumul_area(ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return area
NAs = np.arange(1, G, dtype='float') * area / G
points = [bisect(lambda x: cumul_area(x) - NA, 0, W) for NA in NAs]
return area
def func_1beea74415db416c8f122bf52ae540ce(Y, X, W, G):
area = sum((Y[1:] + Y[:-1]) / 2 * (X[1:] - X[:-1]))
def cumul_area(ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return area
NAs = np.arange(1, G, dtype='float') * area / G
points = [bisect(lambda x: cumul_area(x) - NA, 0, W) for NA in NAs]
return NAs
def func_83c47fbce0804f06bb936b2044fccb53(Y, X, W, G):
area = sum((Y[1:] + Y[:-1]) / 2 * (X[1:] - X[:-1]))
def cumul_area(ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return area
NAs = np.arange(1, G, dtype='float') * area / G
points = [bisect(lambda x: cumul_area(x) - NA, 0, W) for NA in NAs]
return NA
def func_f86ebe05fa1c4b76a19fe4214980a2e0(Ly, Uy, X, W, G):
Y = Uy - Ly
area = sum((Y[1:] + Y[:-1]) / 2 * (X[1:] - X[:-1]))
def cumul_area(ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return area
NAs = np.arange(1, G, dtype='float') * area / G
points = [bisect(lambda x: cumul_area(x) - NA, 0, W) for NA in NAs]
return Y
def func_d474fb9112a84f449df6eccd94f0701a(Ly, Uy, X, W, G):
Y = Uy - Ly
area = sum((Y[1:] + Y[:-1]) / 2 * (X[1:] - X[:-1]))
def cumul_area(ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return area
NAs = np.arange(1, G, dtype='float') * area / G
points = [bisect(lambda x: cumul_area(x) - NA, 0, W) for NA in NAs]
return NAs
def func_ec7540897fbc4859bba13c825191236c(Ly, Uy, X, W, G):
Y = Uy - Ly
area = sum((Y[1:] + Y[:-1]) / 2 * (X[1:] - X[:-1]))
def cumul_area(ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return area
NAs = np.arange(1, G, dtype='float') * area / G
points = [bisect(lambda x: cumul_area(x) - NA, 0, W) for NA in NAs]
return NA
def func_617d4017aed54cd1a47436ee5f6a9c77(Ly, Uy, X, W, G):
Y = Uy - Ly
area = sum((Y[1:] + Y[:-1]) / 2 * (X[1:] - X[:-1]))
def cumul_area(ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return area
NAs = np.arange(1, G, dtype='float') * area / G
points = [bisect(lambda x: cumul_area(x) - NA, 0, W) for NA in NAs]
return area
def func_9798caccb3644af2822a2f2778317aed(Ly, Uy, X, W, G):
Y = Uy - Ly
area = sum((Y[1:] + Y[:-1]) / 2 * (X[1:] - X[:-1]))
def cumul_area(ax):
px = 0
py = 0
area = 0
for x, y in zip(X, Y):
if x == px:
px, py = x, y
continue
if ax < x:
y = py + (y - py) / (x - px) * (ax - px)
x = ax
area += (x - px) * (y + py) / 2
px, py = x, y
if x == ax:
break
return area
NAs = np.arange(1, G, dtype='float') * area / G
points = [bisect(lambda x: cumul_area(x) - NA, 0, W) for NA in NAs]
return points
|
StarcoderdataPython
|
3369777
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 14 11:50:56 2020
@author: <NAME>
"""
# %% Import packages
import os
# %% Set the working directory
# IMPORTANT!! Make sure to set the working directory to the path where you
# stored the python files, including this script and the Data & Models folders.
os.chdir('D:/OneDrive/school/1. Master/8. Machine Learning and Multivariate '
+ 'Statistics/Assignment/CMAP_Drug_safety_Machine_learning')
# Load local scripts
from Data import init, feature_filter
import Tree_prediction
import SVM_prediction
import ANN
# %% Initialise the data
X_train, Y_train, X_test, Y_test = init(file = 'p3-phh-camda2020.csv',
label = 'DILI1',
upsample = True,
downsample = False)
# %% Feature Selection
important_feat_index = Tree_prediction.select_features(X_train, Y_train,
threshold = 'top1000')
# Filter all the data on the feature selection
[X_train, X_test] = feature_filter([X_train, X_test],
important_feat_index)
# %% Random Forest
forest_model = Tree_prediction.hyperparameter_tuning(X_train, Y_train,
X_test, Y_test,
score = 'roc_auc',
save_name = 'forest_model')
# %% Support Vector machine
SVM_model = SVM_prediction.hyperparameter_tuning(X_train, Y_train,
X_test, Y_test,
save_name = 'SVM_model')
# %% Artificial Neural Network
ANN_model = ANN.hyperparameter_tuning(X_train, Y_train, X_test, Y_test,
balance_class_weights = False)
|
StarcoderdataPython
|
4830722
|
<gh_stars>0
#Embedded file name: ACEStream\Core\DecentralizedTracking\pymdht\core\message_tools.pyo
import sys
import socket
from socket import inet_ntoa, inet_aton
import logging
import bencode
from identifier import Id, ID_SIZE_BYTES, IdError
from node import Node
logger = logging.getLogger('dht')
IP4_SIZE = 4
IP6_SIZE = 16
ADDR4_SIZE = IP4_SIZE + 2
ADDR6_SIZE = IP6_SIZE + 2
C_NODE_SIZE = ID_SIZE_BYTES + ADDR4_SIZE
C_NODE2_SIZE = ID_SIZE_BYTES + ADDR6_SIZE
IP6_PADDING = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff'
class AddrError(Exception):
pass
def bin_to_int(bin_str):
return ord(bin_str[0]) * 256 + ord(bin_str[1])
def int_to_bin(i):
return chr(i / 256) + chr(i % 256)
def compact_addr(addr):
return ''.join((inet_aton(addr[0]), int_to_bin(addr[1])))
def uncompact_addr(c_addr):
if len(c_addr) != ADDR4_SIZE:
raise AddrError, 'invalid address size'
if c_addr[0] == '\x7f' or c_addr[:2] == '\xc0\xa8':
logger.warning('Got private address: %r' % c_addr)
raise AddrError, 'private address'
ip = inet_ntoa(c_addr[:-2])
port = bin_to_int(c_addr[-2:])
if port == 0:
logger.warning('c_addr: %r > port is ZERO' % c_addr)
raise AddrError
return (ip, port)
def compact_peers(peers):
return [ compact_addr(peer) for peer in peers ]
def uncompact_peers(c_peers):
peers = []
for c_peer in c_peers:
try:
peers.append(uncompact_addr(c_peer))
except AddrError:
pass
return peers
def compact_nodes(nodes):
return ''.join([ node.id.bin_id + compact_addr(node.addr) for node in nodes ])
def uncompact_nodes(c_nodes):
if len(c_nodes) % C_NODE_SIZE != 0:
logger.info('invalid size (%d) %s' % (len(c_nodes), c_nodes))
return []
nodes = []
for begin in xrange(0, len(c_nodes), C_NODE_SIZE):
node_id = Id(c_nodes[begin:begin + ID_SIZE_BYTES])
try:
node_addr = uncompact_addr(c_nodes[begin + ID_SIZE_BYTES:begin + C_NODE_SIZE])
except AddrError:
pass
else:
node = Node(node_addr, node_id)
nodes.append(node)
return nodes
def compact_nodes2(nodes):
return [ node.id.bin_id + compact_addr(node.addr) for node in nodes ]
def uncompact_nodes2(c_nodes):
nodes = []
for c_node in c_nodes:
node_id = Id(c_node[:ID_SIZE_BYTES])
try:
node_addr = uncompact_addr(c_node[ID_SIZE_BYTES:])
except AddrError:
logger.warning('IPv6 addr in nodes2: %s' % c_node)
else:
node = Node(node_addr, node_id)
nodes.append(node)
return nodes
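# Minimal round-trip sketch for the compact-address helpers (Python 2, to
# match the str-based byte handling above); the IP/port pair is an arbitrary
# public address chosen to pass the private-address check in uncompact_addr.
if __name__ == '__main__':
    c_addr = compact_addr(('1.2.3.4', 6881))
    print(uncompact_addr(c_addr))  # -> ('1.2.3.4', 6881)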
|
StarcoderdataPython
|
1763213
|
<filename>servo/system_config.py
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""System configuration module."""
import collections
import glob
import logging
import os
import re
import xml.etree.ElementTree
# valid tags in system config xml. Any others will be ignored
MAP_TAG = 'map'
CONTROL_TAG = 'control'
SYSCFG_TAG_LIST = [MAP_TAG, CONTROL_TAG]
ALLOWABLE_INPUT_TYPES = {'float': float, 'int': int, 'str': str}
# pylint: disable=g-bad-exception-name
# TODO(coconutruben): figure out if it's worth it to rename this so that it
# removes the 'stutter'
class SystemConfigError(Exception):
"""Error class for SystemConfig."""
class SystemConfig(object):
"""SystemConfig Class.
System config files describe how to talk to various pieces on the device under
test. The system config may be broken up into multiple file to make it easier
to share configs among similar DUTs. This class has the support to take in
multiple SystemConfig files and treat them as one unified structure
SystemConfig files are written in xml and consist of four main elements
0. Include : Ability to include other config files
<include>
<name>servo_loc.xml</name>
</include>
NOTE, All includes in a file WILL be sourced prior to any other elements in
the XML.
1. Map : Allow user-friendly naming for things to abstract
certain things like on=0 for things that are assertive low on
actual h/w
<map>
<name>onoff_i</name>
<doc>assertive low map for on/off</doc>
<params on="0" off="1"></params>
</map>
2. Control : Bulk of the system file. These elements are
typically gpios, adcs, dacs which allow either control or sampling
of events on the h/w. Controls should have a 1to1 correspondence
with hardware elements between control system and DUT.
<control>
<name>warm_reset</name>
<doc>Reset the device warmly</doc>
<params interface="1" drv="gpio" offset="5" map="onoff_i"></params>
</control>
TODO(tbroch) Implement sequence or deprecate
3. Sequence : List of control calls to create a desired
configuration of h/w. These could certainly be done by writing
simple scripts to send individual control calls to the server but
encapsulating them into the system file should allow for tighter
control of the sequence ... especially if timing of the sequence
is paramount.
<sequence>
<name>i2c_mux_seq</name>
<cmdlist>i2c_mux_en:off i2c_mux_add:__arg0__ i2c_mux_en:on</cmdlist>
</sequence>
Public Attributes:
control_tags: a dictionary of each base control and their tags if any
aliases: a dictionary of an alias mapped to its base control name
syscfg_dict: 3-deep dictionary created when parsing system files. Its
organized as [tag][name][type] where:
tag: map | control | sequence
name: string name of tag element
type: data type of payload either, doc | get | set presently
doc: string describing the map,control or sequence
get: a dictionary for getting values from named control
set: a dictionary for setting values to named control
hwinit: list of control tuples (name, value) to be initialized in order
Private Attributes:
_loaded_xml_files: set of filenames already loaded to avoid sourcing XML
multiple times.
"""
def __init__(self):
"""SystemConfig constructor."""
self._logger = logging.getLogger('SystemConfig')
self._logger.debug('')
self.control_tags = collections.defaultdict(list)
self.aliases = {}
self.syscfg_dict = collections.defaultdict(dict)
self.hwinit = []
self._loaded_xml_files = []
self._board_cfg = None
def find_cfg_file(self, filename):
"""Find the filename for a system XML config file.
If the provided `filename` names a valid file, use that.
Otherwise, `filename` must name a file in the 'data'
subdirectory stored with this module.
Returns the path selected as described above; if neither of the
paths names a valid file, return `None`.
Args:
filename: string of path to system file ( xml )
Returns:
string full path of |filename| if it exists, otherwise None
"""
if os.path.isfile(filename):
return filename
default_path = os.path.join(os.path.dirname(__file__), 'data')
fullname = os.path.join(default_path, filename)
if os.path.isfile(fullname):
return fullname
return None
@staticmethod
def tag_string_to_tags(tag_str):
"""Helper to split tag string into individual tags."""
return tag_str.split(',')
def get_all_cfg_names(self):
"""Return all XML config file names.
Returns:
A list of file names.
"""
exclude_re = re.compile(r'servo_.*_overlay\.xml')
pattern = os.path.join(os.path.dirname(__file__), 'data', '*.xml')
cfg_names = []
for name in glob.glob(pattern):
name = os.path.basename(name)
if not exclude_re.match(name):
cfg_names.append(name)
return cfg_names
def set_board_cfg(self, filename):
"""Save the filename for the board config."""
self._board_cfg = filename
def get_board_cfg(self):
"""Return the board filename."""
return self._board_cfg
def add_cfg_file(self, filename, name_prefix=None, interface_increment=0):
"""Add system config file to the system config object.
Each design may rely on multiple system files so need to have the facility
to parse them all.
For example, we may have a:
1. default for all controls that are the same for each of the
control systems
2. default for a particular DUT system's usage across the
connector
3. specific one for particular version of DUT (evt,dvt,mp)
4. specific one for a one-off rework done to a system
Special key parameters in config files:
clobber_ok: signifies this control may _clobber_ an existing definition
of the same name. Note, its value is ignored ( clobber_ok='' )
NOTE, method is recursive when parsing 'include' elements from XML.
The arguments name_prefix and interface_increment are used to support
multiple servo micros. The interfaces of the extra servo micros, like
the one for hammer, are relocated to higher slots. The controls of this
extra servo micros should be shifted their interface numbers. Adding
the name prefix avoid conflict with the main servo micro.
Args:
filename: string of path to system file ( xml )
name_prefix: string to prepend to all control names
interface_increment: number to add to all interfaces
Raises:
SystemConfigError: for schema violations, or file not found.
"""
cfgname = self.find_cfg_file(filename)
if not cfgname:
msg = 'Unable to find system file %s' % filename
self._logger.error(msg)
raise SystemConfigError(msg)
filename = cfgname
if (filename, name_prefix, interface_increment) in self._loaded_xml_files:
self._logger.warn('Already sourced system file (%s, %s, %d).', filename,
name_prefix, interface_increment)
return
self._loaded_xml_files.append((filename, name_prefix, interface_increment))
self._logger.info('Loading XML config (%s, %s, %d)', filename, name_prefix,
interface_increment)
root = xml.etree.ElementTree.parse(filename).getroot()
for element in root.findall('include'):
self.add_cfg_file(
element.find('name').text, name_prefix, interface_increment)
for tag in SYSCFG_TAG_LIST:
for element in root.findall(tag):
element_str = xml.etree.ElementTree.tostring(element)
try:
name = element.find('name').text
if tag == CONTROL_TAG and name_prefix:
name = name_prefix + name
except AttributeError:
# TODO(tbroch) would rather have lineno but dumping element seems
# better than nothing. Utimately a DTD/XSD for the XML schema will
# catch these anyways.
raise SystemConfigError('%s: no name ... see XML\n%s' % (tag,
element_str))
try:
doc = ' '.join(element.find('doc').text.split())
except AttributeError:
doc = 'undocumented'
try:
alias = element.find('alias').text
except AttributeError:
alias = None
try:
remap = element.find('remap').text
except AttributeError:
remap = None
if remap:
try:
if name_prefix:
remap = name_prefix + remap
self.syscfg_dict[tag][remap] = self.syscfg_dict[tag][name]
except KeyError:
# Sometimes the remap control doesn't exist (e.g. fw_up in servo
# v4). Just ignore it and continue on.
pass
continue
get_dict = None
set_dict = None
clobber_ok = False
params_list = element.findall('params')
# Modify the interface attributes.
for params in params_list:
if 'interface' in params.attrib:
if params.attrib['interface'] != 'servo':
interface_id = int(params.attrib['interface'])
params.attrib['interface'] = interface_id + interface_increment
if len(params_list) == 2:
assert tag != MAP_TAG, 'maps have only one params entry'
for params in params_list:
if 'cmd' not in params.attrib:
raise SystemConfigError('%s %s multiple params but no cmd\n%s' %
(tag, name, element_str))
cmd = params.attrib['cmd']
if cmd == 'get':
if get_dict:
raise SystemConfigError(
'%s %s multiple get params defined\n%s' % (tag, name,
element_str))
get_dict = params.attrib
elif cmd == 'set':
if set_dict:
raise SystemConfigError(
'%s %s multiple set params defined\n%s' % (tag, name,
element_str))
set_dict = params.attrib
else:
raise SystemConfigError("%s %s cmd of 'get'|'set' not found\n%s" %
(tag, name, element_str))
elif len(params_list) == 1:
get_dict = params_list[0].attrib
set_dict = get_dict
else:
raise SystemConfigError('%s %s has illegal number of params %d\n%s' %
(tag, name, len(params_list), element_str))
# Save the control name to the params dicts, such that the driver can
# refer to it.
if tag == CONTROL_TAG:
get_dict['control_name'] = name
set_dict['control_name'] = name
clobber_ok = ('clobber_ok' in set_dict or 'clobber_ok' in get_dict)
if (tag == CONTROL_TAG and name in self.syscfg_dict[tag] and
not clobber_ok):
raise SystemConfigError(
"Duplicate %s %s without 'clobber_ok' key\n%s" % (tag, name,
element_str))
if tag == MAP_TAG:
self.syscfg_dict[tag][name] = {'doc': doc, 'map_params': get_dict}
if alias:
raise SystemConfigError('No aliases for maps allowed')
continue
if 'init' in set_dict:
hwinit_found = False
# only allow one hwinit per control
if clobber_ok:
for i, (hwinit_name, _) in enumerate(self.hwinit):
if hwinit_name == name:
self.hwinit[i] = (name, set_dict['init'])
hwinit_found = True
break
if not hwinit_found:
self.hwinit.append((name, set_dict['init']))
if clobber_ok and name in self.syscfg_dict[tag]:
# it's an existing control
self.syscfg_dict[tag][name]['get_params'].update(get_dict)
self.syscfg_dict[tag][name]['set_params'].update(set_dict)
if doc != 'undocumented':
self.syscfg_dict[tag][name]['doc'] = doc
else:
# it's a new control
self.syscfg_dict[tag][name] = {
'doc': doc,
'get_params': get_dict,
'set_params': set_dict
}
if alias:
for aliasname in (elem.strip() for elem in alias.split(',')):
if name_prefix:
aliasname = name_prefix + aliasname
self.syscfg_dict[tag][aliasname] = self.syscfg_dict[tag][name]
# Also store what the alias relationship
self.aliases[aliasname] = name
def finalize(self):
"""Finalize setup, Call this after no more config files will be added.
Note: this can be called repeatedly, and will overwrite the previous
results.
- Sets up tags for each control, if provided
"""
self.control_tags.clear()
# Tags are only stored for the primary control name, not their aliases.
base_controls = [control for control in self.syscfg_dict[CONTROL_TAG]
if control not in self.aliases]
for control in base_controls:
# Tags are only read out of the get dict for now. If a control defines
# seperate get and set dicts, please make sure to define the tags in the
# get dict.
get_dict = self.syscfg_dict[CONTROL_TAG][control]['get_params']
if 'tags' in get_dict:
tags = SystemConfig.tag_string_to_tags(get_dict['tags'])
for tag in tags:
self.control_tags[tag].append(control)
def get_controls_for_tag(self, tag):
"""Get list of controls for a given tag.
Args:
tag: str, tag to query
Returns:
list of controls with that tag, or an empty list if no such tag, or
controls under that tag
"""
# Checking here ensures that we do not generate an empty list (as it's a
# default dict)
if tag not in self.control_tags:
self._logger.info('Tag %s unknown.', tag)
return []
return self.control_tags[tag]
def lookup_control_params(self, name, is_get=True):
"""Lookup & return control parameter dictionary.
Note, controls must have either one or two set of parameters. In the case
of two, the dictionary must contain k=v element of 'type':'get' or
'type':'set'
Args:
name: string of control name to lookup
is_get: boolean of whether params should be for 'get' | 'set'
Returns:
control's parameter dictionary for approrpiate get or set
Raises:
NameError: if control name not found
SystemConfigError: if error encountered identifying parameters
"""
if name not in self.syscfg_dict[CONTROL_TAG]:
raise NameError('No control named %s. All controls:\n%s' %
(name,
','.join(sorted(self.syscfg_dict[CONTROL_TAG]))))
if is_get:
return self.syscfg_dict[CONTROL_TAG][name]['get_params']
else:
return self.syscfg_dict[CONTROL_TAG][name]['set_params']
def is_control(self, name):
"""Determine if name is a control or not.
Args:
name: string of control name to lookup
Returns:
boolean, True if name is control, False otherwise
"""
return name in self.syscfg_dict[CONTROL_TAG]
def get_control_docstring(self, name):
"""Get controls doc string.
Args:
name: string of control name to lookup
Returns:
doc string of the control
"""
return self.syscfg_dict[CONTROL_TAG][name]['doc']
def _lookup(self, tag, name_str):
"""Lookup the tag name_str and return dictionary or None if not found.
Args:
tag: string of tag (from SYSCFG_TAG_LIST) to look for name_str under.
name_str: string of name to lookup
Returns:
dictionary from syscfg_dict[tag][name_str] or None
"""
self._logger.debug('lookup of %s %s', tag, name_str)
return self.syscfg_dict[tag].get(name_str)
def resolve_val(self, params, map_vstr):
"""Resolve string value.
Values to set the control to can be mapped to symbolic strings for better
readability. For example, its difficult to remember assertion levels of
various gpios. Maps allow things like 'reset:on'. Also provides
abstraction so that assertion level doesn't have to be exposed.
Args:
params: parameters dictionary for control
map_vstr: string thats acceptable values are:
an int (can be "DECIMAL", "0xHEX", 0OCT", or "0bBINARY".
a floating point value.
an alphanumeric which is key in the corresponding map dictionary.
Returns:
Resolved value as float or int or str depending on mapping & input type
Raises:
SystemConfigError: mapping issues found
"""
# its a map
err = 'Unknown error formatting input value.'
if 'map' in params:
map_dict = self._lookup(MAP_TAG, params['map'])
if map_dict is None:
raise SystemConfigError("Map %s isn't defined" % params['map'])
try:
map_vstr = map_dict['map_params'][map_vstr]
except KeyError:
# Do not raise error yet. This might just be that the input is not
# using the map i.e. it's directly writing a raw mapped value.
err = "Map '%s' doesn't contain key '%s'\n" % (params['map'], map_vstr)
err += "Try one of -> '%s'" % "', '".join(map_dict['map_params'])
if 'input_type' in params:
if params['input_type'] in ALLOWABLE_INPUT_TYPES:
try:
input_type = ALLOWABLE_INPUT_TYPES[params['input_type']]
return input_type(map_vstr)
except ValueError:
err += "\n%s Input should be 'int' or 'float'." % ('Or' if 'Map' in
err else '')
else:
self._logger.error('Unrecognized input type.')
# TODO(tbroch): deprecate below once all controls have input_type params
try:
return int(str(map_vstr), 0)
except ValueError:
pass
try:
return float(str(map_vstr))
except ValueError:
# No we know that nothing worked, and there was an error.
raise SystemConfigError(err)
# pylint: disable=invalid-name
# Naming convention to dynamically find methods based on config parameter
def _Fmt_hex(self, int_val):
"""Format integer into hex.
Args:
int_val: integer to be formatted into hex string
Returns:
string of integer in hex format
"""
return hex(int_val)
def reformat_val(self, params, value):
"""Reformat value.
Formatting determined via:
1. if has map, then remap
2. else if has fmt param, use that function
Args:
params: parameter dictionary for control
value: value to reformat
Returns:
formatted string value if reformatting needed
value otherwise
Raises:
SystemConfigError: errors using formatting param
"""
# TODO(crbug.com/841097): revisit logic for value here once
# resolution found on bug.
if value is not None and 'map' not in params and 'fmt' not in params:
return value
reformat_value = str(value)
if 'map' in params:
map_dict = self._lookup(MAP_TAG, params['map'])
if map_dict:
for keyname, val in map_dict['map_params'].items():
if val == reformat_value:
reformat_value = keyname
break
elif 'fmt' in params:
fmt = params['fmt']
try:
func = getattr(self, '_Fmt_%s' % fmt)
except AttributeError:
raise SystemConfigError('Unrecognized format %s' % fmt)
try:
reformat_value = func(value)
except Exception:
raise SystemConfigError('Problem executing format %s' % fmt)
return reformat_value
def display_config(self, tag=None, prefix=None):
"""Display human-readable values of map, control, or sequence.
Args:
tag : string of either 'map' | 'control' | 'sequence' or None for all
prefix: prefix string to print infront of control tags
Returns:
string to be displayed.
"""
rsp = []
if tag is None:
tag_list = SYSCFG_TAG_LIST
else:
tag_list = [tag]
for tag in sorted(tag_list):
prefix_str = ''
if tag == CONTROL_TAG and prefix:
prefix_str = '%s.' % prefix
rsp.append('*************')
rsp.append('* ' + tag.upper())
rsp.append('*************')
max_len = max(len(name) for name in self.syscfg_dict[tag])
max_len += len(prefix_str)
dashes = '-' * max_len
for name in sorted(self.syscfg_dict[tag]):
item_dict = self.syscfg_dict[tag][name]
padded_name = '%-*s' % (max_len, '%s%s' % (prefix_str, name))
rsp.append('%s DOC: %s' % (padded_name, item_dict['doc']))
if tag == MAP_TAG:
rsp.append('%s MAP: %s' % (dashes, str(item_dict['map_params'])))
else:
rsp.append('%s GET: %s' % (dashes, str(item_dict['get_params'])))
rsp.append('%s SET: %s' % (dashes, str(item_dict['set_params'])))
return '\n'.join(rsp)
def test():
"""Integration test.
TODO(tbroch) Enhance integration test and add unittest (see mox)
"""
# pylint: disable=protected-access,raising-format-tuple,g-doc-exception
# Test method that's likely to be removed as more unit-tests roll out.
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
scfg = SystemConfig()
# TODO(tbroch) make this a comprenhensive test xml file
scfg.add_cfg_file(os.path.join('data', 'servo.xml'))
scfg.display_config()
control_dict = scfg._lookup('control', 'goog_rec_mode')
# checking mapping functionality
control_params = control_dict['get_params']
control_params = control_dict['set_params']
if 'map' in control_params:
map_name = control_params['map']
map_dict = scfg._lookup('map', map_name)
if not map_dict:
raise Exception('Unable to find map %s', map_name)
logging.info('')
for keyname, val in map_dict['map_params'].items():
resolved_val = str(scfg.resolve_val(control_params, keyname))
if resolved_val != val:
logging.error('resolve failed for %s -> %s != %s', keyname, val,
resolved_val)
# try re-mapping
reformat_val = scfg.reformat_val(control_params, int(val))
reformat_val = scfg.reformat_val(control_params, val)
if reformat_val != keyname:
logging.error('reformat failed for %s -> %s != %s', val, keyname,
reformat_val)
if __name__ == '__main__':
test()
|
StarcoderdataPython
|
1691552
|
<reponame>hmmartiny/LazyFunctions
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec as gridspec
"""
An attempt at making a class for drawing advanced dendrograms with Python.
Both linear and circular (polar) dendrograms.
"""
class Dendogram:
"""Inspired by https://stackoverflow.com/a/52517178/15704316"""
def __init__(self, dendrogram, annotations):
self.icoord = dendrogram['icoord']
self.dcoord = dendrogram['dcoord']
self.leave_labels = dendrogram['ivl']
self.leaves = dendrogram['leaves']
self.get_leave_coords()
self.annotations = annotations
def get_leave_coords(self):
# flatten
icoord_flattened = [item for sublist in self.icoord for item in sublist]
dcoord_flattened = [item for sublist in self.dcoord for item in sublist]
# extract
leave_coords = [(x,y) for x,y in zip(icoord_flattened, dcoord_flattened) if y == 0]
# get leave order in dendrogram
leave_order = np.argsort([x for x, _ in leave_coords])
# map id to coordinates
self.leaveid_to_coord = dict(zip(
self.leaves,
[leave_coords[idx] for idx in leave_order]
))
self.coord_to_leave = {v: k for k, v in self.leaveid_to_coord.items()}
# map id to label
self.leaveid2leavelabel = {lid: ll for lid, ll in zip(self.leaves, self.leave_labels)}
def _branches(self, ax, icoords, dcoords, color='black', lw=1, limit=None, plot_leaves=True, leaf_args={}):
max_coord = 0
for i, (xs, ys) in enumerate(zip(icoords, dcoords)):
ax.plot(xs, ys, color="black", lw=lw)
if plot_leaves:
self._leaf(ax, xs, ys, **leaf_args)
if max(xs) > max_coord: max_coord = max(xs)
if isinstance(limit, int) and i > limit: break
ax.set_xlim(-.1, max_coord + 2)
def get_leaf_formatting(self, node, column, default):
try:
nf = self.annotations.loc[self.annotations.node == node, column].item()
except:
nf = default
return nf
def _leaf(self, ax, xs, ys, leaf_shape=None, leaf_color=None, leaf_size=2):
for x, y in zip(xs, ys):
if y == 0:
try:
leaf_id = self.coord_to_leave[(x,y)]
except:
continue
leaf_label = self.leaveid2leavelabel[leaf_id]
shape = self.get_leaf_formatting(node=leaf_label, column=leaf_shape, default='o')
color = self.get_leaf_formatting(node=leaf_label, column=leaf_color, default='black')
ax.plot(x, y, shape, color=color, ms=leaf_size)
def plot_linear(self, figsize, limit=None, plot_leaves=True, leaf_args = {}):
fig = plt.figure(figsize=figsize)
gs = gridspec.GridSpec(nrows=1, ncols=1)#, **gridspec_kw)
ax = fig.add_subplot(gs[0])
#ax_leaf = fig.add_subplot(gs[1], sharex=ax)
# plot branches and leaves
self._branches(ax, self.icoord, self.dcoord, limit=limit, plot_leaves=plot_leaves, leaf_args=leaf_args)
ax.get_xaxis().set_visible(False)
ax.set_ylim(-.1, ax.get_ylim()[1] + .1)
for spine in ['top', 'right']:
ax.spines[spine].set_visible(False)
plt.close(fig)
return fig
def polar_leaf(self, ax, x, y, xo, yo, leaf_shape=None, leaf_color=None, leaf_size=2):
try:
leaf_id = self.coord_to_leave[(xo, yo)]
leaf_label = self.leaveid2leavelabel[leaf_id]
shape = self.get_leaf_formatting(node=leaf_label, column=leaf_shape, default='o')
color = self.get_leaf_formatting(node=leaf_label, column=leaf_color, default='black')
except:
shape, color = 'o', 'black'
ax.plot(x, y, shape, color=color, ms=leaf_size)
def plot_circular(self, figsize, limit=None, plot_leaves=True, leaf_args={}, lw=1):
fig = plt.figure(figsize=figsize)
gs = gridspec.GridSpec(nrows=1, ncols=1)
ax = fig.add_subplot(gs[0], polar=True)
dcoords = -np.log(np.asarray(self.dcoord) + 1)
gap = .1
icoord = np.asarray(self.icoord)
imax = icoord.max()
imin = icoord.min()
icoords = ((icoord - imin) / (imax - imin) * (1-gap) + gap / 2) * 2 * np.pi
#self._branches(ax, icoords, dcoords, plot_leaves=plot_leaves)
for i, (xs, ys) in enumerate(zip(icoords, dcoords)):
if plot_leaves:
for j, (x, y) in enumerate(zip(xs, ys)):
if y == 0:
xo, yo = (self.icoord[i][j], self.dcoord[i][j])
self.polar_leaf(ax, x, y, xo, yo, **leaf_args)
xss = np.concatenate([[xs[0]], np.linspace(xs[1], xs[2], 100), [xs[3]]])
yss = np.concatenate([[ys[0]], np.linspace(ys[1], ys[2], 100), [ys[3]]])
ax.plot(xss, yss, color='black', lw=lw)
if limit is not None and i > limit:
break
ax.spines['polar'].set_visible(False)
ax.xaxis.grid(False)
ax.set_xticklabels([])
plt.close(fig)
return fig
if __name__ == '__main__':
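    # Hedged setup so the demo below can run: `dend` and `anno` are not
    # defined in this module, so build them here from random data with scipy
    # and a minimal annotation table carrying the columns used in leaf_args.
    import pandas as pd
    from scipy.cluster.hierarchy import linkage, dendrogram
    Z = linkage(np.random.rand(20, 4), method='average')
    dend = dendrogram(Z, no_plot=True)
    anno = pd.DataFrame({'node': dend['ivl'],
                         'gene_color': 'black',
                         'shape': 'o'})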
dnd = Dendogram(dend, anno)
leaf_args = {'leaf_color': 'gene_color', 'leaf_shape': 'shape','leaf_size': 2}
dnd.plot_linear(figsize=(10, 10), plot_leaves=True, leaf_args=leaf_args, limit=10)
dnd.plot_circular(figsize=(10, 10), plot_leaves=True, lw=.05, leaf_args=leaf_args, limit=10)
|
StarcoderdataPython
|
4812904
|
"""
Constants module
"""
class Status(object):
"""
Utim status class
"""
STATUS_NEWBORN = 'STATUS_NEWBORN'
STATUS_SRP = 'STATUS_SRP'
STATUS_CONFIGURING = 'STATUS_CONFIGURING'
STATUS_TESTING = 'STATUS_TESTING'
STATUS_DONE = 'STATUS_DONE'
STATUS_NO_CONFIG = 'STATUS_NO_CONFIG'
STATUS_DED = 'STATUS_DED'
class Flags(object):
"""
Utim flags object
"""
FLAG_NONE = 0
FLAG_RECONFIGURE = 1
|
StarcoderdataPython
|
3200656
|
<reponame>AdityaSidharta/shopee_data_science
import math
import torch
def get_batch_info(dataloader):
    """Return the dataset size, batch size, and number of batches per epoch."""
    n_obs = len(dataloader.dataset)
    batch_size = dataloader.batch_size
    n_batch_per_epoch = math.ceil(n_obs / float(batch_size))
    return n_obs, batch_size, n_batch_per_epoch
def img2tensor(img_array, device):
    """Convert an HWC image array to a CHW float tensor on the given device."""
    img_array = img_array.transpose((2, 0, 1))
    return torch.from_numpy(img_array).float().to(device)
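# A minimal usage sketch (not part of the original file), assuming a
# torch-style DataLoader and an HWC float image array.
if __name__ == '__main__':
    import numpy as np
    from torch.utils.data import DataLoader, TensorDataset

    dummy = TensorDataset(torch.zeros(100, 3), torch.zeros(100))
    loader = DataLoader(dummy, batch_size=16)
    print(get_batch_info(loader))  # (100, 16, 7)

    img = np.zeros((224, 224, 3), dtype=np.float32)  # height x width x channels
    print(img2tensor(img, torch.device('cpu')).shape)  # torch.Size([3, 224, 224])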
|
StarcoderdataPython
|
3320343
|
<gh_stars>1-10
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""
@author: <NAME>
network head for classification.
The design is as follows:
... -> Feature Map -> MLP HEAD -> Classification loss
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from detectron.core.config import cfg
from detectron.utils.c2 import const_fill
from detectron.utils.c2 import gauss_fill
import detectron.utils.blob as blob_utils
def add_mlp_outputs(model, blob_in, dim):
"""Add classification ops."""
model.FC(
blob_in,
'logits',
dim,
model.num_classes,
weight_init=gauss_fill(0.01),
bias_init=const_fill(0.0)
)
if not model.train: # == if test
# Only add softmax when testing; during training the softmax is combined
# with the label cross entropy loss for numerical stability
model.Softmax('logits', 'cls_prob', engine='CUDNN')
def add_mlp_losses(model):
"""Add losses for classification """
cls_prob, loss_cls = model.net.SoftmaxWithLoss(
['logits', 'labels_int32'], ['cls_prob', 'loss_cls'],
scale=model.GetLossScale()
)
loss_gradients = blob_utils.get_loss_gradients(model, [loss_cls])
model.Accuracy(['cls_prob', 'labels_int32'], 'accuracy_cls')
model.AddLosses(['loss_cls'])
model.AddMetrics('accuracy_cls')
return loss_gradients
def add_Xmlp_head(model, blob_in, dim_in):
    """Add a global average pool followed by a stack of FC + ReLU + Dropout layers."""
    hidden_dims = cfg.CLASSIFICATION.MLP_HEADS_DIM
    avg_kernel = cfg.CLASSIFICATION.FINAL_AVG_KERNEL
    dropout_rate = cfg.CLASSIFICATION.DROPOUT_RATE
    is_test = not model.train
    model.AveragePool(
        blob_in,
        'final_avg',
        kernel=avg_kernel,
        stride=1,
        global_pooling=True,
    )
    blob_in = 'final_avg'
    for i, hidden_dim in enumerate(hidden_dims):
        model.FC(blob_in, 'fc' + str(6 + i), dim_in, hidden_dim)
        model.Relu('fc' + str(6 + i), 'fc' + str(6 + i))
        model.Dropout('fc' + str(6 + i), 'drop_fc' + str(6 + i),
                      ratio=dropout_rate, is_test=is_test)
        blob_in = 'drop_fc' + str(6 + i)
        dim_in = hidden_dim
    return blob_in, dim_in
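# A hypothetical wiring sketch (not part of the original file): in a Detectron
# model builder these helpers would typically be chained roughly as below, where
# `model`, `conv_blob`, and `conv_dim` come from the backbone and are assumptions.
#
#     blob, dim = add_Xmlp_head(model, conv_blob, conv_dim)
#     add_mlp_outputs(model, blob, dim)
#     if model.train:
#         loss_gradients = add_mlp_losses(model)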
|
StarcoderdataPython
|
179250
|
<filename>gdcv/db/base_match_state.py
from sqlalchemy import Column, String, Integer, BigInteger, PrimaryKeyConstraint
class BaseMatchState(object):
__table_args__ = (
PrimaryKeyConstraint('event_key', 'match_id', 'wall_time'),
)
event_key = Column(String(16), nullable=False) # Like 2017nyny
match_id = Column(String(16), nullable=False) # Like qm1
play = Column(Integer, nullable=False) # Accounts for replays
wall_time = Column(BigInteger, nullable=False)
mode = Column(String(16)) # pre_match, auto, teleop, post_match
time_remaining = Column(Integer) # Number of seconds remaining in mode
red_score = Column(Integer)
blue_score = Column(Integer)
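# A hypothetical usage sketch (not part of the original module): BaseMatchState
# is written as a declarative mixin, so a concrete table would pair it with a
# declarative base roughly as below. The `MatchState` class name and the
# `match_states` table name are assumptions for illustration only.
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class MatchState(BaseMatchState, Base):
    """Concrete match-state table assembled from the BaseMatchState mixin."""
    __tablename__ = 'match_states'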
|
StarcoderdataPython
|
1622160
|
<filename>tests/test_parser.py
"""Define tests for parser endpoints."""
import aiohttp
import pytest
from regenmaschine import Client
from .common import TEST_HOST, TEST_PASSWORD, TEST_PORT, load_fixture
@pytest.mark.asyncio
async def test_parsers_current(aresponses, authenticated_local_client):
"""Test getting all current parsers."""
async with authenticated_local_client:
authenticated_local_client.add(
f"{TEST_HOST}:{TEST_PORT}",
"/api/4/parser",
"get",
aresponses.Response(text=load_fixture("parser_response.json"), status=200),
)
async with aiohttp.ClientSession() as session:
client = Client(session=session)
await client.load_local(TEST_HOST, TEST_PASSWORD, port=TEST_PORT, ssl=False)
controller = next(iter(client.controllers.values()))
data = await controller.parsers.current()
assert len(data) == 1
assert data[0]["name"] == "NOAA Parser"
@pytest.mark.asyncio
async def test_parsers_post_data(aresponses, authenticated_local_client):
"""Test pushing data to parser."""
async with authenticated_local_client:
authenticated_local_client.add(
f"{TEST_HOST}:{TEST_PORT}",
"/api/4/parser/data",
"post",
aresponses.Response(
text=load_fixture("parser_post_data_response.json"), status=200
),
)
async with aiohttp.ClientSession() as session:
client = Client(session=session)
await client.load_local(TEST_HOST, TEST_PASSWORD, port=TEST_PORT, ssl=False)
controller = next(iter(client.controllers.values()))
payload = load_fixture("parser_post_data_payload.json")
data = await controller.parsers.post_data(payload)
assert len(data) == 2
assert data["message"] == "OK"
|
StarcoderdataPython
|