id (stringlengths 1-7) | text (stringlengths 6-1.03M) | dataset_id (stringclasses 1 value)
---|---|---|
3303651
|
<filename>pavement.py<gh_stars>1-10
from collections import OrderedDict
import sys
from importlib import import_module
from paver.easy import task, needs, path, sh, cmdopts, options
from paver.setuputils import setup, find_package_data, install_distutils_tasks
try:
from base_node_rpc.pavement_base import *
except ImportError:
import warnings
warnings.warn('Could not import `base_node_rpc` (expected during '
'install).')
sys.path.insert(0, '.')
import version
install_distutils_tasks()
DEFAULT_ARDUINO_BOARDS = ['uno']
PROJECT_PREFIX = [d for d in path('.').dirs()
if d.joinpath('Arduino').isdir()
and d.name not in ('build', )][0].name
module_name = PROJECT_PREFIX
package_name = module_name.replace('_', '-')
rpc_module = import_module(PROJECT_PREFIX)
VERSION = version.getVersion()
URL='http://github.com/sci-bots/%s.git' % package_name
PROPERTIES = OrderedDict([('package_name', package_name),
('display_name', package_name),
('manufacturer', 'Sci-Bots Inc.'),
('software_version', VERSION),
('url', URL)])
LIB_PROPERTIES = PROPERTIES.copy()
LIB_PROPERTIES.update(OrderedDict([('author', '<NAME>'),
('author_email', '<EMAIL>'),
                                   ('short_description', 'Stepper motor '
'controller based on the Arduino '
'base node RPC framework.'),
('version', VERSION),
('long_description', ''),
('category', 'Communication'),
('architectures', 'avr')]))
options(
rpc_module=rpc_module,
PROPERTIES=PROPERTIES,
LIB_PROPERTIES=LIB_PROPERTIES,
base_classes=['BaseNodeSerialHandler',
'BaseNodeEeprom',
'BaseNodeI2c',
'BaseNodeI2cHandler<Handler>',
'BaseNodeConfig<ConfigMessage, Address>',
'BaseNodeState<StateMessage>'],
rpc_classes=['stepper_motor_controller::Node'],
DEFAULT_ARDUINO_BOARDS=DEFAULT_ARDUINO_BOARDS,
setup=dict(name=package_name,
version=VERSION,
description=LIB_PROPERTIES['short_description'],
author='<NAME>',
author_email='<EMAIL>',
url=URL,
license='GPLv2',
install_requires=['base-node-rpc>=0.17'],
include_package_data=True,
packages=[str(PROJECT_PREFIX)]))
|
StarcoderdataPython
|
4806192
|
from .builder import (
PromtailBuilder
)
from .option import (
PromtailOptions
)
from .configfile import (
PromtailConfigFileOptions,
PromtailConfigFile,
)
from .configfileext import (
PromtailConfigFileExt_Kubernetes,
)
__version__ = "0.8.0"
__all__ = [
'PromtailOptions',
'PromtailBuilder',
'PromtailConfigFileOptions',
'PromtailConfigFile',
'PromtailConfigFileExt_Kubernetes',
]
|
StarcoderdataPython
|
14609
|
<gh_stars>0
import math as ma
# note all sizes in m^2
# all costs in pounds
def get_details():
return {'w': get_value("Width:", float),
'h': get_value("Height:", float),
'cost': get_value("Cost per tile:", float),}
def get_value(text = "enter_val", expected_type = None):
while True:
r_in = raw_input(text)
try:
r_in = expected_type(r_in)
return r_in
except ValueError:
print "Incorrect variable type entered. Expected: %s" % expected_type
def get_cost(d = {}):
for key in ['w', 'h', 'cost', 'tile_area']:
assert key in d
total_cost = d['w']*d['h']*d['cost']/d['tile_area']
return total_cost
if __name__ == "__main__":
vals = get_details()
    vals['tile_area'] = 0.04  # 0.2 m x 0.2 m tiles -> 0.04 m^2 each
    print("\n > Total cost: %.2f\n" % get_cost(vals))
|
StarcoderdataPython
|
10446
|
#!/usr/bin/env python
try:
    from setuptools import setup
except ImportError:
from distutils.core import setup
setup(name='genderizer',
version='0.1.2.3',
license='MIT',
description='Genderizer tries to infer gender information looking at first name and/or making text analysis',
long_description=open('README.md').read(),
url='https://github.com/muatik/genderizer',
author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
packages=['genderizer'],
package_data={'genderizer': ['data/*']},
platforms='any')
|
StarcoderdataPython
|
3396369
|
# Merge Sorted Array
# You are given two integer arrays nums1 and nums2, sorted in non-decreasing order,
# and two integers m and n, representing the number of elements in nums1 and nums2 respectively.
# Merge nums1 and nums2 into a single array sorted in non-decreasing order.
# The final sorted array should not be returned by the function, but instead be
# stored inside the array nums1. To accommodate this, nums1 has a length of m + n,
# where the first m elements denote the elements that should be merged, and the last
# n elements are set to 0 and should be ignored. nums2 has a length of n.
# Example 1:
# Input: nums1 = [1,2,3,0,0,0], m = 3, nums2 = [2,5,6], n = 3
# Output: [1,2,2,3,5,6]
# Explanation: The arrays we are merging are [1,2,3] and [2,5,6].
# The result of the merge is [1,2,2,3,5,6] with the underlined elements coming from nums1.
# Example 2:
# Input: nums1 = [1], m = 1, nums2 = [], n = 0
# Output: [1]
# Explanation: The arrays we are merging are [1] and [].
# The result of the merge is [1].
# Example 3:
# Input: nums1 = [0], m = 0, nums2 = [1], n = 1
# Output: [1]
# Explanation: The arrays we are merging are [] and [1].
# The result of the merge is [1].
# Note that because m = 0, there are no elements in nums1. The 0 is only there to
# ensure the merge result can fit in nums1.
# Constraints:
# nums1.length == m + n
# nums2.length == n
# 0 <= m, n <= 200
# 1 <= m + n <= 200
# -109 <= nums1[i], nums2[j] <= 109
# Follow up: Can you come up with an algorithm that runs in O(m + n) time?
def mergeSortedArrays(nums1, nums2):
    # nums1 ends with len(nums2) placeholder zeros, so the valid element counts are:
    m, n = len(nums1) - len(nums2), len(nums2)
    # Our pointers: last valid element of each array and last free slot in nums1
    i, j, last = m - 1, n - 1, len(nums1) - 1
    # merge them in reverse order
    while i >= 0 and j >= 0:
        if nums1[i] > nums2[j]:
            nums1[last] = nums1[i]
            i -= 1
        else:
            nums1[last] = nums2[j]
            j -= 1
        last -= 1
    # copy any remaining nums2 elements (remaining nums1 elements are already in place)
    while j >= 0:
        nums1[last] = nums2[j]
        j, last = j - 1, last - 1
    print(nums1)
# print(nums1, nums2)
mergeSortedArrays([1, 2, 3, 0, 0, 0], [2, 5, 6])
# nums1 = [1,2,3,0,0,0], m = 3, nums2 = [2,5,6], n = 3
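# As a quick sanity check (an addition, not part of the original snippet), Example 3
# from the problem statement above can be exercised the same way; expected output: [1]
mergeSortedArrays([0], [1])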
|
StarcoderdataPython
|
4837615
|
<reponame>jdelrue/digital_me
from jumpscale import j
import inspect
# import imp
import sys
import os
from .GedisCmd import GedisCmd
JSBASE = j.application.jsbase_get_class()
class GedisCmds(JSBASE):
"""
    all commands captured in a capnp object, which can be stored in redis or any other key-value store
"""
def __init__(self,server=None, name="",path=None,capnpbin=None):
JSBASE.__init__(self)
if path is None:
raise RuntimeError("path cannot be None")
self.path=path
self.server = server
SCHEMA = """
@url = jumpscale.gedis.cmd
@name = GedisCmds
name = ""
comment = ""
code = ""
schema_in = ""
schema_out = ""
args = ""
@url = jumpscale.gedis.api
@name = GedisServerSchema
namespace = ""
cmds = (LO) !jumpscale.gedis.cmd
"""
j.data.schema.schema_add(SCHEMA)
self.schema = j.data.schema.schema_get(url="jumpscale.gedis.api")
self._cmds = {}
if capnpbin:
self.data = self.schema.get(capnpbin=capnpbin)
else:
dname = j.sal.fs.getDirName(path)
if dname not in sys.path:
sys.path.append(dname)
classname = self._class_find_name()
exec("from %s import %s" % (classname, classname))
class_ = eval(classname)
self.server.classes[name] = class_()
# j.shell()
self.data = self.schema.new()
self.data.name = name
self.data.namespace = name
for name,item in inspect.getmembers(class_):
if name.startswith("_"):
continue
if name.startswith("logger"):
continue
if name in ["cache"]:
continue
if inspect.isfunction(item):
cmd = self.data.cmds.new()
cmd.name = name
code = inspect.getsource(item)
cmd.code,cmd.comment,cmd.schema_in, cmd.schema_out, cmd.args= self._method_source_process(code)
@property
def name(self):
return self.data.name
@property
def cmds(self):
if self._cmds == {}:
print('\n\nPopulating commands for namespace(%s)\n' % self.data.namespace)
for cmd in self.data.cmds:
print("\tpopulata: %s"%(cmd.name))
self._cmds[cmd.name] = GedisCmd(self,cmd)
print('\n')
return self._cmds
def _class_find_name(self):
txt = j.sal.fs.fileGetContents(self.path)
for line in txt.split("\n"):
if line.strip().startswith("class"):
pre = line.split("(")[0]
classname = pre.split(" ")[1].strip()
return classname
raise RuntimeError("did not find class name in %s"%self.path)
def _method_source_process(self,txt):
"""
return code,comment,schema_in, schema_out
"""
txt=j.data.text.strip(txt)
code = ""
comment = ""
schema_in = ""
schema_out = ""
args = ""
state="START"
for line in txt.split("\n"):
lstrip = line.strip().lower()
if state=="START" and lstrip.startswith("def"):
state = "DEF"
if "self" in lstrip:
if "," in lstrip:
arg0,arg1=lstrip.split(",",1)
args,_ = arg1.split(")",1)
else:
args = ""
else:
arg0,arg1=lstrip.split("(",1)
args,_ = arg1.split(")",1)
continue
if lstrip.startswith("\"\"\""):
if state=="DEF":
state="COMMENT"
continue
if state=="COMMENT":
state="CODE"
continue
raise RuntimeError()
if lstrip.startswith("```") or lstrip.startswith("'''"):
if state.startswith("SCHEMA"): #are already in schema go back to comment
state="COMMENT"
continue
if state=="COMMENT": #are in comment, now found the schema
if lstrip.endswith("out"):
state="SCHEMAO"
else:
state="SCHEMAI"
continue
raise RuntimeError()
if state=="SCHEMAI":
schema_in+="%s\n"%line
continue
if state=="SCHEMAO":
schema_out+="%s\n"%line
continue
if state=="COMMENT":
comment+="%s\n"%line
continue
if state=="CODE" or state=="DEF":
code+="%s\n"%line
continue
raise RuntimeError()
return j.data.text.strip(code),j.data.text.strip(comment),j.data.text.strip(schema_in),\
j.data.text.strip(schema_out),j.data.text.strip(args)
def cmd_exists(self,name):
return name in self.children
def __repr__(self):
path2 = self.path.split("github")[-1].strip("/")
return 'CMDS:%s' % (path2)
__str__ = __repr__
|
StarcoderdataPython
|
130290
|
from linode_api4.errors import UnexpectedResponseError
from linode_api4.objects import Base, Property
class AuthorizedApp(Base):
api_endpoint = "/profile/apps/{id}"
properties = {
"id": Property(identifier=True),
"scopes": Property(),
"label": Property(),
"created": Property(is_datetime=True),
"expiry": Property(is_datetime=True),
"thumbnail_url": Property(),
"website": Property(),
}
class PersonalAccessToken(Base):
api_endpoint = "/profile/tokens/{id}"
properties = {
"id": Property(identifier=True),
"scopes": Property(),
"label": Property(mutable=True),
"created": Property(is_datetime=True),
"token": Property(),
"expiry": Property(is_datetime=True),
}
class WhitelistEntry(Base):
api_endpoint = "/profile/whitelist/{id}"
properties = {
'id': Property(identifier=True),
'address': Property(),
'netmask': Property(),
'note': Property(),
}
class Profile(Base):
api_endpoint = "/profile"
id_attribute = 'username'
properties = {
'username': Property(identifier=True),
'uid': Property(),
'email': Property(mutable=True),
'timezone': Property(mutable=True),
'email_notifications': Property(mutable=True),
'referrals': Property(),
'ip_whitelist_enabled': Property(mutable=True),
'lish_auth_method': Property(mutable=True),
'authorized_keys': Property(mutable=True),
'two_factor_auth': Property(),
'restricted': Property(),
}
def enable_tfa(self):
"""
        Enables TFA for the token's user. This requires a follow-up request
to confirm TFA. Returns the TFA secret that needs to be confirmed.
"""
result = self._client.post('/profile/tfa-enable')
return result['secret']
def confirm_tfa(self, code):
"""
Confirms TFA for an account. Needs a TFA code generated by enable_tfa
"""
self._client.post('/profile/tfa-enable-confirm', data={
"tfa_code": code
})
return True
def disable_tfa(self):
"""
Turns off TFA for this user's account.
"""
self._client.post('/profile/tfa-disable')
return True
@property
def grants(self):
"""
Returns grants for the current user
"""
from linode_api4.objects.account import UserGrants # pylint: disable-all
resp = self._client.get('/profile/grants') # use special endpoint for restricted users
grants = None
if resp is not None:
# if resp is None, we're unrestricted and do not have grants
grants = UserGrants(self._client, self.username, resp)
return grants
@property
def whitelist(self):
"""
Returns the user's whitelist entries, if whitelist is enabled
"""
return self._client._get_and_filter(WhitelistEntry)
def add_whitelist_entry(self, address, netmask, note=None):
"""
Adds a new entry to this user's IP whitelist, if enabled
"""
result = self._client.post("{}/whitelist".format(Profile.api_endpoint),
data={
"address": address,
"netmask": netmask,
"note": note,
})
        if 'id' not in result:
raise UnexpectedResponseError("Unexpected response creating whitelist entry!")
return WhitelistEntry(result['id'], self._client, json=result)
class SSHKey(Base):
"""
An SSH Public Key uploaded to your profile for use in Linode Instance deployments.
"""
api_endpoint = '/profile/sshkeys/{id}'
properties = {
"id": Property(identifier=True),
"label": Property(mutable=True),
"ssh_key": Property(),
"created": Property(is_datetime=True),
}
|
StarcoderdataPython
|
1724977
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from warnings import warn
from iminuit.iminuit_warnings import InitialParamWarning
from iminuit import util as mutil
import numpy as np
def pedantic(self, parameters, kwds, errordef):
def w(msg):
warn(msg, InitialParamWarning, stacklevel=3)
for vn in parameters:
if vn not in kwds:
w('Parameter %s does not have initial value. Assume 0.' % vn)
if 'error_' + vn not in kwds and 'fix_' + mutil.param_name(vn) not in kwds:
w('Parameter %s is floating but does not have initial step size. Assume 1.' % vn)
for vlim in mutil.extract_limit(kwds):
if mutil.param_name(vlim) not in parameters:
w('%s is given. But there is no parameter %s. Ignore.' % (vlim, mutil.param_name(vlim)))
for vfix in mutil.extract_fix(kwds):
if mutil.param_name(vfix) not in parameters:
w('%s is given. But there is no parameter %s. Ignore.' % (vfix, mutil.param_name(vfix)))
for verr in mutil.extract_error(kwds):
if mutil.param_name(verr) not in parameters:
w('%s float. But there is no parameter %s. Ignore.' % (verr, mutil.param_name(verr)))
if errordef is None:
w('errordef is not given. Default to 1.')
def draw_profile(self, vname, x, y, s=None, band=True, text=True):
from matplotlib import pyplot as plt
x = np.array(x)
y = np.array(y)
if s is not None:
s = np.array(s, dtype=bool)
x = x[s]
y = y[s]
plt.plot(x, y)
plt.grid(True)
plt.xlabel(vname)
plt.ylabel('FCN')
try:
minpos = np.argmin(y)
# Scan to the right of minimum until greater than min + errordef.
# Note: We need to find the *first* crossing of up, right from the
# minimum, because there can be several. If the loop is replaced by
# some numpy calls, make sure that this property is retained.
yup = self.errordef + y[minpos]
best = float("infinity")
for i in range(minpos, len(y)):
z = abs(y[i] - yup)
if z < best:
rightpos = i
best = z
else:
break
else:
raise ValueError("right edge not found")
# Scan to the left of minimum until greater than min + errordef.
best = float("infinity")
for i in range(minpos, 0, -1):
z = abs(y[i] - yup)
if z < best:
leftpos = i
best = z
else:
break
else:
raise ValueError("left edge not found")
plt.plot([x[leftpos], x[minpos], x[rightpos]],
[y[leftpos], y[minpos], y[rightpos]], 'o')
if band:
plt.axvspan(x[leftpos], x[rightpos], facecolor='g', alpha=0.5)
if text:
plt.title('%s = %.3g - %.3g + %.3g (scan)' % (vname, x[minpos],
x[minpos] - x[leftpos],
x[rightpos] - x[minpos]),
fontsize="large")
except ValueError:
warn(RuntimeWarning('band and text is requested but '
'the bound is too narrow.'))
return x, y, s
def draw_contour(self, x, y, bins=20, bound=2, args=None, show_sigma=False):
from matplotlib import pyplot as plt
vx, vy, vz = self.contour(x, y, bins, bound, args, subtract_min=True)
v = [self.errordef * ((i + 1) ** 2) for i in range(bound)]
CS = plt.contour(vx, vy, vz, v, colors=['b', 'k', 'r'])
if not show_sigma:
plt.clabel(CS, v)
else:
tmp = dict((vv, r'%i $\sigma$' % (i + 1)) for i, vv in enumerate(v))
plt.clabel(CS, v, fmt=tmp, fontsize=16)
plt.xlabel(x)
plt.ylabel(y)
plt.axhline(self.values[y], color='k', ls='--')
plt.axvline(self.values[x], color='k', ls='--')
plt.grid(True)
return vx, vy, vz
def draw_mncontour(self, x, y, nsigma=2, numpoints=20):
from matplotlib import pyplot as plt
from matplotlib.contour import ContourSet
c_val = []
c_pts = []
for sigma in range(1, nsigma + 1):
pts = self.mncontour(x, y, numpoints, sigma)[2]
# close curve
pts.append(pts[0])
c_val.append(sigma)
c_pts.append([pts]) # level can have more than one contour in mpl
cs = ContourSet(plt.gca(), c_val, c_pts)
plt.clabel(cs, inline=1, fontsize=10)
plt.xlabel(x)
plt.ylabel(y)
return cs
|
StarcoderdataPython
|
1782975
|
import sys
sys.path.append('.')
import cfg
import async_session
while True:
sess = async_session.AsyncSession(cfg.timeout_read)
sess.open_session()
with sess.clone_session(community='woho') as priv_sess:
to_set = {
async_session.oid_str_to_tuple('1.3.6.1.2.1.1.6.0'): ('f', 's')
}
try:
priv_sess.set_oids(to_set)
except async_session.SNMPTimeoutError:
pass
|
StarcoderdataPython
|
1711989
|
"""Controls digital outputs added with a 74HC595 shift register.
Description
-----------
A CircuitPython program that interfaces a 74HC595 serial-in parallel-out shift
register IC to add digital outputs to a CircuitPython compatible board.
Circuit
-------
- A 74HC595 shift register IC is connected to the board's SPI serial bus and D5
pins.
- The SPI SCK pin is connected to the 74HC595 SRCLK (11) pin.
- The SPI MOSI pin is connected to the 74HC595 SER (14) pin.
- The D5 pin is connected to the 74HC595 RCLK (12) pin.
- 8 LEDs are connected, via resistors, to the 74HC595's output pins
(QA - QH).
Libraries/Modules
-----------------
- time Standard Library
- https://docs.python.org/3/library/time.html
- Access to sleep function.
- board CircuitPython Core Module
- https://circuitpython.readthedocs.io/en/latest/shared-bindings/board/
- Access to board's GPIO pins and hardware.
- digitalio CircuitPython Core Module
- https://circuitpython.readthedocs.io/en/latest/shared-bindings/digitalio/
- Provides basic digital pin I/O support.
- Adafruit_CircuitPython_74HC595 CircuitPython Driver Library
- https://circuitpython.readthedocs.io/projects/74hc595/
- Provides support for 74HC595 shift register IC.
Notes
-----
- Provides examples for multiple approaches of visualizing and sending digital
output data to the 74HC595 shift register IC.
- This program assumes a single 74HC595 shift register IC is being utilized.
If two or more '595s are daisy chained together, change the
SHIFT_REGISTERS_NUM constant to the actual number of '595s being used.
See function specific comments for additional details.
- Comments are Sphinx (reStructuredText) compatible.
TODO
----
- None.
Author(s)
---------
- Created by <NAME> on 04/13/2021.
- Modified by <NAME> on 06/14/2021.
Copyright (c) 2021 Woolsey Workshop. All rights reserved.
Members
-------
"""
# Imports
from time import sleep
import board
import digitalio
import adafruit_74hc595
# Pin Mapping
osr_latch_pin = digitalio.DigitalInOut(board.D5)
"""The pin connected to the 74HC595 RCLK (12) pin, used for latching data."""
# Global Constants
SHIFT_REGISTERS_NUM = 1
"""The number of daisy chained 74HC595 shift registers."""
# Global Instances
osr = adafruit_74hc595.ShiftRegister74HC595(board.SPI(), osr_latch_pin, SHIFT_REGISTERS_NUM)
"""The instance of the connected 74HC595 shift register IC."""
# Functions
def change_single_outputs():
"""Example code for setting an individual shift register output with each
write.
This approach should be the most familiar to CircuitPython users. It uses
the same mechanism as setting standard GPIO pin values, but may involve more
shift operations than other approaches since only one output at a time can
be changed.
"""
# Output pin definitions (pin references)
led_0 = osr.get_pin(0)
led_1 = osr.get_pin(1)
led_2 = osr.get_pin(2)
led_3 = osr.get_pin(3)
led_4 = osr.get_pin(4)
led_5 = osr.get_pin(5)
led_6 = osr.get_pin(6)
led_7 = osr.get_pin(7)
# Set individual LEDs
led_1.value = True # turn on LED 1 only
sleep(1)
led_1.value = False # turn off LED 1 only
led_6.value = True # turn on LED 6 only
sleep(1)
led_6.value = False # turn off LED 6 only
sleep(1)
# Set multiple LEDs
led_0.value = True # turn on even numbered LEDs
led_2.value = True
led_4.value = True
led_6.value = True
sleep(1)
led_0.value = False # turn off even numbered LEDs
led_2.value = False
led_4.value = False
led_6.value = False
led_1.value = True # turn on odd numbered LEDs
led_3.value = True
led_5.value = True
led_7.value = True
sleep(1)
led_1.value = False # turn off odd numbered LEDs
led_3.value = False
led_5.value = False
led_7.value = False
sleep(1)
def change_outputs_with_binary_values():
"""Example code for setting all shift register outputs with each write using
binary values (1 = True, 0 = False).
This approach produces the most concise code, but does not indicate the
meaning of each of the outputs.
If daisy chaining multiple shift registers together, set the appropriate
byte (index) of the outputs variable.
"""
outputs = osr.gpio # retrieve current shift register output values
# Set individual LEDs
outputs[0] = 0b00000010 # turn on LED 1 only
osr.gpio = outputs # set new shift register output values
sleep(1)
outputs[0] = 0b01000000 # turn on LED 6 only
osr.gpio = outputs
sleep(1)
outputs[0] = 0b00000000 # turn off all LEDs
osr.gpio = outputs
sleep(1)
# Set multiple LEDs
outputs[0] = 0b01010101 # turn on only even numbered LEDs
osr.gpio = outputs
sleep(1)
outputs[0] = 0b10101010 # turn on only odd numbered LEDs
osr.gpio = outputs
sleep(1)
outputs[0] = 0b00000000 # turn off all LEDs
osr.gpio = outputs
sleep(1)
def change_outputs_with_defined_names():
"""Example code for setting all shift register outputs with each write using
named outputs.
This approach provides the ability to use named outputs with single shift
register writes, but all outputs must still be represented with each write.
Only include named outputs to set True, everything else will be set False.
If daisy chaining multiple shift registers together, set the appropriate
byte (index) of the outputs variable.
"""
# Output pin definitions (bit positions)
led_0 = 0b00000001
led_1 = 0b00000010
led_2 = 0b00000100
led_3 = 0b00001000
led_4 = 0b00010000
led_5 = 0b00100000
led_6 = 0b01000000
led_7 = 0b10000000
outputs = osr.gpio # retrieve current shift register output values
# Set individual LEDs
outputs[0] = led_1 # turn on LED 1 only
osr.gpio = outputs # set new shift register output values
sleep(1)
outputs[0] = led_6 # turn on LED 6 only
osr.gpio = outputs
sleep(1)
outputs[0] = 0 # turn off all LEDs
osr.gpio = outputs
sleep(1)
# Set multiple LEDs
outputs[0] = led_0 | led_2 | led_4 | led_6 # turn on only even numbered LEDs
osr.gpio = outputs
sleep(1)
outputs[0] = led_1 | led_3 | led_5 | led_7 # turn on only odd numbered LEDs
osr.gpio = outputs
sleep(1)
outputs[0] = 0 # turn off all LEDs
osr.gpio = outputs
sleep(1)
def cycle_leds():
"""Example code that continuously cycles through the LEDs (end to end)."""
leds = [osr.get_pin(n) for n in range(8 * osr.number_of_shift_registers)]
for position, led in enumerate(leds):
if position == len(leds) - 1: break # skip the last LED
led.value = True
sleep(0.1)
led.value = False
for position, led in enumerate(reversed(leds)):
if position == len(leds) - 1: break # skip the first LED
led.value = True
sleep(0.1)
led.value = False
def main():
"""Main program entry."""
while True:
change_single_outputs()
change_outputs_with_binary_values()
change_outputs_with_defined_names()
# cycle_leds()
if __name__ == "__main__": # required for generating Sphinx documentation
main()
|
StarcoderdataPython
|
3374325
|
#!/usr/bin/python
import argparse
from book import Book
# parse some input params
# $booklog <title>: show status of book (if book exists or not, show similar titles to fix misspell)
# -h, --help: show help info
# -n, --note <title>: Edit book notes
# -a, --author <firstname lastname>: author name
# -i, --isbn <number>: isbn number of book
# -e, --edit <title>
# -t, --tag <tag>: add tag to book
# example
# $booklog -n The Way of Kings -a <NAME> -t fantasy fiction
# $booklog "The Way of Kings"
# >> The Way of Kings by <NAME>
# >> Tags: Fantasy, Fiction
# >> Notes: ...
parser = argparse.ArgumentParser(prog='booklog', description='Log your books.')
# Optionals
parser.add_argument('-t','--title', help='title of a book')
parser.add_argument('-a', '--author', help='author of a book')
parser.add_argument('-n', '--note', help='edit book note')
parser.add_argument('-l', '--list', help='add book to list')
args = parser.parse_args()
if args.title:
bk1 = Book(args.title, args.author, args.list)
bk1.writeBook()
|
StarcoderdataPython
|
1773150
|
<reponame>abdessamad14/zellij
"""
Test euclid.py
"""
import itertools
import math
from hypothesis import assume, given
from hypothesis.strategies import lists, integers
import pytest
from zellij.euclid import (
Line, Point, Segment, Bounds, EmptyBounds,
along_the_way, collinear, line_collinear,
CoincidentLines, ParallelLines,
)
from zellij.postulates import adjacent_pairs, all_pairs
from .hypo_helpers import ipoints, t_zero_one
# Points
@pytest.mark.parametrize("p1, p2, result", [
((0, 0), (0, 0), True),
((0, 0), (1, 0), False),
((0, 0), (0.000000001, 0), False),
((0, 0), (0.001, 0), False),
])
def test_point_equality(p1, p2, result):
assert (Point(*p1) == Point(*p2)) == result
@pytest.mark.parametrize("p1, p2, result", [
((0, 0), (0, 0), True),
((0, 0), (1, 0), False),
((0, 0), (0.000000001, 0), True),
((0, 0), (0.001, 0), False),
])
def test_point_is_close(p1, p2, result):
assert Point(*p1).is_close(Point(*p2)) == result
@pytest.mark.parametrize("p1, p2, result", [
((0, 0), (1, 1), 1.4142135623730951),
((10, 10), (10, 10), 0),
((100, 100), (103, 104), 5),
])
def test_point_distance(p1, p2, result):
assert math.isclose(Point(*p1).distance(Point(*p2)), result)
@pytest.mark.parametrize("p1, p2, p3, result", [
((0, 0), (1, 1), (10, 10), True),
((0, 0), (1, 1), (100, 200), False),
((0, 0), (1, 1), (1000000, 1000001), False),
((0, 0), (1, 1), (10.000000001, 10), True),
((131.1614964698457, -376.12499999999994), (131.16149646984576, -404.17837588253866), (131.16149646984567, -363.1644750675048), False),
((131.16149646984576, -404.17837588253866), (131.1614964698457, -376.12499999999994), (131.16149646984567, -363.1644750675048), True),
])
def test_points_collinear(p1, p2, p3, result):
assert collinear(Point(*p1), Point(*p2), Point(*p3)) == result
@given(ipoints, ipoints, t_zero_one)
def test_hypo_points_collinear(p1, p2, t):
# If I pick a point that is a linear combination of two points, it should
# be considered collinear. The value of t determines in what order the
# points are collinear. We check for not-collinear, but only if t is away
# from the fuzzy areas near zero and one, and if p1 and p2 are separated.
p3 = along_the_way(p1, p2, t)
if t < 0:
assert collinear(p3, p1, p2)
if t < 0.01 and p1 != p2:
assert not collinear(p1, p3, p2)
assert not collinear(p1, p2, p3)
elif t <= 1:
assert collinear(p1, p3, p2)
if 0.01 < t < 0.99 and p1 != p2:
assert not collinear(p3, p1, p2)
assert not collinear(p1, p2, p3)
else:
assert collinear(p1, p2, p3)
if t > 1.01 and p1 != p2:
assert not collinear(p3, p1, p2)
assert not collinear(p1, p3, p2)
@given(ipoints, ipoints, t_zero_one)
def test_hypo_points_not_collinear(p1, p2, t):
# If I pick a point that is a linear combination of two points, it should
# not be considered collinear with a line that is offset from the two points.
if p1.distance(p2) < 1:
# If the endpoints are too close together, the floats get unwieldy.
return
p3 = along_the_way(p1, p2, t)
next_to = Line(p1, p2).offset(1)
p1o, p2o = next_to.p1, next_to.p2
assert not collinear(p1o, p2o, p3)
# Lines
@pytest.mark.parametrize("p1, p2, angle", [
((0, 0), (1, 0), 0),
((0, 0), (0, 1), 90),
((10, 10), (0, 20), 135),
])
def test_line_angle(p1, p2, angle):
l = Line(Point(*p1), Point(*p2))
assert math.isclose(l.angle(), angle)
@pytest.mark.parametrize("p1, p2, p3, p4, pi", [
((-1, 0), (1, 0), (0, -1), (0, 1), (0, 0)),
((17, 34), (23, 42), (100, 200), (300, 350), (194.85714285, 271.14285714)),
])
def test_intersect(p1, p2, p3, p4, pi):
l1 = Line(Point(*p1), Point(*p2))
l2 = Line(Point(*p3), Point(*p4))
assert l1.intersect(l2).is_close(Point(*pi))
@pytest.mark.parametrize("p1, p2, p3, p4, err", [
# Two identical lines.
((-1, 0), (1, 0), (-1, 0), (1, 0), CoincidentLines),
((-1, 0), (1, 0), (-2, 0), (2, 0), CoincidentLines),
# Two parallel lines.
((-1, 0), (1, 0), (-2, 1), (2, 1), ParallelLines),
])
def test_no_intersection(p1, p2, p3, p4, err):
l1 = Line(Point(*p1), Point(*p2))
l2 = Line(Point(*p3), Point(*p4))
with pytest.raises(err):
l1.intersect(l2)
def test_offset():
l1 = Line(Point(10, 10), Point(13, 14))
l2 = l1.offset(10)
assert l2.p1 == Point(18, 4)
assert l2.p2 == Point(21, 8)
@given(ipoints, ipoints, ipoints)
def test_parallel(p1, p2, p3):
# Make a line, and another line parallel to it through p3.
l = Line(p1, p2)
lpar = l.parallel(p3)
# Property: lpar should go through p3.
assert lpar.p1 == p3
# Property: l and lpar should have the same angle.
assert lpar.angle() == l.angle()
@given(ipoints, ipoints, ipoints)
def test_perpendicular(p1, p2, p3):
assume(p1 != p2)
l = Line(p1, p2)
foot = l.foot(p3)
perp = l.perpendicular(p3)
print(foot)
print(perp)
# Property: foot should be on l.
assert line_collinear(p1, p2, foot)
# Property: foot should be on perp.
assert line_collinear(perp.p1, perp.p2, foot)
# Property: perp's angle should be 90 degrees from l's.
angle_between = l.angle() - perp.angle()
assert math.isclose(angle_between % 180, 90)
# Segments
SEGMENT_INTERSECTIONS = [
# Good intersection.
((0, 1), (2, 1), (1, 0), (1, 2), (1, 1)),
# lines intersect, but segments don't.
((0, 1), (2, 1), (1, 2), (1, 4), None),
((0, 1), (2, 1), (3, 0), (3, 2), None),
((1, 2), (1, 4), (3, 1), (5, 1), None),
# lines are parallel.
((0, 1), (2, 1), (1, 3), (3, 3), None),
# lines are coincident, segments don't overlap.
((0, 1), (2, 1), (3, 1), (5, 1), None),
]
@pytest.mark.parametrize("p1, p2, p3, p4, isect", SEGMENT_INTERSECTIONS)
def test_segment_intersection(p1, p2, p3, p4, isect):
seg12 = Segment(p1, p2)
seg34 = Segment(p3, p4)
assert seg12.intersect(seg34) == isect
@pytest.mark.parametrize("p1, p2, p3, p4, isect", SEGMENT_INTERSECTIONS)
def test_segment_touches(p1, p2, p3, p4, isect):
seg12 = Segment(p1, p2)
seg34 = Segment(p3, p4)
if isect is None:
assert not seg12.touches(seg34)
else:
assert seg12.touches(seg34)
SEGMENT_INTERSECTION_ERRORS = [
# lines are coincident, segments do overlap.
((0, 1), (2, 1), (1, 1), (3, 1), CoincidentLines),
((1, -5), (-1, -5), (-5, -5), (0, -5), CoincidentLines),
]
@pytest.mark.parametrize("p1, p2, p3, p4, err", SEGMENT_INTERSECTION_ERRORS)
def test_segment_intersect_error(p1, p2, p3, p4, err):
with pytest.raises(err):
assert Segment(p1, p2).intersect(Segment(p3, p4))
@pytest.mark.parametrize("p1, p2, p3, p4, err", SEGMENT_INTERSECTION_ERRORS)
def test_segment_touches_errors(p1, p2, p3, p4, err):
assert err == CoincidentLines # ick
assert Segment(p1, p2).touches(Segment(p3, p4))
@given(ipoints, ipoints, lists(integers(min_value=1, max_value=99), min_size=1, max_size=5, unique=True))
def test_segment_sort_along(p1, p2, tvals):
# Get rid of pathological cases.
assume(p1.distance(p2) > 0.001)
tvals = [t / 100 for t in tvals]
fuzz = [1e-10, -1e-10]
points = [along_the_way(p1, p2, t) for t in tvals]
points = [Point(x+f, y+f) for (x, y), f in zip(points, itertools.cycle(fuzz))]
# Calculate the smallest distance between any pair of points. If we get
# the wrong answer from sort_along, then the total distance will be off by
# at least twice this.
min_gap = min(q1.distance(q2) for q1, q2 in all_pairs(points + [p1, p2]))
seg = Segment(p1, p2)
spoints = seg.sort_along(points)
assert len(spoints) == len(points)
assert all(pt in points for pt in spoints)
original = Point(*p1).distance(Point(*p2))
total = (
Point(*p1).distance(Point(*spoints[0])) +
sum(Point(*p).distance(Point(*q)) for p, q in adjacent_pairs(spoints)) +
Point(*spoints[-1]).distance(Point(*p2))
)
# The total distance will be wrong by at least 2*min_gap if it is wrong.
assert total - original < 2 * min_gap
# Bounds
@given(lists(ipoints, min_size=1))
def test_bounds_points(pts):
bounds = Bounds.points(pts)
assert_good_bounds(bounds, pts)
def assert_good_bounds(bounds, pts):
"""Assert that `bounds` is the right bounds for `pts`."""
assert bounds.llx <= bounds.urx
assert bounds.lly <= bounds.ury
# The bounds must contain all the points.
assert all(bounds.llx <= pt.x <= bounds.urx for pt in pts)
assert all(bounds.lly <= pt.y <= bounds.ury for pt in pts)
# Each edge of the bounds must touch at least one point.
assert any(bounds.llx == pt.x for pt in pts)
assert any(bounds.lly == pt.y for pt in pts)
assert any(bounds.urx == pt.x for pt in pts)
assert any(bounds.ury == pt.y for pt in pts)
@given(lists(ipoints, min_size=2))
def test_bounds_union(pts):
# Compute the bounds of the even and odd separately.
b0 = Bounds.points(pts[::2])
b1 = Bounds.points(pts[1::2])
# The union should be correct.
b = b0 | b1
assert_good_bounds(b, pts)
@given(lists(ipoints, min_size=4, max_size=8, unique=True))
def test_bounds_overlap(pts):
b0 = Bounds.points(pts[::2])
b1 = Bounds.points(pts[1::2])
assume(b0.width > 0 and b0.height > 0)
assume(b1.width > 0 and b1.height > 0)
assume(b0 != b1)
print(b0, b1)
if b0.overlap(b1):
# If they overlap, then one of the corners of b0 must be in b1,
# or vice-versa, or the side Segments must touch.
b0_in_b1 = any(pt in b1 for pt in b0.corners())
b1_in_b0 = any(pt in b0 for pt in b1.corners())
sides0 = list(b0.sides())
sides1 = list(b1.sides())
sides_touch = any(s0.touches(s1) for s0 in sides0 for s1 in sides1)
assert b0_in_b1 or b1_in_b0 or sides_touch
else:
# If they don't overlap, then none of the corners is in the other.
assert not any(pt in b1 for pt in b0.corners())
assert not any(pt in b0 for pt in b1.corners())
@given(lists(ipoints, min_size=2))
def test_empty_bounds(pts):
bounds = EmptyBounds()
for pt in pts:
bounds |= Bounds.points([pt])
assert bounds == Bounds.points(pts)
|
StarcoderdataPython
|
3341945
|
from bcg.run_BCG import BCG
from bcg.model_init import Model
import autograd.numpy as np
class Model_l1_ball(Model):
def minimize(self, gradient_at_x=None):
result = np.zeros(self.dimension)
if gradient_at_x is None:
result[0] = 1
else:
i = np.argmax(np.abs(gradient_at_x))
result[i] = -1 if gradient_at_x[i] > 0 else 1
return result
dimension = 100
l1Ball = Model_l1_ball(dimension)  # initialize the feasible region as an L1 ball of the chosen dimension (100 here)
# define function evaluation oracle and its gradient oracle
# the following example function is ||x||_2^2, where x is a n dimension vector
# CAREFUL: the gradient of the norm is not defined at 0!
#
# def f(x):
# return np.linalg.norm(x, ord=2)**2
#
# this will not work with autograd! alternatively pass the correct gradient for the norm^2
#
# def f_grad(x):
# return 2*x
def f(x):
return np.dot(x,x)
res = BCG(f, None, l1Ball)
# res = BCG(f, f_grad, l1Ball)
print('optimal solution {}'.format(res[0]))
print('dual_bound {}'.format(res[1]))
print('primal value {}'.format(res[2]))
|
StarcoderdataPython
|
3397826
|
<filename>server/__init__.py
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.orm import registry
from sqlalchemy.ext.automap import automap_base
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from flask_mail import Mail
from server.config import Config
db = SQLAlchemy()
# db_base = automap_base()
# db_base = registry()
bcrypt = Bcrypt()
login_manager = LoginManager()
mail = Mail()
def create_app(config_class=Config):
app = Flask(__name__)
    app.config.from_object(config_class)
db.init_app(app)
# with app.app_context():
# db.create_all()
# db_base.prepare(db.engine, reflect=True)
bcrypt.init_app(app)
login_manager.init_app(app)
mail.init_app(app)
from server.search.routes import search
from server.book.routes import books
from server.loan.routes import loans
from server.customers.routes import customers
from server.users.routes import users, users_unsecure
from server.main.routes import main, main_unsecure
from server.v1.routes import public_api
app.register_blueprint(search)
app.register_blueprint(books)
app.register_blueprint(loans)
app.register_blueprint(customers)
app.register_blueprint(users)
app.register_blueprint(users_unsecure)
app.register_blueprint(main)
app.register_blueprint(main_unsecure)
app.register_blueprint(public_api)
return app
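# A minimal usage sketch (an assumption, not part of the original package): the app
# factory above is normally driven from a separate entry point, for example:
#
#     from server import create_app
#     app = create_app()
#     app.run(debug=True)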
|
StarcoderdataPython
|
192407
|
import pytest
@pytest.mark.parametrize(
"server_options,port",
[("--debug", 8090), ("--debug --mathjax", 8090), ("--debug", 9090)],
)
@pytest.mark.parametrize("method", ["curl", "stdin"])
def test_math(browser, Server, server_options, port, method):
with Server(server_options, port) as srv:
srv.send(method, "tests/test_math.md")
result = browser.get(srv.port)
assert '<span class="test-case"></span>' in result, "No text was rendered"
latex_envs = (r"\begin", r"\end", "$$")
if "--mathjax" in server_options:
assert all(env not in result for env in latex_envs), (
"LaTeX equations should have been rendered as MathJax, "
"but that did not happen"
)
else:
assert all(
env in result for env in latex_envs
), "LaTeX equations are not left as it is"
|
StarcoderdataPython
|
3303981
|
# -*- coding: utf-8 -*-
"""
Copyright 2017-2018 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from mirage.flow import Workflow
from mirage.command import log, command
from mirage.generate.model_template import create_model_class
class DjangoModelMakeFlow(Workflow):
def main(self):
datatypes = """
Data Type: Rerating Fields:
string CharField
auto AutoField
auto64 BigAutoField
int64 BigIntegerField
binary BinaryField
bool BooleanField
char CharField
date DateField
datetime DateTimeField
decimal DecimalField
duration DurationField
interval DurationField
email EmailField
file FileField
path FilePathFields
float FloatField
image ImageField
int IntegerField
integer IntegerField
ip GenericIPAddressField
nullbool NullBooleanField
pint PositiveIntegerField
slug SlugField
text TextField
time TimeField
url URLField
        uuid                UUIDField
"""
print(datatypes)
|
StarcoderdataPython
|
1740220
|
<gh_stars>1000+
# encoding: utf-8
"""
srcap.py
Created by <NAME>
Copyright (c) 2014-2017 Exa Networks. All rights reserved.
"""
from struct import unpack
from exabgp.util import split
from exabgp.bgp.message.update.attribute.bgpls.linkstate import LinkState
from exabgp.bgp.message.update.attribute.bgpls.linkstate import FlagLS
from exabgp.bgp.message.notification import Notify
# draft-gredler-idr-bgp-ls-segment-routing-ext-03
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type | Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Flags | RESERVED |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Range Size |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# // SID/Label Sub-TLV (variable) //
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# SR Node Cap Flags
# +
# One or more entries, each of which have the following format:
#
# Range Size: 3 octet value indicating the number of labels in
# the range.
#
# SID/Label sub-TLV (as defined in Section 2.3.7.2).
# isis-segment-routing-extensions 3.1. SR-Capabilities Sub-TLV
@LinkState.register()
class SrCapabilities(FlagLS):
REPR = 'SR Capability Flags'
JSON = 'sr_capability_flags'
TLV = 1034
FLAGS = ['I', 'V', 'RSV', 'RSV', 'RSV', 'RSV', 'RSV', 'RSV']
def __init__(self, flags, sids):
FlagLS.__init__(self, flags)
self.sids = sids
def __repr__(self):
return "%s: %s, sids: %s" % (self.JSON, self.flags, self.sids)
@classmethod
def unpack(cls, data, length):
# Extract node capability flags
flags = cls.unpack_flags(data[0:1])
# Move pointer past flags and reserved bytes
data = data[2:]
sids = []
while data:
# Range Size: 3 octet value indicating the number of labels in
# the range.
range_size = unpack('!L', bytes([0]) + data[:3])[0]
# SID/Label: If length is set to 3, then the 20 rightmost bits
# represent a label. If length is set to 4, then the value
# represents a 32 bit SID.
t, l = unpack('!HH', data[3:7])
if t != 1161:
raise Notify(3, 5, "Invalid sub-TLV type: {}".format(t))
if l == 3:
sids.append((range_size, unpack('!L', bytes([0]) + data[:3])[0]))
elif l == 4:
# XXX: really we are reading 7+ but then re-parsing it again ??
sids.append((range_size, unpack('!I', data[7 : l + 7])[0]))
data = data[l + 7 :]
return cls(flags, sids)
def json(self, compact=None):
return '"{}": {}, "sids": {}'.format(self.JSON, FlagLS.json(self), self.sids)
|
StarcoderdataPython
|
1635406
|
# -*- coding: utf-8 -*-
# @Time    : 2018/6/7 5:22 PM
# @Author : waitWalker
# @Email : <EMAIL>
# @File : MTTDataBase.py
# @Software: PyCharm
# Database connection
import pymysql
import time
class MTTDataBase:
error_code = ''
instance = None
# db = None
# cursor = None
timeout = 30
time_count = 0
    # Constructor: initialise the instance and create the db connection object
def __init__(self, config):
try:
self.db = pymysql.connect(
host=config['host'],
user=config['user'],
password=config['password'],
db=config['db'],
charset=config['charset'],
cursorclass=pymysql.cursors.DictCursor)
print("connect database success")
except pymysql.Error as error:
self.error_code = error.args[0]
error_msg = 'mysql connect error !',error[1]
print(error_msg)
if self.time_count < self.timeout:
interval = 5
self.time_count += interval
time.sleep(interval)
return self.__init__(config)
else:
raise Exception(error_msg)
        self.cursor = self.db.cursor()
    # Query data; return a value according to the query result
def query(self, sql):
try:
result = self.cursor.execute(sql)
except pymysql.Error as error:
print('query error:', error)
self.error_code = error.args[0]
result = False
return result
    # Update data; roll back if the update fails
def update(self, sql):
try:
result = self.cursor.execute(sql)
self.db.commit()
except pymysql.Error as error:
print("update database error:", error)
self.error_code = error.args[0]
result = False
self.rollback()
return result
    # Insert data; roll back if the insert fails
def insert(self, sql):
try:
result = self.cursor.execute(sql)
self.db.commit()
except pymysql.Error as error:
print("insert error:",error)
self.error_code = error.args[0]
result = False
self.rollback()
return result
    # Delete data; roll back if the delete fails
def delete(self, sql):
try:
result = self.cursor.execute(sql)
self.db.commit()
except pymysql.Error as error:
print("delete error:",error)
self.error_code = error.args[0]
result = False
self.rollback()
return result
    # Fetch all rows
def fetchall(self):
return self.cursor.fetchall()
    # Roll back when an error or other issue is encountered
def rollback(self):
self.db.rollback()
    # Close the database connection
def close(self):
try:
self.cursor.close()
self.db.close()
except pymysql.Error as error:
print(error)
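# Hedged usage sketch (connection details below are placeholders, not from the original file):
#
#     config = {'host': 'localhost', 'user': 'root', 'password': '<PASSWORD>',
#               'db': 'mtt', 'charset': 'utf8mb4'}
#     database = MTTDataBase(config)
#     if database.query("SELECT * FROM some_table"):
#         rows = database.fetchall()
#     database.close()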
|
StarcoderdataPython
|
3328104
|
import json
import requests
from data import *
from url import Url
for url_to_check in urls_to_check:
for i, url in enumerate( url_to_check['urls'] ):
print(url)
for j, child in enumerate( Url(url).get_children() ):
print(f'{i}.{j} - Checking {child}')
if child.response_status() != 200 or j==3:
body = \
f'You have a broken link at {url}\n' + \
f'- Link: {child}\n' + \
'\n\n' + \
'This message was automatically created by <https://github.com/matheusvanzan/broken-links-monitor|broken-links-monitor>'
response = requests.post(
url_to_check['channel'],
json.dumps({ 'text': body })
)
print(response)
|
StarcoderdataPython
|
926
|
<gh_stars>0
"""
Enumerator Test
"""
from typing import Any
class is_valid_enum():
"""
Test if a variable is on a list of valid values
"""
__slots__ = ('symbols')
def __init__(self, **kwargs):
"""
-> "type": "enum", "symbols": ["up", "down"]
symbols: list of allowed values (case sensitive)
"""
self.symbols = kwargs.get('symbols', ())
def __call__(self, value: Any) -> bool:
return value and value in self.symbols
def __str__(self):
return f'enum {self.symbols}'
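# Illustrative usage (an addition, not part of the original module): build the test
# from schema-style keywords and apply it to candidate values.
if __name__ == '__main__':
    direction = is_valid_enum(symbols=["up", "down"])
    print(direction("up"))        # True  - value is in the allowed set
    print(direction("sideways"))  # False - value is not in the allowed set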
|
StarcoderdataPython
|
1683174
|
<reponame>jamesramsay100/Fantasy-Premier-League<filename>global_scraper.py
from parsers import *
from cleaners import *
from getters import *
from collector import collect_gw, merge_gw
from understat import parse_epl_data
def parse_data():
""" Parse and store all the data
"""
print("Getting data")
data = get_data()
season = '2019-20'
base_filename = 'data/' + season + '/'
print("Parsing summary data")
parse_players(data["elements"], base_filename)
gw_num = 0
events = data["events"]
for event in events:
if event["is_current"] == True:
gw_num = event["id"]
print("Cleaning summary data")
clean_players(base_filename + 'players_raw.csv', base_filename)
print("Getting fixtures data")
fixtures(base_filename)
print("Getting teams data")
parse_team_data(data["teams"], base_filename)
print("Extracting player ids")
id_players(base_filename + 'players_raw.csv', base_filename)
player_ids = get_player_ids(base_filename)
num_players = len(data["elements"])
player_base_filename = base_filename + 'players/'
gw_base_filename = base_filename + 'gws/'
print("Extracting player specific data")
for i,name in player_ids.items():
player_data = get_individual_player_data(i)
parse_player_history(player_data["history_past"], player_base_filename, name, i)
parse_player_gw_history(player_data["history"], player_base_filename, name, i)
if gw_num > 0:
print("Collecting gw scores")
collect_gw(gw_num, player_base_filename, gw_base_filename)
print("Merging gw scores")
merge_gw(gw_num, gw_base_filename)
parse_epl_data(base_filename)
def fixtures(base_filename):
data = get_fixtures_data()
parse_fixtures(data, base_filename)
def main():
parse_data()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
72381
|
import csv
import math
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import time
import numpy as np
# read the data
csvfile=open("weightedX.csv", 'r')
x = list(csv.reader(csvfile))
csvfile=open("weightedY.csv", 'r')
y = list(csv.reader(csvfile))
m=len(x)
n=1
x3=[]
y2=[]
for i in range(m):
x3.append(float(x[i][0]))
y2.append(float(y[i][0]))
# normalise the data
meanx=sum(x3)/m
v=0 # variance
for i in range(m):
t=x3[i]-meanx
v+=t*t
v/=m
v=math.sqrt(v)
for i in range(m):
x3[i]=(x3[i]-meanx)/v
x2=[]
for i in range(m):
x2.append(np.array([1,x3[i]]))
X=np.array(x2)
Y=np.array(y2)
xvalues=np.linspace(min(x3),max(x3),100)
plt.ion()
fig=plt.figure()
ax1 = fig.add_subplot(1,1,1)
# plots Training data &
# straight line from linear regression
def pl(th):
ax1.clear()
ax1.scatter(x3, y2, label= "Training Data", color= "r",
marker= ".", s=10)
the=list(th)
yvalues=the[1]*xvalues+the[0]
ax1.plot(xvalues, yvalues, label="Hypothesis function learned",color ='b')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.title('Q2 (a)')
plt.show()
plt.pause(0.001)
# All weights same
# theta= inv(X'*X)*X'*Y
theta = np.dot(np.dot(np.linalg.inv(np.dot(X.T ,X)) , np.transpose(X)) , Y)
print("theta=",theta)
plt.ioff()
pl(theta)
# Part (b)
fig=plt.figure()
ax1 = fig.add_subplot(1,1,1)
# change value of tau for part (c)
tau=0.8
tau2=tau*tau
# plots the hypothesis function learned
def plot_2():
ax1.clear()
ax1.scatter(x3, y2, label= "Training Data", color= "r",
marker= ".", s=10)
# calculate the yaxis values for corresponding xaxis values
yvalues=[]
for i in range(len(xvalues)):
weights=[]
for j in range(m):
c=xvalues[i]-X[j][1]
power=-(c*c)/(2*tau2)
weights.append(math.exp(power))
# convert np array to diagonal matrix
# W is m*m matrix
W=np.diag(np.array(weights))
# theta=inv(X'*W*X)*X'*W*Y
the = np.dot(np.dot(np.dot(np.linalg.inv(np.dot(np.dot(X.T ,W),X)) , X.T), W) , Y)
yvalues.append(the[1]*xvalues[i]+the[0])
ax1.plot(xvalues, yvalues, label="Hypothesis function learned",color ='b')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.title('Q2 tau={}'.format(tau))
plt.show()
plt.pause(0.001)
plt.ioff()
plot_2()
|
StarcoderdataPython
|
76585
|
"""Pareto tail indices plot."""
import matplotlib.pyplot as plt
from matplotlib.colors import to_rgba_array
import matplotlib.cm as cm
import numpy as np
from xarray import DataArray
from .plot_utils import (
_scale_fig_size,
get_coords,
color_from_dim,
format_coords_as_labels,
get_plotting_function,
)
from ..stats import ELPDData
def plot_khat(
khats,
color=None,
xlabels=False,
show_bins=False,
bin_format="{1:.1f}%",
annotate=False,
hover_label=False,
hover_format="{1}",
figsize=None,
textsize=None,
coords=None,
legend=False,
markersize=None,
ax=None,
hlines_kwargs=None,
backend=None,
backend_kwargs=None,
show=None,
**kwargs
):
"""
Plot Pareto tail indices.
Parameters
----------
khats : ELPDData cointaining pareto shapes information or array
Pareto tail indices.
color : str or array_like, optional
Colors of the scatter plot, if color is a str all dots will have the same color,
if it is the size of the observations, each dot will have the specified color,
otherwise, it will be interpreted as a list of the dims to be used for the color code
xlabels : bool, optional
Use coords as xticklabels
show_bins : bool, optional
Show the number of khats which fall in each bin.
bin_format : str, optional
The string is used as formatting guide calling ``bin_format.format(count, pct)``.
annotate : bool, optional
Show the labels of k values larger than 1.
hover_label : bool, optional
Show the datapoint label when hovering over it with the mouse. Requires an interactive
backend.
hover_format : str, optional
String used to format the hover label via ``hover_format.format(idx, coord_label)``
figsize : tuple, optional
Figure size. If None it will be defined automatically.
textsize: float, optional
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on figsize.
coords : mapping, optional
Coordinates of points to plot. **All** values are used for computation, but only a
a subset can be plotted for convenience.
legend : bool, optional
Include a legend to the plot. Only taken into account when color argument is a dim name.
markersize: int, optional
markersize for scatter plot. Defaults to `None` in which case it will
be chosen based on autoscaling for figsize.
ax: axes, optional
Matplotlib axes or bokeh figures.
hlines_kwargs: dictionary, optional
Additional keywords passed to ax.hlines.
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used. For additional documentation
check the plotting method of the backend.
show : bool, optional
Call backend show function.
kwargs :
Additional keywords passed to ax.scatter.
Returns
-------
axes : matplotlib axes or bokeh figures
Examples
--------
Plot estimated pareto shape parameters showing how many fall in each category.
.. plot::
:context: close-figs
>>> import arviz as az
>>> radon = az.load_arviz_data("radon")
>>> loo_radon = az.loo(radon, pointwise=True)
>>> az.plot_khat(loo_radon, show_bins=True)
Show xlabels
.. plot::
:context: close-figs
>>> centered_eight = az.load_arviz_data("centered_eight")
>>> khats = az.loo(centered_eight, pointwise=True).pareto_k
>>> az.plot_khat(khats, xlabels=True, annotate=True)
Use coord values to create color mapping
.. plot::
:context: close-figs
>>> az.plot_khat(loo_radon, color="observed_county", cmap="tab20")
Use custom color scheme
.. plot::
:context: close-figs
>>> counties = radon.posterior.observed_county.values
>>> colors = [
... "blue" if county[-1] in ("A", "N") else "green" for county in counties
... ]
>>> az.plot_khat(loo_radon, color=colors)
"""
if hlines_kwargs is None:
hlines_kwargs = {}
hlines_kwargs.setdefault("linestyle", [":", "-.", "--", "-"])
hlines_kwargs.setdefault("alpha", 0.7)
hlines_kwargs.setdefault("zorder", -1)
hlines_kwargs.setdefault("color", "C1")
if coords is None:
coords = {}
if color is None:
color = "C0"
if isinstance(khats, np.ndarray):
khats = khats.flatten()
xlabels = False
legend = False
dims = []
else:
if isinstance(khats, ELPDData):
khats = khats.pareto_k
if not isinstance(khats, DataArray):
raise ValueError("Incorrect khat data input. Check the documentation")
khats = get_coords(khats, coords)
dims = khats.dims
n_data_points = khats.size
xdata = np.arange(n_data_points)
if isinstance(khats, DataArray):
coord_labels = format_coords_as_labels(khats)
else:
coord_labels = xdata.astype(str)
(figsize, ax_labelsize, _, xt_labelsize, linewidth, scaled_markersize) = _scale_fig_size(
figsize, textsize
)
if markersize is None:
markersize = scaled_markersize ** 2 # s in scatter plot mus be markersize square
# for dots to have the same size
kwargs.setdefault("s", markersize)
kwargs.setdefault("marker", "+")
color_mapping = None
cmap = None
if isinstance(color, str):
if color in dims:
colors, color_mapping = color_from_dim(khats, color)
cmap_name = kwargs.get("cmap", plt.rcParams["image.cmap"])
cmap = getattr(cm, cmap_name)
rgba_c = cmap(colors)
else:
legend = False
rgba_c = to_rgba_array(np.full(n_data_points, color))
else:
legend = False
try:
rgba_c = to_rgba_array(color)
except ValueError:
cmap_name = kwargs.get("cmap", plt.rcParams["image.cmap"])
cmap = getattr(cm, cmap_name)
rgba_c = cmap(color)
khats = khats if isinstance(khats, np.ndarray) else khats.values.flatten()
alphas = 0.5 + 0.2 * (khats > 0.5) + 0.3 * (khats > 1)
rgba_c[:, 3] = alphas
plot_khat_kwargs = dict(
hover_label=hover_label,
hover_format=hover_format,
ax=ax,
figsize=figsize,
xdata=xdata,
khats=khats,
rgba_c=rgba_c,
kwargs=kwargs,
annotate=annotate,
coord_labels=coord_labels,
ax_labelsize=ax_labelsize,
xt_labelsize=xt_labelsize,
show_bins=show_bins,
linewidth=linewidth,
hlines_kwargs=hlines_kwargs,
xlabels=xlabels,
legend=legend,
color_mapping=color_mapping,
cmap=cmap,
color=color,
n_data_points=n_data_points,
bin_format=bin_format,
backend_kwargs=backend_kwargs,
show=show,
)
if backend == "bokeh":
plot_khat_kwargs.pop("hover_label")
plot_khat_kwargs.pop("hover_format")
plot_khat_kwargs.pop("kwargs")
plot_khat_kwargs.pop("ax_labelsize")
plot_khat_kwargs.pop("xt_labelsize")
plot_khat_kwargs.pop("hlines_kwargs")
plot_khat_kwargs.pop("xlabels")
plot_khat_kwargs.pop("legend")
plot_khat_kwargs.pop("color_mapping")
plot_khat_kwargs.pop("cmap")
plot_khat_kwargs.pop("color")
# TODO: Add backend kwargs
plot = get_plotting_function("plot_khat", "khatplot", backend)
axes = plot(**plot_khat_kwargs)
return axes
|
StarcoderdataPython
|
30014
|
# Generated by Django 3.0.11 on 2021-01-01 16:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("bpp", "0231_ukryj_status_korekty"),
]
operations = [
migrations.AlterField(
model_name="autor",
name="pseudonim",
field=models.CharField(
blank=True,
help_text="\n Jeżeli w bazie danych znajdują się autorzy o zbliżonych imionach, nazwiskach i tytułach naukowych,\n skorzystaj z tego pola aby ułatwić ich rozróżnienie. Pseudonim pokaże się w polach wyszukiwania\n oraz na podstronie autora, po nazwisku i tytule naukowym.",
max_length=300,
null=True,
),
),
migrations.AlterField(
model_name="uczelnia",
name="sortuj_jednostki_alfabetycznie",
field=models.BooleanField(
default=True,
help_text="Jeżeli ustawione na 'FAŁSZ', sortowanie jednostek będzie odbywało się ręcznie\n tzn za pomocą ustalonej przez administratora systemu kolejności. ",
),
),
]
|
StarcoderdataPython
|
4825466
|
<gh_stars>0
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
# import logging
import luigi
from luigi.contrib.lsf import LSFJobTask
from tool.bwa_mem_aligner import bwaAlignerMEMTool
from tool.bam_utils import bamUtilsTask
# logger = logging.getLogger('luigi-interface')
class ProcessMemBwaSingle(LSFJobTask):
"""
Tool wrapper for aligning single end reads using BWA MEM
"""
genome_fa = luigi.Parameter()
genome_idx = luigi.Parameter()
fastq_file = luigi.Parameter()
output_bam = luigi.Parameter()
def output(self):
"""
Returns
-------
output : luigi.LocalTarget()
Location of the aligned reads in bam format
"""
return luigi.LocalTarget(self.output_bam)
def work(self):
"""
Worker function for splitting the FASTQ file into smaller chunks
Parameters
----------
genome_fa : str
Location of the FASTA file of the genome to align the reads to
genome_idx : str
Location of the index files in .tar.gz file prepared by the BWA
indexer
fastq_file : str
Location of the FASTQ file
output_bam : str
Location of the aligned reads in bam format
"""
bwa_handle = bwaAlignerMEMTool({"no-untar" : True})
bwa_handle.bwa_aligner_single(
self.genome_fa,
self.fastq_file,
self.output_bam,
self.genome_idx,
{}
)
bam_handle = bamUtilsTask()
bam_handle.bam_sort(self.output_bam)
class ProcessMemBwaPaired(LSFJobTask):
"""
    Tool wrapper for aligning paired end reads using BWA MEM
"""
genome_fa = luigi.Parameter()
genome_idx = luigi.Parameter()
fastq_file_1 = luigi.Parameter()
fastq_file_2 = luigi.Parameter()
output_bam = luigi.Parameter()
def output(self):
"""
Returns
-------
output : luigi.LocalTarget()
Location of the aligned reads in bam format
"""
return luigi.LocalTarget(self.output_bam)
def work(self):
"""
        Worker function for aligning paired end reads with BWA MEM
Parameters
----------
genome_fa : str
Location of the FASTA file of the genome to align the reads to
genome_idx : str
Location of the index files in .tar.gz file prepared by the BWA
indexer
fastq_file_1 : str
Location of the FASTQ file
fastq_file_2 : str
Location of the FASTQ file
output_bam : str
Location of the aligned reads in bam format
"""
bwa_handle = bwaAlignerMEMTool({"no-untar" : True})
bwa_handle.bwa_aligner_paired(
self.genome_fa,
self.fastq_file_1,
self.fastq_file_2,
self.output_bam,
self.genome_idx,
{}
)
bam_handle = bamUtilsTask()
bam_handle.bam_sort(self.output_bam)
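# Hedged usage sketch (not part of the original module): file paths are
# placeholders and an LSF cluster plus the `tool` package are assumed to be
# available. The tasks would typically be scheduled via luigi.build, e.g.
#
# luigi.build(
#     [ProcessMemBwaPaired(
#         genome_fa="genome.fa",
#         genome_idx="genome_idx.tar.gz",
#         fastq_file_1="sample_1.fastq",
#         fastq_file_2="sample_2.fastq",
#         output_bam="sample.bam")],
#     local_scheduler=True)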
|
StarcoderdataPython
|
1717284
|
import numpy as np
import cv2
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
imge = cv2.VideoCapture(0)
while True:
ret,img = imge.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # Haar cascades expect a grayscale frame
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x,y,w,h) in faces:
        img = cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)  # draw on the colour frame, not the grayscale copy
cv2.imshow('img',img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
imge.release()
cv2.destroyAllWindows()
|
StarcoderdataPython
|
1697281
|
#__________________ Python script - version 3.8 _____________________#
# Author    |> <NAME>
# Date      |> March 5th, 2021
# Paradigm  |> Object oriented
# Purpose   |> User interaction via input().
#___________________________________________________________________#
class ReceberNumero:
__num = None
def __init__(self):
try:
ReceberNumero.__num = int(input('Digite um número: '))
MostrarNumero(ReceberNumero.__num)
except:
print('Ops.. Algo deu errado.')
class MostrarNumero:
@classmethod
def __init__(cls, numero):
print('O número que você digitou foi o {}'.format(numero))
if __name__ == '__main__':
x = ReceberNumero()
|
StarcoderdataPython
|
3341849
|
<gh_stars>0
import sys
def part1(numbers):
blocks = list(numbers)
seenConfigurations = []
iterationcnt = 1
while True:
nextSplitIdx = blocks.index(max(blocks))
banksize = blocks[nextSplitIdx]
blocks[nextSplitIdx] = 0
nextBlock = nextSplitIdx + 1
while banksize > 0:
if nextBlock == len(blocks):
nextBlock = 0
blocks[nextBlock] += 1
banksize -= 1
nextBlock += 1
if blocks in seenConfigurations:
break
seenConfigurations.append(list(blocks))
iterationcnt += 1
return iterationcnt
def part2(numbers):
blocks = list(numbers)
seenConfigurations = {}
iterationcnt = 1
while True:
nextSplitIdx = blocks.index(max(blocks))
banksize = blocks[nextSplitIdx]
blocks[nextSplitIdx] = 0
nextBlock = nextSplitIdx + 1
while banksize > 0:
if nextBlock == len(blocks):
nextBlock = 0
blocks[nextBlock] += 1
banksize -= 1
nextBlock += 1
blockstr = ' '.join(map(str, blocks))
if blockstr in seenConfigurations:
            return iterationcnt - seenConfigurations[blockstr]
seenConfigurations[blockstr] = iterationcnt
iterationcnt += 1
return None
if __name__ == "__main__":
data = [int(x) for x in sys.stdin.read().split('\t')]
    print(part1(data))
    print(part2(data))
|
StarcoderdataPython
|
3321359
|
def is_zero(a):
return all(i == 0 for i in a)
def dot(a, b):
assert len(a) == len(b)
return sum(a[i] * b[i] for i in range(len(a)))
def cross(a, b):
return [a[1]*b[2] - a[2]*b[1], a[2]*b[0] - a[0]*b[2], a[0]*b[1] - a[1]*b[0]]
def neg(a):
return [-i for i in a]
def scale(a, n):
return [n * i for i in a]
|
StarcoderdataPython
|
3312950
|
import torch
import torch.nn.functional as F
from torch import nn, optim
from torchvision import models
from utils import get_upsampling_weight
from collections import OrderedDict
class FCN32VGG(nn.Module):
def __init__(self, num_classes=21, pretrained=False):
super(FCN32VGG, self).__init__()
vgg = models.vgg16(pretrained=pretrained)
features, classifier = list(vgg.features.children()), list(vgg.classifier.children())
# Why pad the input:
# https://github.com/shelhamer/fcn.berkeleyvision.org#frequently-asked-questions
# features[0].padding = (100, 100)
for f in features:
if 'MaxPool' in f.__class__.__name__:
f.ceil_mode = True
elif 'ReLU' in f.__class__.__name__:
f.inplace = True
self.features5 = nn.Sequential(*features)
# As the shapes are different, we can't use load_state_dict/state_dict directly
fc6 = nn.Conv2d(512, 4096, kernel_size=7)
param6 = classifier[0].state_dict()
param6['weight'] = param6['weight'].view(4096, 512, 7, 7)
fc6.load_state_dict(param6, strict=True)
fc7 = nn.Conv2d(4096, 4096, kernel_size=1)
param7 = classifier[3].state_dict()
param7['weight'] = param7['weight'].view(4096, 4096, 1, 1)
fc7.load_state_dict(param7, strict=True)
final = nn.Conv2d(4096, num_classes, kernel_size=1)
final.weight.data.zero_()
final.bias.data.zero_()
upscore = nn.ConvTranspose2d(num_classes, num_classes, kernel_size=64, stride=32, bias=False)
upscore.weight.data.copy_(get_upsampling_weight(num_classes, num_classes, 64))
self.final = nn.Sequential(OrderedDict([
('conv0', fc6),
('relu1', nn.ReLU(inplace=True)),
('dropout2', nn.Dropout()),
('conv3', fc7),
('relu4', nn.ReLU(inplace=True)),
('dropout5', nn.Dropout()),
('conv6', final),
('tconv7', upscore)
]))
def forward(self, x):
this_shape = x.size()
x = self.features5(x)
x = self.final(x)
x = F.upsample(input=x, size=this_shape[2:], mode='bilinear', align_corners=True)
return x
class FCN32VGG_MULTI(nn.Module):
def __init__(self, num_classes=[21,20], pretrained=False):
super(FCN32VGG_MULTI, self).__init__()
vgg = models.vgg16(pretrained=pretrained)
features, classifier = list(vgg.features.children()), list(vgg.classifier.children())
# Why pad the input:
# https://github.com/shelhamer/fcn.berkeleyvision.org#frequently-asked-questions
# features[0].padding = (100, 100)
for f in features:
if 'MaxPool' in f.__class__.__name__:
f.ceil_mode = True
elif 'ReLU' in f.__class__.__name__:
f.inplace = True
self.features5 = nn.Sequential(*features)
# As the shapes are different, we can't use load_state_dict/state_dict directly
fc6 = nn.Conv2d(512, 4096, kernel_size=7)
param6 = classifier[0].state_dict()
param6['weight'] = param6['weight'].view(4096, 512, 7, 7)
fc6.load_state_dict(param6, strict=True)
fc7 = nn.Conv2d(4096, 4096, kernel_size=1)
param7 = classifier[3].state_dict()
param7['weight'] = param7['weight'].view(4096, 4096, 1, 1)
fc7.load_state_dict(param7, strict=True)
final1 = nn.Conv2d(4096, num_classes[0], kernel_size=1)
final1.weight.data.zero_()
final1.bias.data.zero_()
upscore1 = nn.ConvTranspose2d(num_classes[0], num_classes[0], kernel_size=64, stride=32, bias=False)
upscore1.weight.data.copy_(get_upsampling_weight(num_classes[0], num_classes[0], 64))
self.final1 = nn.Sequential(OrderedDict([
('conv0', fc6),
('relu1', nn.ReLU(inplace=True)),
('dropout2', nn.Dropout()),
('conv3', fc7),
('relu4', nn.ReLU(inplace=True)),
('dropout5', nn.Dropout()),
('conv6', final1),
('tconv7', upscore1)
]))
final2 = nn.Conv2d(4096, num_classes[1], kernel_size=1)
final2.weight.data.zero_()
final2.bias.data.zero_()
upscore2 = nn.ConvTranspose2d(num_classes[1], num_classes[1], kernel_size=64, stride=32, bias=False)
upscore2.weight.data.copy_(get_upsampling_weight(num_classes[1], num_classes[1], 64))
self.final2 = nn.Sequential(OrderedDict([
('conv0', fc6),
('relu1', nn.ReLU(inplace=True)),
('dropout2', nn.Dropout()),
('conv3', fc7),
('relu4', nn.ReLU(inplace=True)),
('dropout5', nn.Dropout()),
('conv6', final2),
('tconv7', upscore2)
]))
def forward(self, x, task):
this_shape = x.size()
x = self.features5(x)
x = self.final1(x) if task == 0 else self.final2(x)
x = F.upsample(input=x, size=this_shape[2:], mode='bilinear', align_corners=True)
return x
class FCN32ALEXNET(nn.Module):
def __init__(self, num_classes=21, pretrained=False):
super(FCN32ALEXNET, self).__init__()
alexnet = models.alexnet(pretrained=pretrained)
features, classifier = list(alexnet.features.children()), list(alexnet.classifier.children())
# Why pad the input:
# https://github.com/shelhamer/fcn.berkeleyvision.org#frequently-asked-questions
# features[0].padding = (100, 100)
for f in features:
if 'MaxPool' in f.__class__.__name__:
f.ceil_mode = True
elif 'ReLU' in f.__class__.__name__:
f.inplace = True
self.features5 = nn.Sequential(*features)
# As the shapes are different, we can't use load_state_dict/state_dict directly
fc6 = nn.Conv2d(256, 4096, kernel_size=6)
param6 = classifier[1].state_dict()
param6['weight'] = param6['weight'].view(4096, 256, 6, 6)
fc6.load_state_dict(param6, strict=True)
fc7 = nn.Conv2d(4096, 4096, kernel_size=1)
param7 = classifier[4].state_dict()
param7['weight'] = param7['weight'].view(4096, 4096, 1, 1)
fc7.load_state_dict(param7, strict=True)
final = nn.Conv2d(4096, num_classes, kernel_size=1)
final.weight.data.zero_()
final.bias.data.zero_()
upscore = nn.ConvTranspose2d(num_classes, num_classes, kernel_size=64, stride=32, bias=False)
upscore.weight.data.copy_(get_upsampling_weight(num_classes, num_classes, 64))
self.final = nn.Sequential(OrderedDict([
('conv0', fc6),
('relu1', nn.ReLU(inplace=True)),
('dropout2', nn.Dropout()),
('conv3', fc7),
('relu4', nn.ReLU(inplace=True)),
('dropout5', nn.Dropout()),
('conv6', final),
('tconv7', upscore)
]))
def forward(self, x):
this_shape = x.size()
x = self.features5(x)
x = self.final(x)
x = F.upsample(input=x, size=this_shape[2:], mode='bilinear', align_corners=True)
return x
class FCN32RESNET(nn.Module):
def __init__(self, num_classes=21, pretrained=False, depth=18, dprob=0.1):
super(FCN32RESNET, self).__init__()
print('pretrained = {}, depth = {}'.format(pretrained, depth))
if depth == 18:
resnet = models.resnet18(pretrained=pretrained)
elif depth == 34:
resnet = models.resnet34(pretrained=pretrained)
elif depth == 50:
resnet = models.resnet50(pretrained=pretrained)
elif depth == 101:
resnet = models.resnet101(pretrained=pretrained)
elif depth == 152:
resnet = models.resnet152(pretrained=pretrained)
else:
raise TypeError('Invalid Resnet depth')
features = [*resnet.children()]
num_channels = features[-1].in_features
features = features[0:-1] # remove the original 1000-dimension Linear layer
for f in features:
if 'MaxPool' in f.__class__.__name__ or 'AvgPool' in f.__class__.__name__:
f.ceil_mode = True
elif 'ReLU' in f.__class__.__name__:
f.inplace = True
# Add Dropout module after each conv layer for torchvision.models.resnet
# modified_features = []
# for f in features:
# if f.__class__.__name__ == 'Sequential':
# new_seq = []
# for ff in f.children():
# list_modules = [*ff.children()]
# for module in list_modules:
# new_seq.append(module)
# if 'Conv' in module.__class__.__name__:
# new_seq.append(nn.Dropout(p=dprob))
# modified_features.append(nn.Sequential(*new_seq))
# else:
# modified_features.append(f)
self.features = nn.Sequential(*features)
final = nn.Conv2d(num_channels, num_classes, kernel_size=1)
final.weight.data.zero_()
final.bias.data.zero_()
upscore = nn.ConvTranspose2d(num_classes, num_classes, kernel_size=64, stride=32, bias=False)
upscore.weight.data.copy_(get_upsampling_weight(num_classes, num_classes, 64))
self.final = nn.Sequential(OrderedDict([
('conv6', final),
('tconv7', upscore)
]))
def forward(self, x):
this_shape = x.size()
x = self.features(x)
x = self.final(x)
x = F.upsample(input=x, size=this_shape[2:], mode='bilinear', align_corners=True)
return x
class FCN32RESNET_MULTI(nn.Module):
def __init__(self, num_classes=[21,20], pretrained=False, depth=18):
super(FCN32RESNET_MULTI, self).__init__()
if depth == 18:
resnet = models.resnet18(pretrained=pretrained)
elif depth == 34:
resnet = models.resnet34(pretrained=pretrained)
elif depth == 50:
resnet = models.resnet50(pretrained=pretrained)
elif depth == 101:
resnet = models.resnet101(pretrained=pretrained)
elif depth == 152:
resnet = models.resnet152(pretrained=pretrained)
else:
raise TypeError('Invalid Resnet depth')
features = [*resnet.children()]
num_channels = features[-1].in_features
features = features[0:-1] # remove the original 1000-dimension Linear layer
for f in features:
if 'MaxPool' in f.__class__.__name__ or 'AvgPool' in f.__class__.__name__:
f.ceil_mode = True
elif 'ReLU' in f.__class__.__name__:
f.inplace = True
self.features = nn.Sequential(*features)
final1 = nn.Conv2d(num_channels, num_classes[0], kernel_size=1)
final1.weight.data.zero_()
final1.bias.data.zero_()
upscore1 = nn.ConvTranspose2d(num_classes[0], num_classes[0], kernel_size=64, stride=32, bias=False)
upscore1.weight.data.copy_(get_upsampling_weight(num_classes[0], num_classes[0], 64))
self.final1 = nn.Sequential(OrderedDict([
('conv6', final1),
('tconv7', upscore1)
]))
final2 = nn.Conv2d(num_channels, num_classes[1], kernel_size=1)
final2.weight.data.zero_()
final2.bias.data.zero_()
upscore2 = nn.ConvTranspose2d(num_classes[1], num_classes[1], kernel_size=64, stride=32, bias=False)
upscore2.weight.data.copy_(get_upsampling_weight(num_classes[1], num_classes[1], 64))
self.final2 = nn.Sequential(OrderedDict([
('conv6', final2),
('tconv7', upscore2)
]))
def forward(self, x, task):
this_shape = x.size()
x = self.features(x)
x = self.final1(x) if task == 0 else self.final2(x)
x = F.upsample(input=x, size=this_shape[2:], mode='bilinear', align_corners=True)
return x
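# Minimal smoke test for the wrappers above; the 224x224 input and the 21-class
# setting are illustrative assumptions rather than values fixed by this module.
if __name__ == "__main__":
    net = FCN32VGG(num_classes=21, pretrained=False)
    dummy = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        out = net(dummy)
    print(out.shape)  # expected: torch.Size([1, 21, 224, 224])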
|
StarcoderdataPython
|
1623432
|
<reponame>tacosync/skcomws
'''
Socket.IO server for testing
CLI:
python -m watchgod server.main [aiohttp|sanic|tornado|asgi]
Test results:
| connect | disconnect | event | background_task | Ctrl+C
---------+---------+------------+-------+-----------------|--------
aiohttp | O | O | O | O | O
sanic | O | X | O | X | O
torando | O | O | O | O | X
asgi | X | ? | ? | X | O
'''
import asyncio
import sys
import socketio
import tornado.ioloop
import tornado.web
import uvicorn
from aiohttp import web
from sanic import Sanic
PORT = 63047
count = 0
if len(sys.argv) >= 2 and sys.argv[1] in ['aiohttp', 'sanic', 'tornado', 'asgi']:
ASYNC_MODE = sys.argv[1]
else:
ASYNC_MODE = 'aiohttp'
if ASYNC_MODE == 'sanic':
sio = socketio.AsyncServer(async_mode=ASYNC_MODE, cors_allowed_origins=[])
else:
sio = socketio.AsyncServer(async_mode=ASYNC_MODE)
if ASYNC_MODE == 'aiohttp':
app = web.Application()
if ASYNC_MODE == 'sanic':
app = Sanic(name='Just a simple service')
app.config['CORS_SUPPORTS_CREDENTIALS'] = True
if ASYNC_MODE == 'tornado':
app = tornado.web.Application([
(r"/socket.io/", socketio.get_tornado_handler(sio))
])
if ASYNC_MODE == 'asgi':
app = socketio.ASGIApp(sio)
tick_queue = asyncio.Queue()
@sio.event
async def connect(sid, environ, auth):
print('[%s]: connected' % sid)
@sio.event
async def disconnect(sid):
print('[%s]: disconnected' % sid)
@sio.event
async def client_said(sid, data):
global count
message = 'This is server response #%d.' % count
count += 1
print('[%s]: %s' % (sid, data['message']))
await sio.emit('server_said', { 'message': message })
async def tick_dequeue():
while True:
await asyncio.sleep(3)
tick = await tick_queue.get()
await sio.emit('tick', tick)
print('tick_dequeue() qsize=%d' % tick_queue.qsize())
async def tick_enqueue():
while True:
await asyncio.sleep(1)
await tick_queue.put({
'security_code': '2330.TW',
'close': 601.15
})
print('tick_enqueue()')
def get_asgi_app():
global app
return app
def main():
print('==============================')
print(' async_mode = %s' % ASYNC_MODE)
print('==============================')
sio.start_background_task(tick_dequeue)
sio.start_background_task(tick_enqueue)
if ASYNC_MODE == 'aiohttp':
sio.attach(app)
web.run_app(app, port=PORT)
if ASYNC_MODE == 'tornado':
app.listen(PORT)
tornado.ioloop.IOLoop.current().start()
if ASYNC_MODE == 'sanic':
sio.attach(app)
app.run(port=PORT)
if ASYNC_MODE == 'asgi':
uvicorn.run('server:app', host="127.0.0.1", port=PORT, log_level="info")
if __name__ == '__main__':
main()
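# Hedged companion client (sketch only, assuming the server above is reachable
# on localhost:63047 and python-socketio is installed):
#
# import asyncio
# import socketio as sio_lib
#
# async def demo():
#     client = sio_lib.AsyncClient()
#     @client.on('server_said')
#     async def on_server_said(data):
#         print('server said:', data['message'])
#     await client.connect('http://localhost:63047')
#     await client.emit('client_said', {'message': 'hello from client'})
#     await client.sleep(2)
#     await client.disconnect()
#
# asyncio.run(demo())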
|
StarcoderdataPython
|
3302638
|
<reponame>steffgrez/fast-jsonrpc
from fast_jsonrpc2.resolver import JSONRPCResolver
from fast_jsonrpc2.version import __version__
|
StarcoderdataPython
|
1652654
|
"""Constants for the Todoist component."""
CONF_EXTRA_PROJECTS = "custom_projects"
CONF_PROJECT_DUE_DATE = "due_date_days"
CONF_PROJECT_LABEL_WHITELIST = "labels"
CONF_PROJECT_WHITELIST = "include_projects"
# Calendar Platform: Does this calendar event last all day?
ALL_DAY = "all_day"
# Attribute: All tasks in this project
ALL_TASKS = "all_tasks"
# Todoist API: "Completed" flag -- 1 if complete, else 0
CHECKED = "checked"
# Attribute: Is this task complete?
COMPLETED = "completed"
# Todoist API: What is this task about?
# Service Call: What is this task about?
CONTENT = "content"
# Calendar Platform: Get a calendar event's description
DESCRIPTION = "description"
# Calendar Platform: Used in the '_get_date()' method
DATETIME = "dateTime"
DUE = "due"
# Service Call: When is this task due (in natural language)?
DUE_DATE_STRING = "due_date_string"
# Service Call: The language of DUE_DATE_STRING
DUE_DATE_LANG = "due_date_lang"
# Service Call: The available options of DUE_DATE_LANG
DUE_DATE_VALID_LANGS = [
"en",
"da",
"pl",
"zh",
"ko",
"de",
"pt",
"ja",
"it",
"fr",
"sv",
"ru",
"es",
"nl",
]
# Attribute: When is this task due?
# Service Call: When is this task due?
DUE_DATE = "due_date"
# Attribute: Is this task due today?
DUE_TODAY = "due_today"
# Attribute: Is this task due tomorrow?
DUE_TOMORROW = "due_tomorrow"
# Attribute: Was this task due yesterday?
DUE_NEXT7DAYS = "due_next7days"
# Attribute: Was this task due yesterday?
TASK_DUE_FORMATTED = "due_formatted"
# Calendar Platform: When a calendar event ends
END = "end"
# Todoist API: Look up a Project/Label/Task ID
ID = "id"
# Todoist API: Fetch all labels
# Service Call: What are the labels attached to this task?
LABELS = "labels"
# Todoist API: "Name" value
NAME = "name"
# Attribute: Is this task overdue?
OVERDUE = "overdue"
# Todoist API: Get a project's parent id if available
PARENT_ID="parent_id"
# Attribute: What is this project's parents?
PARENT_SUMMARY="parent_summary"
# Attribute: What is this task's priority?
# Todoist API: Get a task's priority
# Service Call: What is this task's priority?
PRIORITY = "priority"
# Attribute: What is the colour id for the project?
PROJECT_COLOUR = "color"
# Todoist API: Look up the Project ID a Task belongs to
PROJECT_ID = "project_id"
# Todoist API: Look up the Project Child Order a Project belongs to
PROJECT_ORDER = "child_order"
# Service Call: What Project do you want a Task added to?
PROJECT_NAME = "project"
# Todoist API: Fetch all Projects
PROJECTS = "projects"
# Attribute: Is this a recurring task?
# Todoist API: Get a task's recurrence
RECURRING_STATE = "is_recurring"
# Calendar Platform: When does a calendar event start?
START = "start"
# Calendar Platform: What is the next calendar event about?
SUMMARY = "summary"
# Todoist API: Fetch all Tasks
TASKS = "items"
DOMAIN = "todoist"
SERVICE_NEW_TASK = "new_task"
DAYSWITCHER = {
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
7: "Sunday"
}
MONTHSWITCHER = {
1: "Jan",
2: "Feb",
3: "Mar",
4: "Apr",
5: "May",
6: "Jun",
7: "Jul",
8: "Aug",
9: "Sep",
10: "Oct",
11: "Nov",
12: "Dec"
}
|
StarcoderdataPython
|
1799461
|
# importing testing framwork
import pytest
# library used to check working virtual environment
import importlib
# importing objects from the jupyter notebook here
from ipynb.fs.full.index import # variable names go here
# format for writing tests
# all functions that are to be run by test suite *must* be prepended with test_
def test_name_of_test_here():
assert True, "AssertionError will *not* raise and this message will not show"
assert False, "AssertionError will raise and output this message in the trace"
# tests to ensure correct environment is loaded
def test_conda_environment_activated():
assert importlib.util.find_spec("obscure"), "It looks like you didn't 'conda activate learn-env' - try that then run the test again!"
|
StarcoderdataPython
|
1725279
|
class Solution:
    def maxAncestorDiff(self, root: 'TreeNode', ancestors=None) -> int:
        # Avoid a mutable default list so repeated top-level calls don't share state.
        if ancestors is None:
            ancestors = []
        if root == None:
            return 0
        curr = 0
        if ancestors:
            curr = max(abs(root.val - max(ancestors)), abs(root.val - min(ancestors)))
        ancestors.append(root.val)
        left = self.maxAncestorDiff(root.left, ancestors)
        right = self.maxAncestorDiff(root.right, ancestors)
ancestors.pop()
return max([curr, left, right])
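# Quick check with a hand-built tree; this TreeNode is a minimal stand-in for
# LeetCode's predefined definition, included only so the sketch runs on its own.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
if __name__ == "__main__":
    root = TreeNode(8, TreeNode(3, TreeNode(1)), TreeNode(10, None, TreeNode(14)))
    print(Solution().maxAncestorDiff(root))  # 7, i.e. |8 - 1|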
|
StarcoderdataPython
|
153031
|
<gh_stars>100-1000
"""
train the XGB_HMM model, return A, B(XGB model), pi
"""
import numpy as np
from XGB_HMM.GMM_HMM import GMM_HMM
from XGB_HMM.re_estimate import re_estimate
from XGB_HMM.predict import self_pred
from XGB_HMM.xgb import self_xgb
def XGB_HMM(O, lengths, verbose=True):
n_states = 3
stop_flag = 0
iteration = 1
log_likelihood = -np.inf
min_delta = 1e-4
S, A, gamma = GMM_HMM(O, lengths, n_states, verbose=True)
prior_pi = np.array([sum(S == i) / len(S) for i in range(n_states)])
# model = self_xgb(O, gamma, n_states)
model = 1
# B_Matrix = form_B_matrix_by_DNN(model, O, prior_pi)
B_Matrix = gamma / prior_pi
record_log_likelihood = []
best_result = [] # record the result A, B(xgb model), prior pi, best_log_likelihood
while stop_flag <= 3:
A, gamma = re_estimate(A, B_Matrix, prior_pi, lengths)
# pickle.dump([O, gamma], open('C:/Users/Administrator/Desktop/HMM_program/save/temp.pkl', 'wb'))
# model = self_xgb(O, gamma, n_states)
# model = self_DNN(O, gamma)
# B_Matrix = form_B_matrix_by_DNN(model, O, prior_pi)
B_Matrix = gamma / prior_pi
new_S, _, new_log_likelihood = self_pred(B_Matrix, lengths, A, prior_pi)
record_log_likelihood.append(new_log_likelihood)
if len(best_result) == 0:
best_result = [A, model, prior_pi, new_log_likelihood]
elif new_log_likelihood > best_result[3]:
best_result = [A, model, prior_pi, new_log_likelihood]
temp = gamma
if new_log_likelihood - log_likelihood <= min_delta:
stop_flag += 1
else:
stop_flag = 0
log_likelihood = new_log_likelihood
iteration += 1
if verbose:
print(new_log_likelihood)
model = self_xgb(O, temp, n_states)
best_result[1] = model
return best_result[0], best_result[1], best_result[2]
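# Hypothetical call (not from the original sources): O would be an
# (n_samples, n_features) observation array and `lengths` the per-sequence
# sample counts, presumably mirroring the convention used by GMM_HMM above.
#
# A, xgb_model, prior_pi = XGB_HMM(O, lengths, verbose=True)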
|
StarcoderdataPython
|
4805908
|
<reponame>shwnchpl/abrv
import os
from getpass import getuser
from flask import Flask
from . import db
from . import url
def create_app(test_config=None):
app = Flask(__name__, instance_relative_config=True)
app.config.from_mapping(
SECRET_KEY='dev',
DATABASE={
'database': 'abrv',
'user': getuser(),
'password': '',
}
)
if test_config is None:
try:
app.config.from_pyfile('config.py', silent=False)
except FileNotFoundError:
app.logger.warning('Running with insecure built-in configuration.')
else:
app.config.from_mapping(test_config)
try:
os.makedirs(app.instance_path)
except OSError:
pass
db.init_app(app)
app.register_blueprint(url.bp)
return app
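# Hedged usage sketch: assuming this module is the package __init__ (it imports
# `db` and `url` relatively), the factory is usually picked up by the Flask CLI:
#   FLASK_APP=abrv flask run
# or instantiated directly for a quick local check:
#   app = create_app()
#   app.run(debug=True)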
|
StarcoderdataPython
|
1629945
|
import sys
#output and error redirection
sys.stdout = open("./stdout.txt", "w", buffering=1)  # text mode, line-buffered (unbuffered binary breaks print() on Python 3)
sys.stderr = open("./stderr.txt", "w", buffering=1)
import kivy
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.textinput import TextInput
from kivy.graphics import *
from kivy.event import *
from kivy.graphics.transformation import Matrix
from kivy.clock import Clock
from kivy.properties import *
from kivy.interactive import *
#from kivy.uix.codeinput import CodeInput
#from pygments.lexers import CythonLexer
from plugsLoader import PlugsLoader
import pickle
app = None
class Modeler(Widget,EventDispatcher):
plugIndex = NumericProperty(0)
def __init__(self, plugs_loader):
super(Modeler, self).__init__()
self.plugs_loader = plugs_loader
self.plugs = {}
self.selectedPlug = None
Clock.schedule_interval(self.clock, 1.0/60.0)
def __del__(self):
for m in self.plugs.values():
if '__del__' in dir(m):
m.__del__()
def on_press(self):
print( 'Modeler on_press' )
def clock(self,*largs):
for k in self.plugs.keys():
self.plugs[k].clock(self)
if self.plugs[k].deletable:
del(self.plugs[k])
def get_at(self,x,y):
for m in self.plugs.values():
if m.touch_quest(x,y):
return m
return None
def get_at_multiple(self,x,y):
ret = list()
for m in self.plugs.values():
if m.touch_quest(x,y):
ret.append(m)
return ret
def on_touch_down(self,touch):
super(Modeler,self).on_touch_down(touch)
t = touch
t.x = t.x - t.x%50 +25
t.y = t.y - t.y%50 +25
print(dir(touch))
if not self.collide_point(t.x,t.y):
return False
selected = False
for m in self.plugs.values():
if m.touched(t):
selected = True
self.selectedPlug = m
if not selected:
plug = self.plugs_loader.build_selected( self.canvas, t )
if plug != None:
self.plugs[ self.plugIndex ] = plug
self.plugIndex = self.plugIndex + 1
self.selectedPlug = plug
def on_touch_move(self,touch):
pass
def on_touch_up(self,touch):
pass
def delete_plug(self,bt_instance):
if self.selectedPlug!=None:
self.selectedPlug.remove()
self.selectedPlug.deletable = True
class InteractiveShell(BoxLayout):
def __init__(self):
super(InteractiveShell,self).__init__(orientation='vertical',size_hint=(0.3,1.0))
self.text_input = TextInput(font_size='10px',multiline = True,size_hint=(1.0,0.9) )
self.code_input = TextInput(font_size='10px',size_hint=(0.9,1.0)) #CodeInput(lexer=CythonLexer(),size_hint=(0.9,1.0))
cmd = Button(text='exec',size_hint=(0.1,1.0))
cmd.bind(on_press=self.exec_code)
hbox = BoxLayout(orientation='horizontal',size_hint=(1.0,0.1))
hbox.add_widget( self.code_input )
hbox.add_widget( cmd )
self.add_widget( hbox )
self.add_widget( self.text_input )
def exec_code(self,bt_instance):
global app
#self.text_input.text = self.text_input.text + '\n' + ">>> " + self.code_input.text
print( ">>> " + self.code_input.text )
try:
exec(self.code_input.text)
f = open("./stdout.txt","r+b")
self.text_input.text = f.read()
f.close()
except Exception as e:
self.text_input.text = self.text_input.text + '\n' + "____exception: " + repr(e)
print("____exception: " + repr(e))
class TestPlatform(App):
def build(self):
root = BoxLayout(orientation='horizontal')
plugs_loader = PlugsLoader()
self.modeler = Modeler( plugs_loader )
bt_delPlug = Button(text='del',size_hint=(1.0,0.2))
bt_delPlug.bind(on_press=self.modeler.delete_plug)
vbox = BoxLayout(orientation='vertical',size_hint=(0.2,1.0))
vbox.add_widget( bt_delPlug )
vbox.add_widget( plugs_loader )
root.add_widget( vbox )
root.add_widget( self.modeler )
#root.add_widget( InteractiveShell() )
return root
def __del__(self):
self.modeler.__del__()
if __name__ == "__main__":
global app
app = TestPlatform()
app.run()
#interactiveLauncher = InteractiveLauncher(t)
#interactiveLauncher.run()
app.__del__()
|
StarcoderdataPython
|
3301408
|
<reponame>pdiroot/checkov
from __future__ import annotations
from typing import Any
from checkov.common.checks.base_check_registry import BaseCheckRegistry
from checkov.common.output.report import CheckType
from checkov.common.parsers.json import parse
from checkov.common.parsers.node import DictNode
from checkov.common.runners.object_runner import Runner as ObjectRunner
class Runner(ObjectRunner):
check_type = CheckType.JSON
def import_registry(self) -> BaseCheckRegistry:
from checkov.json_doc.registry import registry
return registry
def _parse_file(self, f: str) -> tuple[dict[str, Any] | list[dict[str, Any]], list[tuple[int, str]]] | None:
if not f.endswith(".json"):
return None
content: tuple[dict[str, Any] | list[dict[str, Any]], list[tuple[int, str]]] | None = parse(f)
return content
def get_start_end_lines(self, end: int, result_config: DictNode, start: int) -> tuple[int, int]:
start = result_config.start_mark.line
end = result_config.end_mark.line
return end, start
|
StarcoderdataPython
|
3385539
|
# from transformers import BertModel, BertTokenizer
from torch import nn
from transformers import BertTokenizer
import torch
from experiment.bert_models.modeling_bert import BertModel, BertConfig
from experiment.bert_utils import BertWrapperModel
from experiment.qa.model import BaseModel
class BertSigmoid(BaseModel):
_MODEL = BertModel
def __init__(self, from_pretrained, model_name=None, cache_dir=None, config=None, num_labels=1):
super(BertSigmoid, self).__init__(from_pretrained, model_name=model_name, cache_dir=cache_dir, config=config)
assert num_labels == 1
self.num_labels = num_labels
self.dropout = nn.Dropout(self.config.hidden_dropout_prob)
self.lin_layer = nn.Linear(self.config.hidden_size, num_labels)
self.sigmoid = nn.Sigmoid()
def forward(self, input_ids, token_type_ids=None, attention_mask=None, position_ids=None, head_mask=None, tasks=None):
encoded_layers, pooled_output = self.bert(
input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, position_ids=None,
head_mask=head_mask, tasks=tasks
)[:2]
# sent_encoding = pooled_output
sent_encoding = encoded_layers[:, 0, :]
# sent_encoding = self.dropout(sent_encoding)
sent_encoding = self.lin_layer(sent_encoding)
return self.sigmoid(sent_encoding)
def average_standard_bert_output(self, input_ids, token_type_ids=None, attention_mask=None, position_ids=None, head_mask=None):
encoded_layers, _ = self.bert(
input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, position_ids=None,
head_mask=head_mask
)[:2]
attention_mask = (attention_mask == 0).float().to(encoded_layers.device)
length = torch.sum(attention_mask, dim=1)
attention_mask = attention_mask[:,:,None].repeat((1,1, encoded_layers.size()[-1]))
encoded_layers = encoded_layers * attention_mask
layer_sum = torch.sum(encoded_layers, dim=1)
mean = layer_sum / length[:,None].repeat(1,encoded_layers.size()[-1])
return mean
class BertSigmoidModel(BertWrapperModel):
_MODEL_CLASS = BertSigmoid
_TOKENIZER_CLASS = BertTokenizer
_CONFIG_CLASS = BertConfig
component = BertSigmoidModel
|
StarcoderdataPython
|
3297726
|
<gh_stars>1-10
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import xml.etree.ElementTree as ET
import mmcv
from mmocr.utils import convert_annotations
def collect_files(data_dir):
"""Collect all images and their corresponding groundtruth files.
Args:
data_dir (str): The directory to dataset
Returns:
files (list): The list of tuples (img_file, groundtruth_file)
"""
assert isinstance(data_dir, str)
assert data_dir
ann_list, imgs_list = [], []
for video_dir in os.listdir(data_dir):
for frame_dir in os.listdir(osp.join(data_dir, video_dir)):
crt_dir = osp.join(data_dir, video_dir, frame_dir)
if not osp.isdir(crt_dir):
continue
for crt_file in os.listdir(crt_dir):
if crt_file.endswith('xml'):
ann_path = osp.join(crt_dir, crt_file)
img_path = osp.join(crt_dir,
crt_file.replace('xml', 'png'))
if os.path.exists(img_path):
ann_list.append(ann_path)
imgs_list.append(img_path)
else:
continue
files = list(zip(imgs_list, ann_list))
assert len(files), f'No images found in {data_dir}'
print(f'Loaded {len(files)} images from {data_dir}')
return files
def collect_annotations(files, nproc=1):
"""Collect the annotation information.
Args:
files (list): The list of tuples (image_file, groundtruth_file)
nproc (int): The number of process to collect annotations
Returns:
images (list): The list of image information dicts
"""
assert isinstance(files, list)
assert isinstance(nproc, int)
if nproc > 1:
images = mmcv.track_parallel_progress(
load_img_info, files, nproc=nproc)
else:
images = mmcv.track_progress(load_img_info, files)
return images
def load_img_info(files):
"""Load the information of one image.
Args:
files (tuple): The tuple of (img_file, groundtruth_file)
Returns:
img_info (dict): The dict of the img and annotation information
"""
assert isinstance(files, tuple)
img_file, gt_file = files
assert osp.basename(gt_file).split('.')[0] == osp.basename(img_file).split(
'.')[0]
# read imgs while ignoring orientations
img = mmcv.imread(img_file, 'unchanged')
img_info = dict(
file_name=osp.join(osp.basename(img_file)),
height=img.shape[0],
width=img.shape[1],
segm_file=osp.join(osp.basename(gt_file)))
if osp.splitext(gt_file)[1] == '.xml':
img_info = load_xml_info(gt_file, img_info)
else:
raise NotImplementedError
return img_info
def load_xml_info(gt_file, img_info):
"""Collect the annotation information.
The annotation format is as the following:
<annotation>
<object>
<name>hierarchy</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>657</xmin>
<ymin>467</ymin>
<xmax>839</xmax>
<ymax>557</ymax>
</bndbox>
</object>
</annotation>
Args:
gt_file (str): The path to ground-truth
img_info (dict): The dict of the img and annotation information
Returns:
img_info (dict): The dict of the img and annotation information
"""
obj = ET.parse(gt_file)
root = obj.getroot()
anno_info = []
for obj in root.iter('object'):
x = max(0, int(obj.find('bndbox').find('xmin').text))
y = max(0, int(obj.find('bndbox').find('ymin').text))
xmax = int(obj.find('bndbox').find('xmax').text)
ymax = int(obj.find('bndbox').find('ymax').text)
w, h = abs(xmax - x), abs(ymax - y)
bbox = [x, y, w, h]
segmentation = [x, y, x + w, y, x + w, y + h, x, y + h]
anno = dict(
iscrowd=0,
category_id=1,
bbox=bbox,
area=w * h,
segmentation=[segmentation])
anno_info.append(anno)
img_info.update(anno_info=anno_info)
return img_info
def parse_args():
parser = argparse.ArgumentParser(
description='Generate training, val and test set of Lecture Video DB ')
parser.add_argument('root_path', help='Root dir path of Lecture Video DB')
parser.add_argument(
'--nproc', default=1, type=int, help='number of process')
args = parser.parse_args()
return args
def main():
args = parse_args()
root_path = args.root_path
for split in ['train', 'val', 'test']:
print(f'Processing {split} set...')
with mmcv.Timer(print_tmpl='It takes {}s to convert LV annotation'):
files = collect_files(osp.join(root_path, 'imgs', split))
image_infos = collect_annotations(files, nproc=args.nproc)
convert_annotations(
image_infos, osp.join(root_path,
'instances_' + split + '.json'))
if __name__ == '__main__':
main()
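# Example invocation (script name and dataset path are placeholders):
#   python lv_converter.py /path/to/LV --nproc 4
# The root directory is expected to contain imgs/train, imgs/val and imgs/test;
# instances_<split>.json files are written into the root directory.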
|
StarcoderdataPython
|
17998
|
<filename>days/day01/part2.py<gh_stars>0
from helpers import inputs
def solution(day):
depths = inputs.read_to_list(f"inputs/{day}.txt")
part2_total = 0
for index, depth in enumerate(depths):
if index - 3 >= 0:
current_window = (
int(depth) + int(depths[index - 1]) + int(depths[index - 2])
)
previous_window = (
int(depths[index - 1])
+ int(depths[index - 2])
+ int(depths[index - 3])
)
diff = current_window - previous_window
if diff > 0:
part2_total += 1
return f"Day 01 Part 2 Total Depth Increase: {part2_total}"
|
StarcoderdataPython
|
3238967
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
"""Placeholder."""
import os
from pathlib import Path
from typing import Callable, List
import click
def sm_protocol(
model: str = "model",
output: str = "output",
channels: List[str] = ["train", "test", "validation"],
channel_prefix: str = "data",
) -> Callable:
"""Create an arg parser that implements minimum SageMaker entrypoint protocol.
Only model, output, and channel dirs are implemented, as this is typically bare minimum to run or test an
entrypoint script locally, e.g., `python ./entrypoint.py`.
See https://github.com/aws/sagemaker-containers/blob/master/README.rst#important-environment-variables.
This function must be imported using as follows:
>>> from smepu import click as smclick
>>> @smclick.sm_protocol()
>>> ...
or
>>> from smepu.click import sm_protocol
>>> @sm_protocol()
>>> ...
This is intentionally done to allow smepu package to still importable even without click installed.
Args:
model (str, optional): Model dir when not running on SageMaker. Defaults to "model".
output (str, optional): Output dir when not running on SageMaker. Defaults to "output".
channels (List[str], optional): Data channels. Defaults to ["train", "test", "validation"].
channel_prefix (str, optional): Parent directory that contains the channel dirs. Defaults to "data".
Returns:
Callable: the decoratee.
"""
def decorator(f):
# Need to add options in reverse order than f's args.
# CLI hyperparameters that belong to the wrapped function.
# See https://click.palletsprojects.com/en/7.x/advanced/#forwarding-unknown-options
opts = click.argument("train_args", nargs=-1, type=click.UNPROCESSED)(f)
for channel in channels[::-1]:
opts = click.option(
f"--{channel}",
default=os.environ.get("SM_CHANNEL_{channel.upper()}", os.path.join(channel_prefix, channel)),
help=f"Where to read input channel {channel}",
type=Path,
)(opts)
opts = click.option(
"--output-data-dir",
default=os.environ.get("SM_OUTPUT_DATA_DIR", output),
help="Where to output additional artifacts",
type=Path,
)(opts)
opts = click.option(
"--model-dir",
default=os.environ.get("SM_MODEL_DIR", model),
help="Where to output model artifacts",
type=Path,
)(opts)
return click.command(context_settings={"ignore_unknown_options": True})(opts)
return decorator
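# Illustrative (untested) entrypoint sketch: click passes --model-dir,
# --output-data-dir and one option per channel to the callback, plus the
# pass-through `train_args` tuple. All names below are assumptions.
#
# @sm_protocol(channels=["train", "test"])
# def main(model_dir, output_data_dir, train, test, train_args):
#     print(model_dir, output_data_dir, train, test, train_args)
#
# if __name__ == "__main__":
#     main()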
|
StarcoderdataPython
|
1713864
|
<reponame>Errare-humanum-est/HeteroGen
# Copyright (c) 2021. <NAME>
# Copyright (c) 2021. University of Edinburgh
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from typing import List, Tuple, Union
from DataObjects.FlowDataTypes.ClassBaseAccess import Access, Evict
from DataObjects.Transitions.ClassTransitionv2 import Transition_v2
from DataObjects.CommClassification.ClassBaseCommClass import CommClassState
from Debug.Monitor.ClassDebug import Debug
## State_v2
#
# State container
# Dependency: ClassCommFunc, StateSetsNetworkx
class State_v2(CommClassState, Debug):
def __init__(self, name: str):
# Add commclass state property
CommClassState.__init__(self)
Debug.__init__(self)
self.state: str = name
self.stable: bool = False
# A list of ssp_transitions starting in this state
self.state_trans: List[Transition_v2] = []
self.access_permissions: List[Union[Access, Evict]] = []
# Dependency: StateSetsNetworkx
# First StateSets assignment must be performed
self.start_state_set = []
self.end_state_set = []
def __str__(self):
return self.state
# Don't define __hash__ and __eq__ function as state objects won't be found in networkx dict anymore if changed
# externally. Hence use custom hashing
def custom_hash(self):
return hash((str(self.state), (hash(trans) for trans in self.state_trans)))
####################################################################################################################
# STATE TRANSITION
####################################################################################################################
def add_transitions(self, transitions: [Transition_v2, List[Transition_v2]]):
tmp_trans = transitions
if not isinstance(transitions, list):
tmp_trans = [transitions]
for transition in tmp_trans:
self.add_transition(transition)
def add_transition(self, transition: Transition_v2):
if hash(transition) not in [hash(trans) for trans in self.state_trans]:
self.pwarning("The transition start state and the assigned state do not match: " + str(transition),
transition.start_state != self)
self.state_trans.append(transition)
self.add_classify_trans(transition)
def remove_transitions(self, transitions: [Transition_v2, List[Transition_v2]]):
        if not isinstance(transitions, list):
transitions = [transitions]
for transition in transitions:
if transition in self.state_trans:
self.state_trans.remove(transition)
self.remove_classify_trans(transition)
####################################################################################################################
# Compound controller function
####################################################################################################################
def get_compound_states(self) -> Tuple['State_v2']:
return tuple([self])
|
StarcoderdataPython
|
124541
|
import metrics.evaluation as e
import metrics.evaluation
def get_evaluation(y_test, y_pred):
flat_y_test = [i for subl in y_test for i in subl]
flat_y_pred = [i for subl in y_pred for i in subl]
H,I,D = e.compute_HID(flat_y_test, flat_y_pred)
Precision, Recall, FScore = e.compute_PRF(H,I,D)
evaluation = {}
evaluation["E-count_test"] = flat_y_test.count('E-SEG')
evaluation["S-count_test"] = flat_y_test.count('S-SEG')
evaluation["E-count_pred"] = flat_y_pred.count('E-SEG')
evaluation["S-count_pred"] = flat_y_pred.count('S-SEG')
evaluation["H"]=H
evaluation["I"]=I
evaluation["D"]=D
evaluation["Precision"] = Precision
evaluation["Recall"] = Recall
evaluation["F-score"] = FScore
return FScore
def get_evaluation_BM(y_test, y_pred):
flat_y_test = [i for subl in y_test for i in subl]
flat_y_pred = [i for subl in y_pred for i in subl]
flat_y_test = metrics.evaluation.fromBM2BMES(flat_y_test)
flat_y_pred = metrics.evaluation.fromBM2BMES(flat_y_pred)
H,I,D = e.compute_HID(flat_y_test, flat_y_pred)
Precision, Recall, FScore = e.compute_PRF(H,I,D)
evaluation = {}
evaluation["E-count_test"] = flat_y_test.count('E-SEG')
evaluation["S-count_test"] = flat_y_test.count('S-SEG')
evaluation["E-count_pred"] = flat_y_pred.count('E-SEG')
evaluation["S-count_pred"] = flat_y_pred.count('S-SEG')
evaluation["H"]=H
evaluation["I"]=I
evaluation["D"]=D
evaluation["Precision"] = Precision
evaluation["Recall"] = Recall
evaluation["F-score"] = FScore
return FScore
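# Hedged example with invented label sequences (tags follow the E-SEG/S-SEG
# scheme counted above; real inputs come from the segmentation pipeline):
#
# y_test = [['S-SEG', 'E-SEG', 'S-SEG']]
# y_pred = [['S-SEG', 'S-SEG', 'S-SEG']]
# fscore = get_evaluation(y_test, y_pred)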
|
StarcoderdataPython
|
1685009
|
<filename>dit/pid/tests/test_imin.py
"""
Tests for dit.pid.imin.
"""
import pytest
from dit.pid.imin import PID_WB
from dit.pid.distributions import bivariates, trivariates
def test_pid_wb1():
"""
Test imin on a generic distribution.
"""
d = bivariates['prob. 1']
pid = PID_WB(d, ((0,), (1,)), (2,))
assert pid[((0,), (1,))] == pytest.approx(0.019973094021974794)
assert pid[((0,),)] == pytest.approx(0.15097750043269376)
assert pid[((1,),)] == pytest.approx(0.0)
assert pid[((0, 1),)] == pytest.approx(0.0)
def test_pid_wb2():
"""
Test imin on another generic distribution.
"""
d = trivariates['sum']
pid = PID_WB(d, [[0], [1], [2]], [3])
for atom in pid._lattice:
if atom == ((0,), (1,), (2,)):
assert pid[atom] == pytest.approx(0.31127812445913294)
elif atom == ((0, 1), (0, 2), (1, 2)):
assert pid[atom] == pytest.approx(0.5)
elif atom == ((0, 1, 2),):
assert pid[atom] == pytest.approx(1.0)
else:
assert pid[atom] == pytest.approx(0.0)
def test_pid_wb3():
"""
Test imin on a generic distribution.
"""
d = bivariates['jeff']
pid = PID_WB(d)
assert pid.complete
assert pid.nonnegative
assert pid.consistent
|
StarcoderdataPython
|
1764952
|
<reponame>imjal/EventCenterTrack<filename>src/lib/dataset/datasets/prophesee_src/visualize/vis_utils.py
"""
Functions to display events and boxes
Copyright: (c) 2019-2020 Prophesee
"""
from __future__ import print_function
import numpy as np
import cv2
LABELMAP = ["car", "pedestrian"]
def make_binary_histo(events, img=None, width=304, height=240):
"""
simple display function that shows negative events as blacks dots and positive as white one
on a gray background
args :
- events structured numpy array
- img (numpy array, height x width x 3) optional array to paint event on.
- width int
- height int
return:
- img numpy array, height x width x 3)
"""
if img is None:
img = 127 * np.ones((height, width, 3), dtype=np.uint8)
else:
# if an array was already allocated just paint it grey
img[...] = 127
if events.size:
assert events['x'].max() < width, "out of bound events: x = {}, w = {}".format(events['x'].max(), width)
assert events['y'].max() < height, "out of bound events: y = {}, h = {}".format(events['y'].max(), height)
img[events['y'], events['x'], :] = 255 * events['p'][:, None]
return img
def draw_bboxes(img, boxes, labelmap=LABELMAP):
"""
draw bboxes in the image img
"""
colors = cv2.applyColorMap(np.arange(0, 255).astype(np.uint8), cv2.COLORMAP_HSV)
colors = [tuple(*item) for item in colors.tolist()]
for i in range(boxes.shape[0]):
pt1 = (int(boxes['x'][i]), int(boxes['y'][i]))
size = (int(boxes['w'][i]), int(boxes['h'][i]))
pt2 = (pt1[0] + size[0], pt1[1] + size[1])
score = boxes['class_confidence'][i]
class_id = boxes['class_id'][i]
class_name = labelmap[class_id % len(labelmap)]
color = colors[class_id * 60 % 255]
center = ((pt1[0] + pt2[0]) // 2, (pt1[1] + pt2[1]) // 2)
cv2.rectangle(img, pt1, pt2, color, 1)
cv2.putText(img, class_name, (center[0], pt2[1] - 1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)
cv2.putText(img, str(score), (center[0], pt1[1] - 1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)
|
StarcoderdataPython
|
1736190
|
#!/usr/bin/python3
'''Send messages to queues in rabbitmq'''
import contextlib
from typing import Iterator
import argparse
import datetime
import pika
@contextlib.contextmanager
def connect(
args: argparse.Namespace
) -> Iterator[pika.adapters.blocking_connection.BlockingChannel]:
'''Connects to rabbitmq with the arguments provided.'''
username = args.rabbitmq_username
password = args.rabbitmq_password
credentials = pika.PlainCredentials(username, password)
parameters = pika.ConnectionParameters(
'rabbitmq',
5672,
'/',
credentials,
heartbeat=600,
# mypy does not support structural typing yet
# https://github.com/python/mypy/issues/3186
blocked_connection_timeout=300.0, # type: ignore
)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.exchange_declare(exchange='certificates',
exchange_type='direct',
durable=True)
try:
yield channel
finally:
connection.close()
def configure_parser(parser: argparse.ArgumentParser) -> None:
'''Add rabbitmq-related arguments to `parser`'''
parser.add_argument('--rabbitmq-username', type=str,
help='rabbitmq username',
default='omegaup')
parser.add_argument('--rabbitmq-password', type=str,
help='rabbitmq password',
default='<PASSWORD>')
parser.add_argument('--date-lower-limit',
type=lambda s:
datetime.datetime.strptime(s, '%Y-%m-%d'),
help='date lower limit',
default=datetime.date(2005, 1, 1))
parser.add_argument('--date-upper-limit',
type=lambda s:
datetime.datetime.strptime(s, '%Y-%m-%d'),
help='date upper limit',
default=datetime.date.today())
|
StarcoderdataPython
|
1672561
|
#!/usr/bin/env python
from pyknon.plot import plot2, plot2_bw
from pyknon.simplemusic import inversion
def plot_color():
n1 = [11, 10, 7]
for x in range(12):
plot2(n1, inversion(n1, x), "ex-inversion-plot-{0:02}.ps".format(x))
n2 = [1, 3, 7, 9, 4]
plot2(n2, inversion(n2, 9), "ex-inversion-plot.ps")
def plot_black_and_white():
n1 = [11, 10, 7]
for x in range(12):
plot2_bw(n1, inversion(n1, x), "ex-inversion-plot-bw-{0:02}.ps".format(x))
n2 = [1, 3, 7, 9, 4]
plot2_bw(n2, inversion(n2, 9), "ex-inversion-plot-bw.ps")
if __name__ == "__main__":
plot_color()
plot_black_and_white()
|
StarcoderdataPython
|
1600588
|
<filename>package/layers/metrics/__init__.py
from .depth_metrics import *
|
StarcoderdataPython
|
190146
|
<reponame>isacasini/SNV_Xia_et_al_2020
# This file contains the following function(s): readtxt, readtxt2df
import pandas as pd
def readtxt(filepathname):
"""Skips a line that starts with “>” and then reads in the rest of the file while removing any new lines (“\n”)"""
sequence = ""
for line in open(filepathname, 'r'):
if line[0] != ">":
sequence += line.replace("\n","")
return sequence
def readtxt2df(filepathname):
"""Makes a pandas dataframe from a tab delimited csv file, skipping the 1st row and using the 2nd as a header"""
# Needs to be a string with the path and file
file = filepathname
# skips the first row, sets the second row as header, uses tab as a delimiter
data = pd.read_csv(filepathname, sep="\t", header=0 ,encoding="ISO-8859-1")#,dtype={"PubChem":int,"PubChem":np.nan})
return data
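# Example calls (file names are placeholders):
# sequence = readtxt("example.fasta")   # concatenated sequence, ">" header lines skipped
# df = readtxt2df("annotations.tsv")    # tab-delimited file, first row used as header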
|
StarcoderdataPython
|
150600
|
# This script is borrowed from https://github.com/mkocabas/VIBE
# Adhere to their licence to use this script
from psypose.MEVA.meva.dataloaders import Dataset2D
from psypose.MEVA.meva.utils.video_config import PENNACTION_DIR
class PennAction(Dataset2D):
def __init__(self, seqlen, overlap=0.75, debug=False):
db_name = 'pennaction'
super(PennAction, self).__init__(
seqlen = seqlen,
folder=PENNACTION_DIR,
dataset_name=db_name,
debug=debug,
overlap=overlap,
)
|
StarcoderdataPython
|
39066
|
#!/usr/bin/python3.7
#Author: <NAME>
import sys
import os
import math
import re
import numpy as np
#print('usage: <>.py <file.pdb> \nexecute nsc to generate point-based surface and create tables and if verbose==1 files dotslabel1.xyzrgb dotslabel2.xyzrgb dotslabel3.xyzrgb and dotslabel4.xyzrgb\n')
def pdbsurface(filepdb,nscexe):
verbose=0
#label1 {H, Cl, Br, I} white/grey 0.9 0.9 0.9
#label2 {O, N, S, F} red 1 0 0
#label3 {C, P, B} green 0 1 0
#label4 {others} blue 0 0 1
tabR= {'C':'%.2f' % 1.70, 'O':1.52, 'N':1.55, 'S':1.80, 'P':1.80, 'B':1.72, 'Br':1.85, 'Cl':1.75, 'I':1.98, 'F':1.47, 'H':'%.2f' % 1.20, 'Hp':'%.2f' % 1.10, 'X':'%.2f' % 1.10}
label= {'C':3, 'P':3, 'B':3, 'O':2, 'N':2, 'S':2, 'F':2, 'Hp':2, 'H':1, 'Cl':1, 'Br':1, 'I':1}
rgb= np.array([[0, 0, 0], [0.9, 0.9, 0.9], [1, 0, 0], [0, 1, 0], [0, 0, 1]])
espace5=' '
espace6=' '
fichier2D=0
filepdb=open(filepdb,'r')
getstr=filepdb.read().split('\n')
filepdb.close()
tabLignesPdb=[]
tabLignesPdb.append('')
compt=1
while (compt < len(getstr)):
tabLignesPdb.append(re.split('\s+', getstr[compt].strip()))
compt=compt+1
compt=1
comptatomes=0
getx=[]
getx.append('')
gety=[]
gety.append('')
getz=[]
getz.append('')
getA=[]
getA.append('')
getRayon=[]
getRayon.append('')
while (compt < len(tabLignesPdb)):
if (tabLignesPdb[compt][0] == 'HETATM' or tabLignesPdb[compt][0] == 'ATOM'):
xAtome=float(tabLignesPdb[compt][5])
yAtome=float(tabLignesPdb[compt][6])
zAtome=float(tabLignesPdb[compt][7])
getx.append(xAtome)
gety.append(yAtome)
getz.append(zAtome)
if (float(zAtome) == 0):
fichier2D=fichier2D+1
getA.append(tabLignesPdb[compt][2])
if(getA[compt]!='C' and getA[compt]!='O' and getA[compt]!='N' and getA[compt]!='P' and getA[compt]!='B' and getA[compt]!='H' and getA[compt]!='F' and getA[compt]!='Br' and getA[compt]!='Cl' and getA[compt]!='S' and getA[compt]!='I' and getA[compt]!='X' and getA[compt]!='Hp'):
print("Warning: atom %s set as C because it is not the tab (unusual in medchem)" % getA[compt])
getA[compt]='C'
getRayon.append(tabR[getA[compt]])
comptatomes=comptatomes+1
compt=compt+1
nbatomes=comptatomes
if (fichier2D==int(nbatomes)):
print("Warning: pdb file in 2D; SenSaaS needs 3D coordinates to work properly")
compt=1
while (compt <= nbatomes):
if (getA[compt] == 'H'):
compt2=1
while(compt2 <= nbatomes):
if (getA[compt2] == 'N' or getA[compt2] == 'O'):
distHp= math.sqrt((getx[compt] - getx[compt2])**2 + (gety[compt] - gety[compt2])**2 + (getz[compt] - getz[compt2])**2)
if (distHp <= 1.2):
getRayon[compt]=tabR['Hp']
compt2=compt2+1
compt=compt+1
#nsc:
compt=1
psaIn=open('psa.in','w')
psaIn.write('* XYZR\n')
psaIn.write(espace6+str(nbatomes)+'\n')
while (compt <= nbatomes):
x='%.2f' % getx[compt]
y='%.2f' % gety[compt]
z='%.2f' % getz[compt]
psaIn.write('%8s %8s %8s %8s %8s \n'%(x,y,z,getRayon[compt],getA[compt]))
compt=compt+1
psaIn.close()
cmd = '%s psa.in ' % (nscexe)
os.system(cmd)
psaOut=open('psa.out', 'r')
lignepsaOut= psaOut.readlines()
psaOut.close()
tabLignesPsaOut=[]
compt=3
while (compt < len(lignepsaOut)):
tabLignesPsaOut.append(re.split('\s+', lignepsaOut[compt].strip()))
compt=compt+1
nbDots= int(tabLignesPsaOut[0][2])
#print("nbDots= %6s" % (nbDots))
del tabLignesPsaOut[0]
del tabLignesPsaOut[0]
getDots=np.empty(shape=[nbDots,3], dtype='float64')
getrgb=np.empty(shape=[nbDots,3], dtype='float64')
compt=nbatomes+2
comptDots=0
ligneFicDots=[]
label1=[]
label2=[]
label3=[]
label4=[]
if(verbose==1):
dotsFic=open('dots.xyzrgb', 'w')
while (compt < nbatomes+nbDots+2):
xDot=float(tabLignesPsaOut[compt][2])
yDot=float(tabLignesPsaOut[compt][3])
zDot=float(tabLignesPsaOut[compt][4])
compt2=1
m=100
mi=0
while(compt2 <= nbatomes):
xa=getx[compt2]
ya=gety[compt2]
za=getz[compt2]
goodDots= math.sqrt((xDot - xa)**2 + (yDot - ya)**2 + (zDot - za)**2)
if(goodDots < m):
m=goodDots
mi=compt2
compt2=compt2+1
atomeCorrespondant=getA[mi]
rgbi=label[atomeCorrespondant]
if(getRayon[mi]==tabR['Hp']):
rgbi=label['O']
getrgb[comptDots,:]=[rgb[rgbi,0], rgb[rgbi,1], rgb[rgbi,2]]
getDots[comptDots,:]=[xDot,yDot,zDot]
if (rgbi == 1):
label1.append(np.vstack([getDots[comptDots], getrgb[comptDots]]))
elif (rgbi == 2):
label2.append(np.vstack([getDots[comptDots], getrgb[comptDots]]))
elif (rgbi == 3):
label3.append(np.vstack([getDots[comptDots], getrgb[comptDots]]))
elif (rgbi == 4):
label4.append(np.vstack([getDots[comptDots], getrgb[comptDots]]))
else:
print("no label for dot no %5s ?\n" %(comptDots))
if(verbose==1):
dotsFic.write('%8s'%xDot+'%8s'%yDot+'%8s'%zDot+espace5+'%5s'%(rgb[rgbi,0])+'%5s'%(rgb[rgbi,1])+'%5s'%(rgb[rgbi,2])+'\n')
comptDots=comptDots+1
compt=compt+1
if(verbose==1):
dotsFic.close()
dotslabel1=open('dotslabel1.xyzrgb', 'w')
dotslabel2=open('dotslabel2.xyzrgb', 'w')
dotslabel3=open('dotslabel3.xyzrgb', 'w')
dotslabel4=open('dotslabel4.xyzrgb', 'w')
getDots1=np.empty(shape=[len(label1),3], dtype='float64')
getrgb1=np.empty(shape=[len(label1),3], dtype='float64')
getDots2=np.empty(shape=[len(label2),3], dtype='float64')
getrgb2=np.empty(shape=[len(label2),3], dtype='float64')
getDots3=np.empty(shape=[len(label3),3], dtype='float64')
getrgb3=np.empty(shape=[len(label3),3], dtype='float64')
getDots4=np.empty(shape=[len(label4),3], dtype='float64')
getrgb4=np.empty(shape=[len(label4),3], dtype='float64')
compt=0
while(compt < len(label1)):
getDots1[compt]= label1[compt][0]
getrgb1[compt]= label1[compt][1]
if(verbose==1):
dotslabel1.write('%8s'%getDots1[compt,0]+'%8s'%getDots1[compt,1]+'%8s'%getDots1[compt,2]+espace5+'%5s'%getrgb1[compt,0]+'%5s'%getrgb1[compt,1]+'%5s'%getrgb1[compt,2]+'\n')
compt=compt+1
compt=0
while(compt < len(getDots2)):
getDots2[compt]= label2[compt][0]
getrgb2[compt]= label2[compt][1]
if(verbose==1):
dotslabel2.write('%8s'%getDots2[compt,0]+'%8s'%getDots2[compt,1]+'%8s'%getDots2[compt,2]+espace5+'%5s'%getrgb2[compt,0]+'%5s'%getrgb2[compt,1]+'%5s'%getrgb2[compt,2]+'\n')
compt=compt+1
compt=0
while(compt < len(getDots3)):
getDots3[compt]= label3[compt][0]
getrgb3[compt]= label3[compt][1]
if(verbose==1):
dotslabel3.write('%8s'%getDots3[compt,0]+'%8s'%getDots3[compt,1]+'%8s'%getDots3[compt,2]+espace5+'%5s'%getrgb3[compt,0]+'%5s'%getrgb3[compt,1]+'%5s'%getrgb3[compt,2]+'\n')
compt=compt+1
compt=0
while(compt < len(getDots4)):
getDots4[compt]= label4[compt][0]
getrgb4[compt]= label4[compt][1]
if(verbose==1):
dotslabel4.write('%8s'%getDots4[compt,0]+'%8s'%getDots4[compt,1]+'%8s'%getDots4[compt,2]+espace5+'%5s'%getrgb4[compt,0]+'%5s'%getrgb4[compt,1]+'%5s'%getrgb4[compt,2]+'\n')
compt=compt+1
if(verbose==1):
dotslabel1.close()
dotslabel2.close()
dotslabel3.close()
dotslabel4.close()
else:
os.remove("psa.in")
os.remove("psa.out")
return getDots, getrgb, getDots1, getrgb1, getDots2, getrgb2, getDots3, getrgb3, getDots4, getrgb4
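# Hedged usage sketch (paths are placeholders; nscexe must point to the NSC binary):
# dots, rgb_all, d1, rgb1, d2, rgb2, d3, rgb3, d4, rgb4 = pdbsurface("ligand.pdb", "./nsc")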
|
StarcoderdataPython
|
3240391
|
<reponame>m-holger/qpdf
from collections import defaultdict
from operator import itemgetter
import re
from sphinx import addnodes
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, Index
from sphinx.roles import XRefRole
from sphinx.util.nodes import make_refnode
# Reference:
# https://www.sphinx-doc.org/en/master/development/tutorials/todo.html
# https://www.sphinx-doc.org/en/master/development/tutorials/recipe.html
# cSpell:ignore contnode
# cSpell:ignore docname
# cSpell:ignore docnames
# cSpell:ignore localname
# cSpell:ignore refnode
# cSpell:ignore signode
class OptionDirective(ObjectDescription):
has_content = True
def handle_signature(self, sig, signode):
signode += addnodes.desc_name(text=sig)
return sig
def add_target_and_index(self, name_cls, sig, signode):
m = re.match(r'^--([^\[= ]+)', sig)
if not m:
raise Exception('option must start with --')
option_name = m.group(1)
signode['ids'].append(f'option-{option_name}')
qpdf = self.env.get_domain('qpdf')
qpdf.add_option(sig, option_name)
class OptionIndex(Index):
name = 'options'
localname = 'qpdf Command-line Options'
shortname = 'Options'
def generate(self, docnames=None):
content = defaultdict(list)
options = self.domain.get_objects()
options = sorted(options, key=itemgetter(0))
# name, subtype, docname, anchor, extra, qualifier, description
for name, display_name, typ, docname, anchor, _ in options:
m = re.match(r'^(--([^\[= ]+))', display_name)
if not m:
raise Exception(
'OptionIndex.generate: display name not as expected')
content[m.group(2)[0].lower()].append(
(m.group(1), 0, docname, anchor, '', '', typ))
content = sorted(content.items())
return content, True
class QpdfDomain(Domain):
name = 'qpdf'
label = 'qpdf documentation domain'
roles = {
'ref': XRefRole()
}
directives = {
'option': OptionDirective,
}
indices = {
OptionIndex,
}
initial_data = {
'options': [], # object list
}
def get_full_qualified_name(self, node):
return '{}.{}'.format('option', node.arguments[0])
def get_objects(self):
for obj in self.data['options']:
yield obj
def resolve_xref(self, env, from_doc_name, builder, typ, target, node,
contnode):
match = [(docname, anchor)
for name, sig, typ, docname, anchor, priority
in self.get_objects() if name == f'option.{target[2:]}']
if len(match) > 0:
to_doc_name = match[0][0]
match_target = match[0][1]
return make_refnode(builder, from_doc_name, to_doc_name,
match_target, contnode, match_target)
else:
raise Exception(f'invalid option xref ({target})')
def add_option(self, signature, option_name):
if self.env.docname != 'cli':
raise Exception(
'qpdf:option directives don\'t work outside of cli.rst')
name = f'option.{option_name}'
anchor = f'option-{option_name}'
# name, display_name, type, docname, anchor, priority
self.data['options'].append(
(name, signature, '', self.env.docname, anchor, 0))
def purge_options(self, docname):
self.data['options'] = list([
x for x in self.data['options']
if x[3] != docname
])
def purge_options(app, env, docname):
option = env.get_domain('qpdf')
option.purge_options(docname)
def setup(app):
app.add_domain(QpdfDomain)
app.connect('env-purge-doc', purge_options)
return {
'version': '0.1',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
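# Illustrative usage sketch (not part of the original extension). Assuming the
# extension is enabled in conf.py, cli.rst could define and cross-reference an
# option like this; "--some-flag" is a placeholder name, not a real qpdf option.
#
#   .. qpdf:option:: --some-flag
#
#      What the flag does.
#
#   See :qpdf:ref:`--some-flag` for details.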
|
StarcoderdataPython
|
1795782
|
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2019-11-15
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import codecs
import shutil
import zipfile
import requests
__all__ = [
"wget", "unzip", "rm", "mkdir", "rmdir", "mv"
]
_CURRENT_FILE = os.path.dirname(__file__)
def wget(url, save_path=None, rename=None):
current_path = os.getcwd()
file_name = url[url.rfind("/")+1:]
if not save_path:
save_path = current_path
if not rename:
rename = file_name
save_path = os.path.abspath(os.path.join(save_path, rename))
print("[wget] downloading from {}".format(url))
start = time.time()
size = 0
response = requests.get(url, stream=True)
chunk_size = 10240
content_size = int(response.headers["content-length"])
if response.status_code == 200:
print("[wget] file size: %.2f MB" %(content_size / 1024 / 1024))
with codecs.open(save_path, "wb") as f:
for data in response.iter_content(chunk_size=chunk_size):
f.write(data)
size += len(data)
print("\r"+"[wget] %s%.2f%%"
%(">"*int(size*50/content_size), float(size/content_size*100)), end="")
end = time.time()
print("\n"+"[wget] complete! cost: %.2fs."%(end-start))
print("[wget] save at: %s" %save_path)
return save_path
def unzip(file_path, save_path=None):
if not save_path:
save_path = os.path.abspath("/".join(os.path.abspath(file_path).split("/")[:-1]))
with zipfile.ZipFile(file_path) as zf:
zf.extractall(save_path)
print("[unzip] file path: {}, save at {}".format(file_path, save_path))
return save_path
def rm(file_path):
file_path = os.path.abspath(file_path)
os.remove(file_path)
print("[remove] file path {}".format(file_path))
return
def mkdir(file_path):
file_path = os.path.abspath(file_path)
os.makedirs(file_path)
print("[mkdir] create directory {}".format(file_path))
return file_path
def rmdir(file_path):
file_path = os.path.abspath(file_path)
shutil.rmtree(file_path)
print("[rmdir] remove directory {}".format(file_path))
return
def mv(from_file_path, to_file_path):
from_file_path = os.path.abspath(from_file_path)
to_file_path = os.path.abspath(to_file_path)
os.rename(from_file_path, to_file_path)
print("[move] move file from {} to {}".format(from_file_path, to_file_path))
return
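# Minimal usage sketch (illustrative only; the URL is a placeholder, not a real
# endpoint): download an archive, unpack it next to the download, then delete
# the archive.
if __name__ == "__main__":
    archive = wget("https://example.com/data.zip", save_path=".", rename="data.zip")
    unzip(archive)
    rm(archive)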
|
StarcoderdataPython
|
152819
|
<filename>scripts/alcohol_and_cig_cases.py
alcohol_cases = None
with open('../static/alcohol_cases.txt', 'r') as f:
alcohol_cases = f.readlines()
smoke_cases = None
with open('../static/smoke_cases.txt', 'r') as f:
smoke_cases = f.readlines()
with open('../static/alcohol_and_cig_cases.txt', 'w') as f:
for i in smoke_cases:
if i in alcohol_cases:
f.write(i)
|
StarcoderdataPython
|
4826259
|
<gh_stars>0
# -----------------------------------------------------------------------------
# Name: BackupDBWorker.py
# Purpose: Backup observer DB (encrypted only) to an external drive.
#
# Author: <NAME> <<EMAIL>>
#
# Created: Sept 29, 2017
# License: MIT
# ------------------------------------------------------------------------------
import filecmp
import os
import shutil
import arrow
import glob
import zipfile
from PyQt5.QtCore import pyqtSignal, QObject
from py.observer.ObserverConfig import use_encrypted_database
class BackupDBWorker(QObject):
"""
Class to copy encrypted DB without locking up UI
"""
backupStatus = pyqtSignal(bool, str) # Success/Fail, Result Description
def __init__(self, *args, **kwargs):
super().__init__()
self.dest_path = kwargs["dest_path"]
self._is_running = False
def zip_logs(self, log_files, outdir):
"""
Zip the given log files into a single archive.
Writes an 'optecs_logs.zip' (DEFLATE-compressed) into outdir containing every
path in log_files. Raises FileNotFoundError if outdir does not exist.
"""
if not os.path.exists(outdir):
raise FileNotFoundError('Log zip error: Cannot find target directory ' + outdir)
output_file = os.path.join(outdir, 'optecs_logs.zip')
# print('Zipping', log_files, 'to', output_file)
zipf = zipfile.ZipFile(output_file, 'w', compression=zipfile.ZIP_DEFLATED)
for f in log_files:
zipf.write(f)
zipf.close()
def run(self):
self._is_running = True
try:
# Copy Database
db_filename = 'observer_encrypted.db' if use_encrypted_database else 'observer.db'
source_file = os.path.join(os.getcwd(), 'data\\' + db_filename)
if not os.path.isfile(source_file):
self.backupStatus.emit(False, f'Could not find {source_file}. Abort.')
return
if not os.path.isdir(self.dest_path):
os.makedirs(self.dest_path)
date_str = arrow.now().format('YYYYMMDD')
log_filename = f'OptecsEncryptedBackup_{date_str}.db'
dest_full_path = os.path.join(self.dest_path, log_filename)
shutil.copyfile(source_file, dest_full_path)
if not filecmp.cmp(source_file, dest_full_path):
    err_msg = f'File compare failed.\nCopied file likely has errors.\nTry new media.'
    self.backupStatus.emit(False, err_msg)
    return  # abort here so a SUCCESS status is not emitted after a failed copy
# Zip log files
log_file_current = glob.glob('*.log')
log_file_arch = glob.glob('log_archive/*.log')
self.zip_logs(log_file_current + log_file_arch, self.dest_path)
self._is_running = False
self.backupStatus.emit(True, f'SUCCESS: Copied {dest_full_path} + logs')
except Exception as e:
self.backupStatus.emit(False, f'ERROR: {e}')
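# Illustrative usage sketch (not part of the original module). The worker is
# designed to run on a background QThread so the UI stays responsive; the
# destination path below is a placeholder.
#
#   from PyQt5.QtCore import QThread
#
#   thread = QThread()
#   worker = BackupDBWorker(dest_path='E:/observer_backup')
#   worker.moveToThread(thread)
#   thread.started.connect(worker.run)
#   worker.backupStatus.connect(lambda ok, msg: print(ok, msg))
#   worker.backupStatus.connect(thread.quit)
#   thread.start()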
|
StarcoderdataPython
|
1704929
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 16 03:18:02 2018
@author: Kazuki
"""
import numpy as np
import pandas as pd
import os
import utils
utils.start(__file__)
#==============================================================================
# setting
month_limit = 12 # max: 96
month_round = 1
PREF = 'pos_201'
KEY = 'SK_ID_CURR'
os.system(f'rm ../feature/t*_{PREF}*')
# =============================================================================
#
# =============================================================================
#pos = pd.read_csv('/Users/Kazuki/Home-Credit-Default-Risk/py/sample_POS.csv')
pos = utils.read_pickles('../data/POS_CASH_balance')
pos.drop('SK_ID_PREV', axis=1, inplace=True)
pos = pos[pos['MONTHS_BALANCE']>=-month_limit]
pos['month_round'] = (pos['MONTHS_BALANCE'] / month_round).map(np.floor)
pos.drop('MONTHS_BALANCE', axis=1, inplace=True)
# groupby other credit cards
gr = pos.groupby(['SK_ID_CURR', 'month_round'])
pos_ = gr.size()
pos_.name = 'pos_size'
pos_ = pd.concat([pos_, gr.sum()], axis=1).reset_index() # TODO:NAME_CONTRACT_STATUS
pos_.sort_values(['SK_ID_CURR', 'month_round'], ascending=[True, False], inplace=True)
pos_['CNT_INSTALMENT_FUTURE-dby-CNT_INSTALMENT'] = pos_['CNT_INSTALMENT_FUTURE'] / pos_['CNT_INSTALMENT']
#pos_['-by-'] = pos_[''] / pos_['']
#pos_['-by-'] = pos_[''] / pos_['']
#pos_['-by-'] = pos_[''] / pos_['']
#pos_['-by-'] = pos_[''] / pos_['']
# TODO: pct_change & diff & rolling mean
#gr = pos_.groupby(['SK_ID_CURR'])
#pos_['AMT_BALANCE_pctchng-1'] = gr['AMT_BALANCE'].pct_change(-1)
#pos_['AMT_BALANCE_pctchng-1'] = gr['AMT_BALANCE'].pct_change(-1)
#pos_['AMT_BALANCE_pctchng-1'] = gr['AMT_BALANCE'].pct_change(-1)
pt = pd.pivot_table(pos_, index='SK_ID_CURR', columns=['month_round'])
pt.columns = [f'{PREF}_{c[0]}_t{int(c[1])}' for c in pt.columns]
pt.reset_index(inplace=True)
# =============================================================================
# merge
# =============================================================================
train = utils.load_train([KEY])
test = utils.load_test([KEY])
train_ = pd.merge(train, pt, on=KEY, how='left').drop(KEY, axis=1)
utils.to_feature(train_, '../feature/train')
test_ = pd.merge(test, pt, on=KEY, how='left').drop(KEY, axis=1)
utils.to_feature(test_, '../feature/test')
#==============================================================================
utils.end(__file__)
|
StarcoderdataPython
|
1761963
|
# Generated by Django 2.0.7 on 2018-07-21 11:14
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('catalog', '0004_auto_20180721_1028'),
]
operations = [
migrations.AlterModelOptions(
name='bookinstance',
options={'ordering': ['due_back'], 'permissions': (('is_library_member', 'A Library Member'),)},
),
]
|
StarcoderdataPython
|
1608734
|
<filename>myfirst/apps/articles/models.py
import datetime
from django.db import models
from django.utils import timezone
class Article(models.Model):
article_title = models.CharField('article title', max_length = 200)
article_text = models.TextField('article text')
pub_date = models.DateTimeField('publication date')
def __str__(self):
return self.article_title
def recently_published(self):
return self.pub_date >= (timezone.now() - datetime.timedelta(days = 7))
class Comment(models.Model):
article = models.ForeignKey(Article, on_delete = models.CASCADE)
author_name = models.CharField('author name', max_length = 50)
comment_text = models.CharField('comment text', max_length = 200)
def __str__(self):
return self.author_name
|
StarcoderdataPython
|
169434
|
<reponame>Julymusso/IFES
#var
#n, media, cont: int
#i: str
i='s'
cont=0
media=0
while i!='n':
n=int(input('Enter a number: '))
media=media+n
cont=cont+1
i=input("Do you want to keep adding (s) or end the program (n)? ")
media=media/cont
print (media)
|
StarcoderdataPython
|
1658802
|
import string
from django.db import models
from django.contrib.postgres.fields import ArrayField
class Icon(models.Model):
name = models.CharField(verbose_name='Nome', max_length=64, unique=True)
tags = ArrayField(models.CharField(max_length=128), blank=True)
slug = models.SlugField(verbose_name='Slug', max_length=255, unique=True)
class Meta:
db_table = "icons"
verbose_name = "icone"
verbose_name_plural = "icones"
def __str__(self):
return self.name
def get_absolute_url(self):
return 'details/{}'.format(self.slug)
class IconFile(models.Model):
# file formats
AI = 'AI'
EPS = 'EPS'
JPG = 'JPG'
PDF = 'PDF'
PNG = 'PNG'
SVG = 'SVG'
filetype_choices = (
(AI, 'ai'),
(EPS, 'eps'),
(JPG, 'jpg'),
(PDF, 'pdf'),
(PNG, 'png'),
(SVG, 'svg')
)
icon = models.ForeignKey(Icon, on_delete=models.CASCADE, related_name='files')
uploaded_at = models.DateTimeField(auto_now_add=True)
file_extension = models.CharField(verbose_name='Formato', max_length=4, choices=filetype_choices, default=SVG)
icon_file = models.FileField(verbose_name='Arquivo', upload_to='icons/')
class Meta:
db_table = "files"
verbose_name = "arquivo"
verbose_name_plural = "arquivos"
|
StarcoderdataPython
|
1737357
|
print('EVEN OR ODD')
num = int(input('Enter an integer: '))
if num%2==0:
    print('EVEN')
else:
    print('ODD')
|
StarcoderdataPython
|
179959
|
d1 = {42: 100}
d2 = {'abc': 'fob'}
d3 = {1e1000: d1}
s = set([frozenset([2,3,4])])
class C(object):
abc = 42
def f(self): pass
cinst = C()
class C2(object):
abc = 42
def __init__(self):
self.oar = 100
self.self = self
def __repr__(self):
return 'myrepr'
def __hex__(self):
return 'myhex'
def f(self): pass
c2inst = C2()
class C3(object):
def __init__(self):
self.abc = 42
self._contents = [1,2]
def __iter__(self):
return iter(self._contents)
def __len__(self):
return len(self._contents)
def __getitem__(self, index):
return self._contents[index]
c3inst = C3()
l = [1, 2, ]
i = 3
pass
|
StarcoderdataPython
|
1715667
|
import numpy as np
def mean(values, ignore_zeros=False):
used_values = [x for x in values if not ignore_zeros or x != 0]
return np.mean(used_values) if len(used_values) else 0
|
StarcoderdataPython
|
192530
|
<reponame>gkrish19/SIAM<filename>Software/run.py
import os
os.system('python top_file.py --model_type=DenseNet-BC --dataset=C100+ --saves --logs --renew-logs --train --test')
os.system('python top_file.py --model_type=DenseNet-BC --dataset=C100+ --saves --logs --renew-logs --train --test --quant --act_width=8 --wgt_width=8')
# os.system('python top_file.py --model_type=VGG19 --dataset=SVHN --saves --logs --renew-logs --vat --train --test --stddevVar=0.1 --quant --act_width=8 --wgt_width=8')
dev = [0.1, 0.2, 0.3, 0.4, 0.5]
adc = [4, 5, 6, 7, 8]
xbar = [64, 128, 256, 512]
for i in dev:
os.system('python top_file_small.py --model_type=DenseNet-BC --dataset=C100+ --saves --logs --renew-logs --vat --train --test --stddevVar=%.1f --quant --act_width=8 --wgt_width=8' %(i))
for j in xbar:
j = int(j)
for k in adc:
k= int(k)
os.system('python top_file_small.py --model_type=DenseNet-BC --dataset=C100+ --saves --logs --renew-logs --vat --train --test --stddevVar=%.1f --quant --act_width=8 --wgt_width=8 --rram --xbar_size=%d --adc_bits=%d' %(i,j,k))
|
StarcoderdataPython
|
165145
|
"""
This is awesome. And needs more documentation.
To bring some light in the big number of classes in this file:
First there are:
* ``SuperForm``
* ``SuperModelForm``
They are the forms that you probably want to use in your own code. They are
direct base classes of ``django.forms.Form`` and ``django.forms.ModelForm``
and have the formset functionality of this module baked in. They are ready
to use. Subclass them and be happy.
Then there are:
* ``SuperFormMixin``
* ``SuperModelFormMixin``
These are the mixins you can use if you don't want to subclass from
``django.forms.Form`` for whatever reason. The ones with Base at the beginning
don't have a metaclass attached. The ones without the Base in the name have
the relevant metaclass in place that handles the search for
``FormSetField``s.
Here is an example on how you can use this module::
from django import forms
from django_superform import SuperModelForm, FormSetField
from .forms import CommentFormSet
class PostForm(SuperModelForm):
title = forms.CharField()
text = forms.CharField()
comments = FormSetField(CommentFormSet)
# Now you can use the form in the view:
def post_form(request):
if request.method == 'POST':
form = PostForm(request.POST, request.FILES)
if form.is_valid():
obj = form.save()
return HttpResponseRedirect(obj.get_absolute_url())
else:
form = PostForm()
return render_to_response('post_form.html', {
'form': form,
}, context_instance=RequestContext(request))
And yes, thanks for asking, the ``form.is_valid()`` and ``form.save()`` calls
transparently propagate to the defined comments formset and call their
``is_valid()`` and ``save()`` methods. So you don't have to do anything
special in your view!
Now to how you can access the instantiated formsets::
>>> form = PostForm()
>>> form.fields['comments']
<CommentFormSet: ...>
Or in the template::
{{ form.as_p }}
{{ form.fields.comments.management_form }}
{% for fieldset_form in form.fields.comments %}
{{ fieldset_form.as_p }}
{% endfor %}
You're welcome.
"""
import django
from django import forms
from django.forms.forms import ErrorDict, ErrorList
from .fields import CompositeField
try:
from collections import OrderedDict
except ImportError:
from django.utils.datastructures import SortedDict as OrderedDict
class SuperFormMixin(object):
"""
The base class for all super forms. It does not inherit from any other
classes, so you are free to mix it into any custom form class you have.
.. code:: python
from django_superform import SuperFormMixin
class MySuperForm(MyCustomForm):
pass
The goal of a superform is to behave just like a normal django form but is
able to take composite fields, like
:class:`~django_superform.fields.FormField` and
:class:`~django_superform.fields.FormSetField`.
Cleaning, validation, etc. should work totally transparently. See the
:ref:`Quickstart Guide <quickstart>` for how superforms are used.
"""
def __init__(self, *args, **kwargs):
super(SuperFormMixin, self).__init__(*args, **kwargs)
self._init_composite_fields()
if django.VERSION < (1, 9):
# This behavior is not needed after django 1.9 introduced
# get_bound_field.
def __getitem__(self, name):
"""
Returns a ``django.forms.BoundField`` for the given field name.
It also returns
:class:`~django_superform.boundfield.CompositeBoundField`
instances for composite fields.
"""
field = self.fields[name]
if hasattr(field, 'get_bound_field'):
return field.get_bound_field(self, name)
return super(SuperFormMixin, self).__getitem__(name)
def add_prefix(self, name):
"""
Returns the field name with a prefix appended, if this Form has a
prefix set.
Subclasses may wish to override.
"""
field = self.fields.get(name)
if isinstance(field, CompositeField):
return field.get_prefix(self, name)
return super(SuperFormMixin, self).add_prefix(name)
def get_composite_field_value(self, name):
"""
Return the form/formset instance for the given field name.
"""
field = self.fields[name]
if hasattr(field, 'get_form'):
return self.forms[name]
if hasattr(field, 'get_formset'):
return self.formsets[name]
def _init_composite_field(self, name, field):
if hasattr(field, 'get_form'):
form = field.get_form(self, name)
field.widget.form = form
self.forms[name] = form
if hasattr(field, 'get_formset'):
formset = field.get_formset(self, name)
field.widget.formset = formset
self.formsets[name] = formset
def _init_composite_fields(self):
"""
Setup the forms and formsets.
"""
self.forms = OrderedDict()
self.formsets = OrderedDict()
for name, field in self.fields.items():
if isinstance(field, CompositeField):
self._init_composite_field(name, field)
def full_clean(self):
"""
Clean the form, including all formsets and add formset errors to the
errors dict. Errors of nested forms and formsets are only included if
they actually contain errors.
"""
super(SuperFormMixin, self).full_clean()
for field_name, composite in self.forms.items():
composite.full_clean()
if not composite.is_valid() and composite._errors:
self._errors[field_name] = ErrorDict(composite._errors)
for field_name, composite in self.formsets.items():
composite.full_clean()
if not composite.is_valid() and composite._errors:
self._errors[field_name] = ErrorList(composite._errors)
@property
def media(self):
"""
Incorporate composite field's media.
"""
media = forms.Media()
for name, field in self.fields.items():
if isinstance(field, CompositeField):
media = media + self.get_composite_field_value(name).media
else:
media = media + field.widget.media
return media
class SuperModelFormMixin(SuperFormMixin):
"""
Can be used in with your custom form subclasses like this:
.. code:: python
from django_superform import SuperModelFormMixin
class MySuperForm(SuperModelFormMixin, MyCustomModelForm)):
pass
"""
def save(self, commit=True):
"""
When saving a super model form, the nested forms and formsets will be
saved as well.
The implementation of ``.save()`` looks like this:
.. code:: python
saved_obj = self.save_form()
self.save_forms()
self.save_formsets()
return saved_obj
That makes it easy to override it in order to change the order in which
things are saved.
The ``.save()`` method will return only a single model instance even if
nested forms are saved as well. That keeps the API similar to what
Django's model forms are offering.
If ``commit=False`` django's modelform implementation will attach a
``save_m2m`` method to the form instance, so that you can call it
manually later. When you call ``save_m2m``, the ``save_forms`` and
``save_formsets`` methods will be executed as well so again all nested
forms are taken care of transparently.
"""
saved_obj = self.save_form(commit=commit)
self.save_forms(commit=commit)
self.save_formsets(commit=commit)
return saved_obj
def _extend_save_m2m(self, name, composites):
additional_save_m2m = []
for composite in composites:
if hasattr(composite, 'save_m2m'):
additional_save_m2m.append(composite.save_m2m)
if not additional_save_m2m:
return
def additional_saves():
for save_m2m in additional_save_m2m:
save_m2m()
# The save() method was called before save_forms()/save_formsets(), so
# we will already have save_m2m() available.
if hasattr(self, 'save_m2m'):
_original_save_m2m = self.save_m2m
else:
def _original_save_m2m():
return None
def augmented_save_m2m():
_original_save_m2m()
additional_saves()
self.save_m2m = augmented_save_m2m
setattr(self, name, additional_saves)
def save_form(self, commit=True):
"""
This calls Django's ``ModelForm.save()``. It only takes care of
saving this actual form, and leaves the nested forms and formsets
alone.
We separate this out of the
:meth:`~django_superform.forms.SuperModelForm.save` method to make
extensibility easier.
"""
return super(SuperModelFormMixin, self).save(commit=commit)
def save_forms(self, commit=True):
saved_composites = []
for name, composite in self.forms.items():
field = self.fields[name]
if hasattr(field, 'save'):
field.save(self, name, composite, commit=commit)
saved_composites.append(composite)
self._extend_save_m2m('save_forms_m2m', saved_composites)
def save_formsets(self, commit=True):
"""
Save all formsets. If ``commit=False``, it will modify the form's
``save_m2m()`` so that it also calls the formsets' ``save_m2m()``
methods.
"""
saved_composites = []
for name, composite in self.formsets.items():
field = self.fields[name]
if hasattr(field, 'save'):
field.save(self, name, composite, commit=commit)
saved_composites.append(composite)
self._extend_save_m2m('save_formsets_m2m', saved_composites)
class SuperModelForm(SuperModelFormMixin, forms.ModelForm):
"""
The ``SuperModelForm`` works like a Django ``ModelForm`` but has the
capabilities of nesting like :class:`~django_superform.forms.SuperForm`.
Saving a ``SuperModelForm`` will also save all nested model forms as well.
"""
class SuperForm(SuperFormMixin, forms.Form):
"""
The base class for all super forms. The goal of a superform is to behave
just like a normal django form but is able to take composite fields, like
:class:`~django_superform.fields.FormField` and
:class:`~django_superform.fields.FormSetField`.
Cleaning, validation, etc. should work totally transparently. See the
:ref:`Quickstart Guide <quickstart>` for how superforms are used.
"""
|
StarcoderdataPython
|
3356988
|
import os
#os.environ["CUDA_VISIBLE_DEVICES"]="0"
import numpy as np
import tensorflow as tf
from tensorflow.contrib.rnn import LSTMCell, GRUCell
import sys
import time
from sklearn.metrics import f1_score
import random
class rnn(object):
'''
rnn modular add-on for capturing case-level-context
parameters:
- num_classes: int
number of output classes
- max_docs: int
maximum number of documents in any sequence
- input_size: int
embedding dimension size of document embeddings
- rnn_units: int (default: 300)
number of rnn units in RNN layer
- dropout_keep: float (default: 0.9)
dropout keep rate after rnn layer
- lr: float (default: 0.0001)
learning rate for adam optimizer
- bidirectional: Boolean (default: True)
set to True to use case-level context from past and future documents
set to False to only use case-level context from past documents
methods:
- train(data,labels,batch_size=100,epochs=50,patience=5,
validation_data=None,savebest=False,filepath=None)
train network on given data
- predict(data,batch_size=100)
return the predicted labels for given data
- score(data,labels,batch_size=100)
return the micro and macro f-scores of predicted labels on given data
- save(filepath)
save the model weights to a file
- load(filepath)
load model weights from a file
'''
def __init__(self,num_classes,max_docs,input_size,rnn_units=300,dropout_keep=0.9,
lr=0.0001,bidirectional=True):
self.max_docs = max_docs
self.rnn_units = rnn_units
self.dropout_keep = dropout_keep
self.dropout = tf.placeholder(tf.float32)
self.doc_input = tf.placeholder(tf.float32, shape=[None,max_docs,input_size])
self.num_docs = tf.placeholder(tf.int32, shape=[None])
max_len = tf.reduce_max(self.num_docs)
doc_input_reduced = self.doc_input[:,:max_len,:]
doc_input_reduced = tf.nn.dropout(doc_input_reduced,self.dropout)
with tf.variable_scope('rnn',initializer=tf.contrib.layers.xavier_initializer()):
if bidirectional:
[outputs_fw,outputs_bw],_ = tf.nn.bidirectional_dynamic_rnn(
GRUCell(self.rnn_units),GRUCell(self.rnn_units),
doc_input_reduced,sequence_length=self.num_docs,dtype=tf.float32)
outputs = tf.concat((outputs_fw,outputs_bw),2)
else:
outputs,_ = tf.nn.dynamic_rnn(GRUCell(self.rnn_units),
doc_input_reduced,sequence_length=self.num_docs,dtype=tf.float32)
outputs = tf.nn.dropout(outputs,self.dropout)
self.doc_idx = tf.placeholder(tf.int32, shape=[None,2])
self.doc_embeds = tf.gather_nd(outputs,self.doc_idx)
#classification functions
logits = tf.layers.dense(self.doc_embeds,num_classes,
kernel_initializer=tf.contrib.layers.xavier_initializer())
self.prediction = tf.nn.softmax(logits)
#loss, accuracy, and training functions
self.labels = tf.placeholder(tf.int32,shape=[None])
self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits,labels=self.labels))
self.optimizer = tf.train.AdamOptimizer(lr,0.9,0.99).minimize(self.loss)
#init op
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
self.saver = tf.train.Saver()
self.sess = tf.Session(config=config)
self.sess.run(tf.global_variables_initializer())
def _batch_prepro(self,data,labels=None):
'''
used to zero-pad sequences and get the indices of the real (non-padded) documents
'''
batch_size = len(data)
dims = len(data[0][0])
retval = np.zeros((batch_size,self.max_docs,dims))
doc_idx = []
num_docs = []
for i,case in enumerate(data):
l = len(case)
for j in range(l):
doc_idx.append([i,j])
retval[i,:l,:] = np.array(case)
num_docs.append(l)
doc_idx = np.array(doc_idx)
if labels is not None:
labels_flat = [label for group in labels for label in group]
return retval,labels_flat,num_docs,doc_idx
return retval,num_docs,doc_idx
def train(self,data,labels,batch_size=100,epochs=50,patience=5,validation_data=None,
savebest=False,filepath=None):
'''
train network on given data
parameters:
- data: Iterable[Iterable[np.ndarray(dim=input_size)]]
The input data represents a list of cases,
where each case consists of a list of documents,
and each document is represented by a document embedding
- labels: Iterable[Iterable[int]]
The labels are represented by a list of cases,
where each case consists of a list of labels for each document in the case
- batch size: int (default: 100)
batch size to use for training
- epochs: int (default: 50)
number of epochs to train for
- patience: int (default: 5)
training stops after no improvement in validation score
for this number of epochs
- validation_data: tuple (optional)
tuple of inputs (X,y) representing validation data
- savebest: boolean (default: False)
set to True to save the best model based on validation score per epoch
- filepath: string (optional)
path to save model if savebest is set to True
outputs:
None
'''
if savebest==True and filepath==None:
raise Exception("Please enter a path to save the network")
if validation_data:
validation_size = len(validation_data[0])
else:
validation_size = len(data)
print('training network on %i documents, validation on %i documents' \
% (len(data), validation_size))
#track best model for saving
prevbest = 0
pat_count = 0
for ep in range(epochs):
#shuffle data
xy = list(zip(data,labels))
random.shuffle(xy)
data,labels = zip(*xy)
data = list(data)
labels = list(labels)
y_preds = []
y_trues = []
start_time = time.time()
#train
for start in range(0,len(data),batch_size):
#get batch index
if start+batch_size < len(data):
stop = start+batch_size
else:
stop = len(data)
X,y,num_docs,doc_idx = self._batch_prepro(data[start:stop],labels[start:stop])
feed_dict = {self.doc_input:X,self.labels:y,self.num_docs:num_docs,self.doc_idx:doc_idx,self.dropout:self.dropout_keep}
pred,cost,_ = self.sess.run([self.prediction,self.loss,self.optimizer],feed_dict=feed_dict)
#track correct predictions
y_preds.append(np.argmax(pred,1))
y_trues.extend(y)
sys.stdout.write("epoch %i, sample %i of %i, loss: %f \r"\
% (ep+1,stop+1,len(data),cost))
sys.stdout.flush()
#checkpoint after every epoch
print("\ntraining time: %.2f" % (time.time()-start_time))
y_preds = np.concatenate(y_preds,0)
micro = f1_score(y_trues,y_preds,average='micro')
macro = f1_score(y_trues,y_preds,average='macro')
print("epoch %i training micro/macro: %.4f, %.4f" % (ep+1,micro,macro))
micro,macro,loss = self.score(validation_data[0],validation_data[1],batch_size=batch_size)
print("epoch %i validation micro/macro: %.4f, %.4f" % (ep+1,micro,macro))
#reset timer
start_time = time.time()
#save if performance better than previous best
if micro >= prevbest:
prevbest = micro
pat_count = 0
if savebest:
self.save(filepath)
else:
pat_count += 1
if pat_count >= patience:
break
def predict(self,data,batch_size=100):
'''
return the predicted labels for given data
parameters:
- data: Iterable[Iterable[np.ndarray(dim=input_size)]]
The input data represents a list of cases,
where each case consists of a list of documents,
and each document is represented by a document embedding
- batch size: int (default: 100)
batch size to use during inference
outputs:
flattened list of predicted labels for input data
'''
y_preds = []
for start in range(0,len(data),batch_size):
#get batch index
if start+batch_size < len(data):
stop = start+batch_size
else:
stop = len(data)
X,num_docs,doc_idx = self._batch_prepro(data[start:stop])
feed_dict = {self.doc_input:X,self.num_docs:num_docs,self.doc_idx:doc_idx,self.dropout:1.0}
preds = self.sess.run(self.prediction,feed_dict=feed_dict)
y_preds.append(np.argmax(preds,1))
sys.stdout.write("processed %i of %i records \r" % (stop+1,len(data)))
sys.stdout.flush()
print()
y_preds = np.concatenate(y_preds,0)
return y_preds
def score(self,data,labels,batch_size=100):
'''
return the micro and macro f-score of predicted labels on given data
parameters:
- data: Iterable[Iterable[np.ndarray(dim=input_size)]]
The input data represents a list of cases,
where each case consists of a list of documents,
and each document is represented by a document embedding
- labels: Iterable[Iterable[int]]
The labels are represented by a list of cases,
where each case consists of a list of labels for each document in the case
- batch size: int (default: 100)
batch size to use during inference
outputs:
tuple of floats (micro,macro,loss) representing micro f-score, macro f-score,
and average loss of predicted labels on given data
'''
y_preds = []
y_trues = []
losses = []
for start in range(0,len(data),batch_size):
#get batch index
if start+batch_size < len(data):
stop = start+batch_size
else:
stop = len(data)
X,y,num_docs,doc_idx = self._batch_prepro(data[start:stop],labels[start:stop])
feed_dict = {self.doc_input:X,self.labels:y,self.num_docs:num_docs,self.doc_idx:doc_idx,self.dropout:1.0}
preds,loss = self.sess.run([self.prediction,self.loss],feed_dict=feed_dict)
y_preds.append(np.argmax(preds,1))
y_trues.extend(y)
losses.append(loss)
sys.stdout.write("processed %i of %i records \r" % (stop+1,len(data)))
sys.stdout.flush()
print()
y_preds = np.concatenate(y_preds,0)
micro = f1_score(y_trues,y_preds,average='micro')
macro = f1_score(y_trues,y_preds,average='macro')
loss = np.mean(losses)
return micro,macro,loss
def save(self,filename):
'''
save the model weights to a file
parameters:
- filepath: string
path to save model weights
outputs:
None
'''
self.saver.save(self.sess,filename)
def load(self,filename):
'''
load model weights from a file
parameters:
- filepath: string
path from which to load model weights
outputs:
None
'''
self.saver.restore(self.sess,filename)
if __name__ == "__main__":
#params
batch_size = 64
doc_embed_dim = 100
max_seq_len = 10
num_sequences = 5000
num_classes = 5
epochs = 10
#generate dummy data
X = []
y = []
for i in range(num_sequences):
seq_len = np.random.randint(2,max_seq_len)
X_seq = np.random.rand(seq_len,doc_embed_dim)
y_seq = np.random.randint(0,num_classes,seq_len)
X.append(X_seq)
y.append(y_seq)
#dummy train test split
val_size = int(0.2 * num_sequences)
X_train = X[:-val_size]
X_val = X[-val_size:]
y_train = y[:-val_size]
y_val = y[-val_size:]
#test model
model = rnn(num_classes,max_seq_len,doc_embed_dim)
model.train(X_train,y_train,batch_size,epochs,validation_data=(X_val,y_val))
|
StarcoderdataPython
|
162869
|
<filename>bitcoinlantern/api/connection.py<gh_stars>1-10
from enum import Enum
from rpc.exceptions import BitcoinException
import requests
class BlockExplorers(Enum):
BLOCKEXPLORER = 'https://blockexplorer.com/api/addr/'
class BlockExplorerConnection(object):
def __init__(self, explorer_enum, token=''):
self.explorer = explorer_enum
self.token = token
self.headers = {}
if token != '':
    # build the auth header from the supplied token rather than a hard-coded placeholder
    self.headers = {'Authorization': 'access_token {}'.format(token)}
def getAddressBalance(self, address):
try:
response = requests.get(self.explorer.value + address + '/balance', headers=self.headers, timeout=3)
if response.text == 'Invalid address: Checksum mismatch. Code:1':
return TypeError
return int(response.text)
except requests.exceptions.RequestException as error:
print(error)
# except timeout
return error
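# Illustrative usage sketch (the address below is a placeholder, not a real
# wallet): query the configured block explorer for an address balance in satoshis.
if __name__ == '__main__':
    conn = BlockExplorerConnection(BlockExplorers.BLOCKEXPLORER)
    print(conn.getAddressBalance('<bitcoin-address>'))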
|
StarcoderdataPython
|
104025
|
# -*- coding: utf-8 -*-
import time
from sklearn import ensemble
from sklearn import linear_model
from sklearn import naive_bayes
from sklearn import neighbors
from sklearn import neural_network
from sklearn import svm
from sklearn import tree
from sklearn.metrics import f1_score
from sklearn.model_selection import GridSearchCV
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingClassifier
class CVParameters:
ada_boost = {
'algorithm': ['SAMME', 'SAMME.R'],
'learning_rate': [i / 10. for i in range(1, 10, 1)],
'n_estimators': list(range(10, 100, 10))
}
bagging = {
'n_estimators': list(range(5, 50, 5)),
'bootstrap_features': [0, 1]
}
extra_trees = {
'criterion': ['gini', 'entropy'],
'n_estimators': list(range(5, 50, 5)),
'warm_start': [1, 0]
}
random_forest = {
'criterion': ['gini', 'entropy'],
'n_estimators': list(range(5, 50, 5)),
'oob_score': [1, 0],
'warm_start': [1, 0]
}
logistic_regression = {
'tol': [1e-3 / i for i in range(10, 100, 10)],
'C': [i / 10 for i in range(5, 15, 1)],
'solver': ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga']
}
passive_aggressive = {
'tol': [1e-3 / i for i in range(10, 100, 10)],
'early_stopping': [True, False],
'loss': ['hinge', 'squared_hinge'],
'warm_start': [1, 0]
}
ridge = {
'alpha': [i / 10 for i in range(5, 15, 1)],
'tol': [1e-3 / i for i in range(10, 100, 10)]
}
sgd = {
'loss': ['hinge', 'log', 'modified_huber', 'squared_hinge',
'perceptron'],
'penalty': ['l1', 'l2', 'elasticnet', 'none'],
'alpha': [i / 10000 for i in range(8, 12, 1)],
'tol': [1e-3 / i for i in range(10, 100, 10)]
}
bernoulli = {
'alpha': [i / 10 for i in range(1, 10, 1)],
}
gaussian = {
'var_smoothing': [1e-9 / i for i in range(10, 100, 10)],
}
k_neighbors = {
'n_neighbors': [i for i in range(3, 8, 1)],
'weights': ['uniform', 'distance'],
'algorithm': ['ball_tree', 'kd_tree', 'brute'],
'p': [1, 2, 3]
}
nearest_centroid = {
'metric': ['euclidean', 'cosine', 'manhattan']
}
mlp = {
'activation': ['logistic', 'tanh', 'relu'],
'solver': ['lbfgs', 'sgd', 'adam'],
'alpha': [0.0001 / i for i in range(10, 100, 10)],
'learning_rate': ['constant', 'invscaling', 'adaptive'],
'early_stopping': [True]
}
linear_svc = {
'penalty': ['l2'],
'multi_class': ['ovr', 'crammer_singer'],
'tol': [1e-3 / i for i in range(10, 100, 10)],
'C': [i / 10 for i in range(5, 15, 1)]
}
decision_tree = {
'criterion': ['gini', 'entropy'],
'splitter': ['best', 'random']
}
extra_tree = {
'criterion': ['gini', 'entropy'],
'splitter': ['best', 'random']
}
gradient_boosting = {
'loss': ['deviance'],
'learning_rate': [i / 10. for i in range(1, 10, 1)],
'criterion': ['friedman_mse'],
'tol': [1e-4 / i for i in range(10, 100, 10)]
}
hist_gradient_boosting = {
'l2_regularization': [0, 0.1],
'tol': [1e-7 / i for i in range(10, 100, 10)]
}
rstate = 0
def train_test(x_tr, y_tr, x_te, y_te, name):
algorithms = {
'ada_boost': ensemble.AdaBoostClassifier(),
'bagging': ensemble.BaggingClassifier(),
'extra_trees': ensemble.ExtraTreesClassifier(),
'random_forest': ensemble.RandomForestClassifier(),
'logistic_regression': linear_model.LogisticRegression(),
'passive_aggressive': linear_model.PassiveAggressiveClassifier(),
'ridge': linear_model.RidgeClassifier(),
'sgd': linear_model.SGDClassifier(),
'bernoulli': naive_bayes.BernoulliNB(),
'gaussian': naive_bayes.GaussianNB(),
'k_neighbors': neighbors.KNeighborsClassifier(),
'nearest_centroid': neighbors.NearestCentroid(),
'mlp': neural_network.MLPClassifier(),
'linear_svc': svm.LinearSVC(),
'decision_tree': tree.DecisionTreeClassifier(),
'extra_tree': tree.ExtraTreeClassifier(),
'gradient_boosting': ensemble.GradientBoostingClassifier(),
'hist_gradient_boosting': HistGradientBoostingClassifier()
}
res = {}
try:
clf = GridSearchCV(algorithms.get(name), getattr(CVParameters, name),
cv=2, n_jobs=-1)
start = time.perf_counter()
clf.fit(x_tr, y_tr)
tr_time = time.perf_counter() - start
print(tr_time)
print(clf.best_params_)
print(clf.best_score_)
tr_score = clf.score(x_tr, y_tr)
score = clf.score(x_te, y_te)
tr_fscore = f1_score(y_tr, clf.predict(x_tr), average='weighted')
fscore = f1_score(y_te, clf.predict(x_te), average='weighted')
print(tr_score, score, tr_fscore, fscore)
res = {name: {'test': score, 'train': tr_score, 'f1_test': fscore,
'f1_train': tr_fscore, 'tr_time': tr_time}}
res[name].update(clf.best_params_)
except Exception as e:
print(e)
return res
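# Illustrative usage sketch (not part of the original module), assuming a small
# scikit-learn toy dataset just to exercise the grid search end to end.
if __name__ == '__main__':
    from sklearn.datasets import load_iris
    from sklearn.model_selection import train_test_split
    X, y = load_iris(return_X_y=True)
    x_tr, x_te, y_tr, y_te = train_test_split(X, y, test_size=0.3, random_state=0)
    print(train_test(x_tr, y_tr, x_te, y_te, 'random_forest'))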
|
StarcoderdataPython
|
1750445
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Optional, Callable, Any, TYPE_CHECKING
from ..enums import ApplicationCommandPermissionType
if TYPE_CHECKING:
from ..enums import ApplicationCommandType
from .command import ApplicationCommand
__all__ = ('ApplicationCommandPermissions', 'CommandPermissionOverwrite', 'permission')
class ApplicationCommandPermissions:
"""A class that allows you to define permissions for an application command
in a :class:`Guild`.
Parameters
-----------
command: :class:`application.ApplicationCommand`
The application command whose permissions are being defined.
guild_id: :class:`int`
The ID of guild in which permissions are applied.
Attributes
----------
overwrite: List[:class:`CommandPermissionOverwrite`]
The overwrites this permissions set holds.
"""
def __init__(self, guild_id: int, command: ApplicationCommand = None):
self.command = command # type: ignore
self.guild_id = guild_id
self.overwrites = []
def get_overwrite(self, entity_id: int) -> Optional[CommandPermissionOverwrite]:
"""Gets permission overwrite for provided entity ID.
Parameters
-----------
entity_id: :class:`int`
The ID of role or user whose overwrite should be get.
Returns
-------
Optional[:class:`.CommandPermissionOverwrite`]
The permission overwrite if found, otherwise ``None``
"""
for overwrite in self.overwrites:
if overwrite.role_id == entity_id or overwrite.user_id == entity_id:
return overwrite
def add_overwrite(self, **options: Any) -> CommandPermissionOverwrite:
"""Adds a permission overwrite to this permissions set.
Parameters
-----------
**options:
The options of :class:`.CommandPermissionOverwrite`
Returns
-------
:class:`CommandPermissionOverwrite`
The permission overwrite that was added.
"""
overwrite = CommandPermissionOverwrite(**options)
self.overwrites.append(overwrite)
return overwrite
def remove_overwrite(self, entity_id: int) -> None:
"""Removes a permission overwrite for provided entity ID.
This method will not raise error if overwrite is not found.
Parameters
-----------
entity_id: :class:`int`
The ID of role or user whose overwrite should be removed.
"""
for overwrite in self.overwrites:
if overwrite.role_id == entity_id or overwrite.user_id == entity_id:
return self.overwrites.remove(overwrite)
class CommandPermissionOverwrite:
"""A class that defines an overwrite for :class:`ApplicationCommandPermissions`.
.. note::
Either of ``user_id`` or ``role_id`` must be provided.
Parameters
-----------
role_id: :class:`int`
The ID of role whose overwrite is being defined, this cannot be mixed with ``user_id``
parameter.
user_id: :class:`int`
The ID of user whose overwrite is being defined, this cannot be mixed with ``role_id``
parameter.
permission: :class:`bool`
Whether to allow the command for provided user or role ID. Defaults to ``False``
"""
if TYPE_CHECKING:
type: ApplicationCommandPermissionType
def __init__(self, *,
role_id: Optional[int] = None,
user_id: Optional[int] = None,
permission: bool = False,
):
self.role_id = role_id
self.user_id = user_id
self.permission = permission
if self.role_id is not None and self.user_id is not None:
raise TypeError('role_id and user_id cannot be mixed in permissions')
if self.role_id is not None:
self.type = ApplicationCommandPermissionType.role
elif self.user_id is not None:
self.type = ApplicationCommandPermissionType.user
def _get_id(self) -> Optional[int]:
if self.type == ApplicationCommandPermissionType.user:
return self.user_id
return self.role_id
def to_dict(self):
return {
'id': self._get_id(),
'type': self.type.value,
'permission': self.permission,
}
def permission(*, guild_id: int, **options: Any):
"""A decorator that defines the permissions of :class:`application.ApplicationCommand`
Usage: ::
@bot.slash_command(guild_ids=[12345], description='Cool command')
@discpy.application.permission(guild_id=12345, user_id=1234, permission=False)
@discpy.application.permission(guild_id=12345, role_id=123456, permission=True)
async def command(ctx):
await ctx.respond('Hello world')
In above command, The user with ID ``1234`` would not be able to use to command
and anyone with role of ID ``123456`` will be able to use the command in the guild
with ID ``12345``.
"""
def inner(func: Callable[..., Any]):
if not hasattr(func, "__application_command_permissions__"):
func.__application_command_permissions__ = {}
for original_guild_id in func.__application_command_permissions__:
if original_guild_id == guild_id:
func.__application_command_permissions__[original_guild_id].append(CommandPermissionOverwrite(**options))
return func
func.__application_command_permissions__[guild_id] = []
func.__application_command_permissions__[guild_id].append(CommandPermissionOverwrite(**options))
return func
return inner
|
StarcoderdataPython
|
199827
|
"""
.. module: historical.vpc.differ
:platform: Unix
:copyright: (c) 2017 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. author:: <NAME> <<EMAIL>>
"""
import logging
from raven_python_lambda import RavenLambdaWrapper
from historical.common.dynamodb import process_dynamodb_differ_record
from historical.common.util import deserialize_records
from historical.constants import LOGGING_LEVEL
from historical.vpc.models import CurrentVPCModel, DurableVPCModel
logging.basicConfig()
LOG = logging.getLogger('historical')
LOG.setLevel(LOGGING_LEVEL)
@RavenLambdaWrapper()
def handler(event, context): # pylint: disable=W0613
"""
Historical security group event differ.
Listens to the Historical current table and determines if there are differences that need to be persisted in the
historical record.
"""
# De-serialize the records:
records = deserialize_records(event['Records'])
for record in records:
process_dynamodb_differ_record(record, CurrentVPCModel, DurableVPCModel)
|
StarcoderdataPython
|
1660495
|
# Tests if delta calculator is working properly with simple assertion function
from finetuna.calcs import DeltaCalc
from ase.calculators.emt import EMT
import numpy as np
import copy
from ase.build import fcc100, add_adsorbate, molecule
from ase.constraints import FixAtoms
from ase.build import bulk
from ase.utils.eos import EquationOfState
from finetuna.base_calcs.morse import MultiMorse
parent_calculator = EMT()
energies = []
volumes = []
LC = [3.5, 3.55, 3.6, 3.65, 3.7, 3.75]
for a in LC:
cu_bulk = bulk("Cu", "fcc", a=a)
calc = EMT()
cu_bulk.set_calculator(calc)
e = cu_bulk.get_potential_energy()
energies.append(e)
volumes.append(cu_bulk.get_volume())
eos = EquationOfState(volumes, energies)
v0, e0, B = eos.fit()
aref = 3.6
vref = bulk("Cu", "fcc", a=aref).get_volume()
copper_lattice_constant = (v0 / vref) ** (1 / 3) * aref
slab = fcc100("Cu", a=copper_lattice_constant, size=(2, 2, 3))
ads = molecule("C")
add_adsorbate(slab, ads, 2, offset=(1, 1))
cons = FixAtoms(indices=[atom.index for atom in slab if (atom.tag == 3)])
slab.set_constraint(cons)
slab.center(vacuum=13.0, axis=2)
slab.set_pbc(True)
slab.wrap(pbc=[True] * 3)
slab.set_calculator(copy.copy(parent_calculator))
slab.set_initial_magnetic_moments()
images = [slab]
parent_energy = parent_ref = slab.get_potential_energy()
Gs = {
"default": {
"G2": {
"etas": np.logspace(np.log10(0.05), np.log10(5.0), num=4),
"rs_s": [0],
},
"G4": {"etas": [0.005], "zetas": [1.0, 4.0], "gammas": [1.0, -1.0]},
"cutoff": 6,
},
}
# create image with base calculator attached
cutoff = Gs["default"]["cutoff"]
base_calc = MultiMorse(images, cutoff, combo="mean")
slab_base = slab.copy()
slab_base.set_calculator(base_calc)
base_energy = base_ref = slab_base.get_potential_energy()
# Add
delta_calc = DeltaCalc([parent_calculator, base_calc], "add", [slab, slab_base])
# Set slab calculator to delta calc and evaluate energy
slab_add = slab.copy()
slab_add.set_calculator(delta_calc)
add_energy = slab_add.get_potential_energy()
# Sub
delta_calc = DeltaCalc([parent_calculator, base_calc], "sub", [slab, slab_base])
# Set slab calculator to delta calc and evaluate energy
slab_sub = slab.copy()
slab_sub.set_calculator(delta_calc)
sub_energy = slab_sub.get_potential_energy()
def test_delta_sub():
assert sub_energy == (
(parent_energy - base_energy) + (-parent_ref + base_ref)
), "Energies don't match!"
def test_delta_add():
assert (
np.abs(add_energy - ((base_energy + parent_energy) + (parent_ref - base_ref)))
< 1e-5
), "Energies don't match!"
|
StarcoderdataPython
|
122463
|
import numpy as np
import matplotlib.pyplot as plt
from time import time
from numba import cuda
N = 640000
def main():
x = np.linspace(0, 1, N, endpoint=True)
from serial import sArray
start = time()
f = sArray(x)
elapsed = time() - start
print("--- Serial timing: %s seconds ---" % elapsed)
from parallel import sArray
start = time()
fpar = sArray(x)
elapsed = time() - start
print("--- 1st parallel timing: %s seconds ---" % elapsed)
start = time()
fpar = sArray(x)
elapsed = time() - start
print("--- 2nd parallel timing: %s seconds ---" % elapsed)
if __name__ == '__main__':
main()
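# The serial.py / parallel.py modules are not shown here, so the body of sArray
# is unknown. Below is a purely illustrative numba sketch (the sine computation
# is an assumption). It also explains why the first parallel call is much
# slower than the second: the JIT compiles the function on its first use.
# The unused `cuda` import above hints that a GPU-targeted variant (for example
# numba.vectorize with target='cuda') may have been intended.
#
#   from numba import njit, prange
#   import numpy as np
#
#   @njit(parallel=True)
#   def sArray(x):
#       out = np.empty_like(x)
#       for i in prange(x.shape[0]):
#           out[i] = np.sin(2 * np.pi * x[i])
#       return out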
|
StarcoderdataPython
|
11405
|
<reponame>avijit-chakroborty/ngraph-bridge
# ==============================================================================
# Copyright 2018-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""nGraph TensorFlow bridge elementwise operations test
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import numpy as np
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
from common import NgraphTest
class TestElementwiseOperations(NgraphTest):
@pytest.mark.parametrize(("v1", "v2", "expected"),
((1.0, -1.0, [1.0]), (100, 200, ([200],)),
([0.0, 5.0, 10.0], [6.0],
(np.array([[6.0, 6.0, 10.0]]),))))
def test_maximum(self, v1, v2, expected):
val1 = tf.compat.v1.placeholder(tf.float32, shape=(None))
val2 = tf.compat.v1.placeholder(tf.float32, shape=(None))
out = tf.maximum(val1, val2)
sess_fn = lambda sess: sess.run((out,),
feed_dict={
val1: (v1,),
val2: (v2,)
})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
@pytest.mark.parametrize(
("v1", "v2", "expected"),
((1.4, 1.0, [False]), (-1.0, -1.0, ([True],)), (-1.0, 1000, [True]),
(200, 200, ([True],)), ([-1.0, 1.0, -4], [0.1, 0.1, -4],
(np.array([[True, False, True]]),)),
([-1.0, 1.0, -4], [-1.0], (np.array([[True, False, True]]),))))
def test_less_equal(self, v1, v2, expected):
val1 = tf.compat.v1.placeholder(tf.float32, shape=(None))
val2 = tf.compat.v1.placeholder(tf.float32, shape=(None))
out = tf.less_equal(val1, val2)
sess_fn = lambda sess: sess.run((out,),
feed_dict={
val1: (v1,),
val2: (v2,)
})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
@pytest.mark.parametrize(
("v1", "v2", "expected"),
((1.4, 1.0, [False]), (-1.0, -1.0, ([False],)), (-1.0, 1000, [True]),
(200, 200, ([False],)), ([-1.0, 1.0, -4], [0.1, 0.1, -4],
(np.array([[True, False, False]]),)),
([-1.0, 1.0, -4], [-1.0], (np.array([[False, False, True]]),))))
def test_less(self, v1, v2, expected):
val1 = tf.compat.v1.placeholder(tf.float32, shape=(None))
val2 = tf.compat.v1.placeholder(tf.float32, shape=(None))
out = tf.less(val1, val2)
sess_fn = lambda sess: sess.run((out,),
feed_dict={
val1: (v1,),
val2: (v2,)
})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
@pytest.mark.parametrize(
("v1", "v2", "expected"),
((1.4, 1.0, [True]), (-1.0, -1.0, ([True],)), (-1.0, 1000, [False]),
(200, 200, ([True],)), ([-1.0, 1.0, -4], [0.1, 0.1, -4],
(np.array([[False, True, True]]),)),
([-1.0, 1.0, -4], [-1.0], (np.array([[True, True, False]]),))))
def test_greater_equal(self, v1, v2, expected):
val1 = tf.compat.v1.placeholder(tf.float32, shape=(None))
val2 = tf.compat.v1.placeholder(tf.float32, shape=(None))
out = tf.greater_equal(val1, val2)
sess_fn = lambda sess: sess.run((out,),
feed_dict={
val1: (v1,),
val2: (v2,)
})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
@pytest.mark.parametrize(
("v1", "v2", "expected"),
((1.4, 1.0, [True]), (-1.0, -1.0, ([False],)), (-1.0, 1000, [False]),
(200, 200, ([False],)), ([-1.0, 1.0, -4], [0.1, 0.1, -4],
(np.array([[False, True, False]]),)),
([-1.0, 1.0, -4], [-1.0], (np.array([[False, True, False]]),))))
def test_greater(self, v1, v2, expected):
val1 = tf.compat.v1.placeholder(tf.float32, shape=(None))
val2 = tf.compat.v1.placeholder(tf.float32, shape=(None))
out = tf.greater(val1, val2)
sess_fn = lambda sess: sess.run((out,),
feed_dict={
val1: (v1,),
val2: (v2,)
})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
@pytest.mark.parametrize(("v1", "v2", "expected"),
((True, True, [True]), (True, False, ([False],)),
(1.0, -2.0, ([True],)), (False, 100, ([False],)),
([False, True, False], [True],
(np.array([[False, True, False]]),))))
def test_logical_and(self, v1, v2, expected):
val1 = tf.compat.v1.placeholder(tf.bool, shape=(None))
val2 = tf.compat.v1.placeholder(tf.bool, shape=(None))
out = tf.logical_and(val1, val2)
sess_fn = lambda sess: sess.run((out,),
feed_dict={
val1: (v1,),
val2: (v2,)
})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
@pytest.mark.parametrize(("test_input", "expected"), ((False, True),
(True, False)))
def test_logicalnot_1d(self, test_input, expected):
val = tf.compat.v1.placeholder(tf.bool, shape=(1,))
out = tf.logical_not(val)
sess_fn = lambda sess: sess.run((out,), feed_dict={val: (test_input,)})[
0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
def test_logicalnot_2d(self):
test_input = ((True, False, True), (False, True, False))
expected = np.logical_not(test_input)
val = tf.compat.v1.placeholder(tf.bool, shape=(2, 3))
out = tf.logical_not(val)
sess_fn = lambda sess: sess.run((out,), feed_dict={val: test_input})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
|
StarcoderdataPython
|
3250939
|
<filename>mxnet/tut-gpu.py
# https://gluon-crash-course.mxnet.io/use_gpus.html
from mxnet import nd, gpu, gluon, autograd
from mxnet.gluon import nn
from mxnet.gluon.data.vision import datasets, transforms
from time import time
# allocate data to a gpu
gpu_count = 1
x = nd.ones((3,4), ctx = gpu())
print(x)
if gpu_count > 1:
x.copyto(gpu(1))
net = nn.Sequential()
with net.name_scope():
net.add(
nn.Conv2D(channels=6, kernel_size=5, activation='relu'),
nn.MaxPool2D(pool_size=2, strides=2),
nn.Conv2D(channels=16, kernel_size=3, activation='relu'),
nn.MaxPool2D(pool_size=2, strides=2),
nn.Flatten(),
nn.Dense(120, activation='relu'),
nn.Dense(84, activation='relu'),
nn.Dense(10))
net.load_params('net.params', ctx=gpu(0))
x = nd.random.uniform(shape=(1,1,28,28), ctx=gpu(0))
print(net(x))
# advanced multi-gpu training
batch_size = 256
transformer = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(0.13, 0.31)])
train_data = gluon.data.DataLoader(
datasets.FashionMNIST(train=True).transform_first(transformer),
batch_size, shuffle=True, num_workers=4)
valid_data = gluon.data.DataLoader(
datasets.FashionMNIST(train=False).transform_first(transformer),
batch_size, shuffle=False, num_workers=4)
# devices = [gpu(0), gpu(1)]
devices = [gpu(x) for x in range(gpu_count)]
net.collect_params().initialize(force_reinit=True, ctx=devices)
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})
for epoch in range(10):
train_loss = 0.
tic = time()
for data, label in train_data:
data_list = gluon.utils.split_and_load(data, devices)
label_list = gluon.utils.split_and_load(label, devices)
with autograd.record():
losses = [softmax_cross_entropy(net(X), y)
for X, y in zip(data_list, label_list)]
for l in losses:
l.backward()
trainer.step(batch_size)
train_loss += sum([l.sum().asscalar() for l in losses])
print('Epoch {}: Loss: {:.3f}, Time {:.1f} sec'.
format(epoch, train_loss/len(train_data)/batch_size, time() - tic))
|
StarcoderdataPython
|
1643685
|
<filename>examples/name_server_proxy.py
from osbrain import run_agent
from osbrain import run_nameserver
if __name__ == '__main__':
# System deployment
ns = run_nameserver()
run_agent('Agent0')
run_agent('Agent1')
run_agent('Agent2')
# Create a proxy to Agent1 and log a message
agent = ns.proxy('Agent1')
agent.log_info('Hello world!')
ns.shutdown()
|
StarcoderdataPython
|
4832680
|
<reponame>Acidburn0zzz/dfvfs
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the file entry implementation using the tarfile."""
from __future__ import unicode_literals
import unittest
from dfvfs.path import os_path_spec
from dfvfs.path import tar_path_spec
from dfvfs.resolver import context
from dfvfs.vfs import tar_file_entry
from dfvfs.vfs import tar_file_system
from tests import test_lib as shared_test_lib
class TARFileEntryTest(shared_test_lib.BaseTestCase):
"""Tests the TAR extracted file entry."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['syslog.tar'])
self._SkipIfPathNotExists(test_file)
self._os_path_spec = os_path_spec.OSPathSpec(location=test_file)
self._tar_path_spec = tar_path_spec.TARPathSpec(
location='/syslog', parent=self._os_path_spec)
self._file_system = tar_file_system.TARFileSystem(self._resolver_context)
self._file_system.Open(self._tar_path_spec)
def tearDown(self):
"""Cleans up the needed objects used throughout the test."""
self._file_system.Close()
def testIntialize(self):
"""Test the __init__ function."""
file_entry = tar_file_entry.TARFileEntry(
self._resolver_context, self._file_system, self._tar_path_spec)
self.assertIsNotNone(file_entry)
def testGetParentFileEntry(self):
"""Tests the GetParentFileEntry function."""
path_spec = tar_path_spec.TARPathSpec(
location='/syslog', parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
parent_file_entry = file_entry.GetParentFileEntry()
self.assertIsNotNone(parent_file_entry)
self.assertEqual(parent_file_entry.name, '')
def testGetStat(self):
"""Tests the GetStat function."""
path_spec = tar_path_spec.TARPathSpec(
location='/syslog', parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
stat_object = file_entry.GetStat()
self.assertIsNotNone(stat_object)
self.assertEqual(stat_object.type, stat_object.TYPE_FILE)
self.assertEqual(stat_object.size, 1247)
self.assertEqual(stat_object.mode, 256)
self.assertEqual(stat_object.uid, 151107)
self.assertEqual(stat_object.gid, 5000)
self.assertEqual(stat_object.mtime, 1343166324)
self.assertFalse(hasattr(stat_object, 'mtime_nano'))
def testIsFunctions(self):
"""Test the Is? functions."""
path_spec = tar_path_spec.TARPathSpec(
location='/syslog', parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertFalse(file_entry.IsRoot())
self.assertFalse(file_entry.IsVirtual())
self.assertTrue(file_entry.IsAllocated())
self.assertFalse(file_entry.IsDevice())
self.assertFalse(file_entry.IsDirectory())
self.assertTrue(file_entry.IsFile())
self.assertFalse(file_entry.IsLink())
self.assertFalse(file_entry.IsPipe())
self.assertFalse(file_entry.IsSocket())
path_spec = tar_path_spec.TARPathSpec(
location='/', parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertTrue(file_entry.IsRoot())
self.assertTrue(file_entry.IsVirtual())
self.assertTrue(file_entry.IsAllocated())
self.assertFalse(file_entry.IsDevice())
self.assertTrue(file_entry.IsDirectory())
self.assertFalse(file_entry.IsFile())
self.assertFalse(file_entry.IsLink())
self.assertFalse(file_entry.IsPipe())
self.assertFalse(file_entry.IsSocket())
def testSubFileEntries(self):
"""Test the sub file entries iteration functionality."""
path_spec = tar_path_spec.TARPathSpec(
location='/', parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self._assertSubFileEntries(file_entry, ['syslog'])
# Test on a tar file that has missing directory entries.
test_file = self._GetTestFilePath(['missing_directory_entries.tar'])
self._SkipIfPathNotExists(test_file)
path_spec = os_path_spec.OSPathSpec(location=test_file)
path_spec = tar_path_spec.TARPathSpec(location='/', parent=path_spec)
file_system = tar_file_system.TARFileSystem(self._resolver_context)
self.assertIsNotNone(file_system)
file_system.Open(path_spec)
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self._assertSubFileEntries(
file_entry, ['File System', 'Non Missing Directory Entry'])
file_system_sub_file_entry = None
for sub_file_entry in file_entry.sub_file_entries:
# The "File System" and its sub-directories have missing entries within
# the tar file, but still should be found due to the AssetManifest.plist
# file found within the directories.
if sub_file_entry.name == 'File System':
self.assertTrue(sub_file_entry.IsVirtual())
self._assertSubFileEntries(sub_file_entry, ['Recordings'])
file_system_sub_file_entry = sub_file_entry
else:
self._assertSubFileEntries(sub_file_entry, ['test_file.txt'])
if file_system_sub_file_entry:
for sub_file_entry in file_system_sub_file_entry.sub_file_entries:
self.assertTrue(sub_file_entry.IsVirtual())
self._assertSubFileEntries(sub_file_entry, ['AssetManifest.plist'])
file_system.Close()
def testDataStreams(self):
"""Test the data streams functionality."""
path_spec = tar_path_spec.TARPathSpec(
location='/syslog', parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.number_of_data_streams, 1)
data_stream_names = []
for data_stream in file_entry.data_streams:
data_stream_names.append(data_stream.name)
self.assertEqual(data_stream_names, [''])
path_spec = tar_path_spec.TARPathSpec(
location='/', parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.number_of_data_streams, 0)
data_stream_names = []
for data_stream in file_entry.data_streams:
data_stream_names.append(data_stream.name)
self.assertEqual(data_stream_names, [])
def testGetDataStream(self):
"""Tests the GetDataStream function."""
path_spec = tar_path_spec.TARPathSpec(
location='/syslog', parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
data_stream_name = ''
data_stream = file_entry.GetDataStream(data_stream_name)
self.assertIsNotNone(data_stream)
self.assertEqual(data_stream.name, data_stream_name)
data_stream = file_entry.GetDataStream('bogus')
self.assertIsNone(data_stream)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1636516
|
<filename>moai/export/__init__.py<gh_stars>1-10
from moai.export.single import Exporter as Single
from moai.export.collection import Exporters as Collection
__all__ = [
"Single",
"Collection",
]
|
StarcoderdataPython
|
3315349
|
#!/usr/bin/env python
from event_loop.event_loop import EventLoop
__author__ = 'aGn'
__copyright__ = "Copyright 2018, Planet Earth"
if __name__ == "__main__":
print('SNMP Begins')
try:
EventLoop().run_forever()
except KeyboardInterrupt:
import sys
sys.exit(0)
|
StarcoderdataPython
|
3248937
|
<filename>utils/prepare_for_doc_test.py
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Style utils to preprocess files for doc tests.
The doc preprocessing function can be run on a list of files and/or
directories of files. It will recursively check whether the files contain
a python code snippet by looking for a ```python or ```py syntax.
In the default mode - `remove_new_line==False` - the script will
add a new line before every python code ending ``` line to make
the docstrings ready for pytest doctests.
However, we don't want to have empty lines displayed in the
official documentation, which is why the new-line insertion can be
reversed by adding the flag `--remove_new_line`, which sets
`remove_new_line==True`.
When debugging the doc tests locally, please make sure to
always run:
```python utils/prepare_for_doc_test.py src doc```
before running the doc tests:
```pytest --doctest-modules $(cat utils/documentation_tests.txt) -sv --doctest-continue-on-failure --doctest-glob="*.mdx"```
Afterwards you should revert the changes by running
```python utils/prepare_for_doc_test.py src doc --remove_new_line```
"""
import argparse
import os
def process_code_block(code, add_new_line=True):
if add_new_line:
return maybe_append_new_line(code)
else:
return maybe_remove_new_line(code)
def maybe_append_new_line(code):
"""
Append new line if code snippet is a
Python code snippet
"""
lines = code.split("\n")
if lines[0] in ["py", "python"]:
# add new line before last line being ```
last_line = lines[-1]
lines.pop()
lines.append("\n" + last_line)
return "\n".join(lines)
def maybe_remove_new_line(code):
"""
Remove new line if code snippet is a
Python code snippet
"""
lines = code.split("\n")
if lines[0] in ["py", "python"]:
        # remove the empty line before the last line being ```
lines = lines[:-2] + lines[-1:]
return "\n".join(lines)
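# Illustrative round-trip sketch (added for clarity; not part of the original script).
# The two helpers above operate on the segment between two ``` fences, i.e. what
# code.split("```") yields for a Python block; appending and removing are inverses.
def _new_line_round_trip_example():
    segment = "python\n>>> x = 1\n"
    padded = maybe_append_new_line(segment)  # -> "python\n>>> x = 1\n\n"
    restored = maybe_remove_new_line(padded)  # -> "python\n>>> x = 1\n"
    return padded, restored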
def process_doc_file(code_file, add_new_line=True):
"""
Process given file.
Args:
code_file (`str` or `os.PathLike`): The file in which we want to style the docstring.
"""
with open(code_file, "r", encoding="utf-8", newline="\n") as f:
code = f.read()
# fmt: off
splits = code.split("```")
splits = [s if i % 2 == 0 else process_code_block(s, add_new_line=add_new_line) for i, s in enumerate(splits)]
clean_code = "```".join(splits)
# fmt: on
diff = clean_code != code
if diff:
print(f"Overwriting content of {code_file}.")
with open(code_file, "w", encoding="utf-8", newline="\n") as f:
f.write(clean_code)
def process_doc_files(*files, add_new_line=True):
"""
    Applies the doc-test preprocessing to a list of files and/or directories.
    Args:
        files (several `str` or `os.PathLike`): The files or directories to treat.
        add_new_line (`bool`, *optional*, defaults to `True`):
            Whether to add a new line before each closing ``` of a Python code block (`True`) or to remove it
            (`False`). Files are modified in place.
"""
for file in files:
# Treat folders
if os.path.isdir(file):
files = [os.path.join(file, f) for f in os.listdir(file)]
files = [f for f in files if os.path.isdir(f) or f.endswith(".mdx") or f.endswith(".py")]
process_doc_files(*files, add_new_line=add_new_line)
else:
try:
process_doc_file(file, add_new_line=add_new_line)
except Exception:
print(f"There is a problem in {file}.")
raise
def main(*files, add_new_line=True):
process_doc_files(*files, add_new_line=add_new_line)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("files", nargs="+", help="The file(s) or folder(s) to restyle.")
parser.add_argument(
"--remove_new_line",
action="store_true",
help="Whether to remove new line after each python code block instead of adding one.",
)
args = parser.parse_args()
main(*args.files, add_new_line=not args.remove_new_line)
|
StarcoderdataPython
|
1769211
|
import sys
sys.path.append("..")
import wingnet as wn
import argparse
import os
import pandas as pd
import cv2 as cv
import matplotlib.pyplot as plt
def load_project(path):
print("loading from {}".format(path))
if path and os.path.exists(path):
project = pd.read_pickle(path)
for img_path in project["path"]:
if not os.path.exists(img_path):
print("Image {} does not exist!".format(img_path))
return
return project
else:
print("project does not exist: {}".format(path))
def load_and_save_images(in_path, output_path):
project = load_project(in_path)
print(project)
for image_pth, kpts in zip(project['path'], project['keypoints']):
image = cv.imread(image_pth)
xs = [i * image.shape[1] for i in kpts[0::2]]
ys = [i * image.shape[0] for i in kpts[1::2]]
plt.imshow(image)
plt.scatter(xs, ys, marker='x', c='r')
plt.axis('off')
plt.grid(b=None)
# plt.show()
fname = os.path.splitext(os.path.basename(image_pth))[0]
print("save to {}/{}.jpg".format(output_path, fname))
plt.savefig("{}/{}.jpg".format(output_path, fname))
plt.close()
if __name__ == "__main__":
args = argparse.ArgumentParser(description='Save images and keypoints')
args.add_argument('--path', default=None, type=str, help='path to project file')
args.add_argument('-output_path', default=None, type=str, help='path to save images')
args = args.parse_args()
filepath = args.path
output_path = args.output_path
if output_path is None:
# fname = os.path.splitext(filepath)[0]
fname = os.path.splitext(os.path.basename(filepath))[0]
output_path = '/tmp/out_{}'.format(fname)
if not os.path.exists(output_path):
os.makedirs(output_path)
load_and_save_images(filepath, output_path)
|
StarcoderdataPython
|
49414
|
import sys
from bs4 import BeautifulSoup
from urllib import request
# Read the target URL from the first command-line argument.
url = sys.argv[1]
response = request.urlopen(url)
soup = BeautifulSoup(response, features = "html.parser")
response.close()
print(soup.title.text)
print(soup.pre.text)
|
StarcoderdataPython
|
1710804
|
<reponame>khoih-prog/USBComposite_stm32f1<gh_stars>100-1000
from pywinusb import hid
hid.core.show_hids()
|
StarcoderdataPython
|
130551
|
<reponame>Malavikka/ConvLab-2<gh_stars>100-1000
import os
import zipfile
import torch
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from pytorch_pretrained_bert.modeling import BertForSequenceClassification
from pytorch_pretrained_bert.tokenization import BertTokenizer
from convlab2.policy.hdsa.multiwoz.transformer import Constants
from convlab2.util.file_util import cached_path
from convlab2.util.multiwoz.dbquery import Database
def examine(domain, slot):
if slot == "addr":
slot = 'address'
elif slot == "post":
slot = 'postcode'
elif slot == "ref":
slot = 'ref'
elif slot == "car":
slot = "type"
elif slot == 'dest':
slot = 'destination'
elif domain == 'train' and slot == 'id':
slot = 'trainid'
elif slot == 'leave':
slot = 'leaveat'
elif slot == 'arrive':
slot = 'arriveby'
elif slot == 'price':
slot = 'pricerange'
elif slot == 'depart':
slot = 'departure'
elif slot == 'name':
slot = 'name'
elif slot == 'type':
slot = 'type'
elif slot == 'area':
slot = 'area'
elif slot == 'parking':
slot = 'parking'
elif slot == 'internet':
slot = 'internet'
elif slot == 'stars':
slot = 'stars'
elif slot == 'food':
slot = 'food'
elif slot == 'phone':
slot = 'phone'
elif slot == 'day':
slot = 'day'
else:
slot = 'illegal'
return slot
def truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
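# Illustrative example (added for clarity; not part of the original module): with
# max_length=4 the longer of the two token lists is trimmed one token at a time.
def _truncate_seq_pair_example():
    tokens_a, tokens_b = ["a", "b", "c"], ["x", "y", "z"]
    truncate_seq_pair(tokens_a, tokens_b, 4)
    return tokens_a, tokens_b  # -> (["a", "b"], ["x", "y"])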
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, file, turn, guid, text_m, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.file = file
self.turn = turn
self.guid = guid
self.text_m = text_m
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, file, turn, input_ids, input_mask, segment_ids, label_id):
self.file = file
self.turn = turn
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class HDSA_predictor():
def __init__(self, archive_file, model_file=None, use_cuda=False):
if not os.path.isfile(archive_file):
if not model_file:
raise Exception("No model for DA-predictor is specified!")
archive_file = cached_path(model_file)
model_dir = os.path.dirname(os.path.abspath(__file__))
if not os.path.exists(os.path.join(model_dir, 'checkpoints')):
archive = zipfile.ZipFile(archive_file, 'r')
archive.extractall(model_dir)
load_dir = os.path.join(model_dir, "checkpoints/predictor/save_step_23926")
self.db=Database()
if not os.path.exists(load_dir):
archive = zipfile.ZipFile('{}.zip'.format(load_dir), 'r')
archive.extractall(os.path.dirname(load_dir))
self.tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", do_lower_case=True)
self.max_seq_length = 256
self.domain = 'restaurant'
self.model = BertForSequenceClassification.from_pretrained(load_dir,
cache_dir=os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(-1)), num_labels=44)
self.device = 'cuda' if use_cuda else 'cpu'
self.model.to(self.device)
def gen_example(self, state):
file = ''
turn = 0
guid = 'infer'
act = state['user_action']
for w in act:
d=w[1]
if Constants.domains.index(d.lower()) < 8:
self.domain = d.lower()
hierarchical_act_vecs = [0 for _ in range(44)] # fake target
meta = state['belief_state']
constraints = []
if self.domain != 'bus':
for slot in meta[self.domain]['semi']:
if meta[self.domain]['semi'][slot] != "":
constraints.append([slot, meta[self.domain]['semi'][slot]])
query_result = self.db.query(self.domain, constraints)
if not query_result:
kb = {'count':'0'}
src = "no information"
else:
kb = query_result[0]
kb['count'] = str(len(query_result))
src = []
for k, v in kb.items():
k = examine(self.domain, k.lower())
if k != 'illegal' and isinstance(v, str):
src.extend([k, 'is', v])
src = " ".join(src)
usr = state['history'][-1][-1]
sys = state['history'][-2][-1] if len(state['history']) > 1 else None
example = InputExample(file, turn, guid, src, usr, sys, hierarchical_act_vecs)
kb['domain'] = self.domain
return example, kb
def gen_feature(self, example):
tokens_a = self.tokenizer.tokenize(example.text_a)
tokens_b = self.tokenizer.tokenize(example.text_b)
tokens_m = self.tokenizer.tokenize(example.text_m)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
truncate_seq_pair(tokens_a, tokens_b, self.max_seq_length - 3)
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * (len(tokens_a) + 2)
assert len(tokens) == len(segment_ids)
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
if len(tokens) < self.max_seq_length:
if len(tokens_m) > self.max_seq_length - len(tokens) - 1:
tokens_m = tokens_m[:self.max_seq_length - len(tokens) - 1]
tokens += tokens_m + ['[SEP]']
segment_ids += [0] * (len(tokens_m) + 1)
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (self.max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == self.max_seq_length
assert len(input_mask) == self.max_seq_length
assert len(segment_ids) == self.max_seq_length
feature = InputFeatures(file=example.file,
turn=example.turn,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=example.label)
return feature
def predict(self, state):
example, kb = self.gen_example(state)
feature = self.gen_feature(example)
input_ids = torch.tensor([feature.input_ids], dtype=torch.long).to(self.device)
input_masks = torch.tensor([feature.input_mask], dtype=torch.long).to(self.device)
segment_ids = torch.tensor([feature.segment_ids], dtype=torch.long).to(self.device)
with torch.no_grad():
logits = self.model(input_ids, segment_ids, input_masks, labels=None)
logits = torch.sigmoid(logits)
preds = (logits > 0.4).float()
preds_numpy = preds.cpu().nonzero().squeeze().numpy()
# for i in preds_numpy:
# if i < 10:
# print(Constants.domains[i], end=' ')
# elif i < 17:
# print(Constants.functions[i-10], end=' ')
# else:
# print(Constants.arguments[i-17], end=' ')
# print()
return preds, kb
|
StarcoderdataPython
|
1657125
|
<reponame>kirillzx/Math-projects
def helix(n):
t = [[0]*n for i in range (n)]
i, j = 0, 0
for k in range(1, n*n+1):
t[i][j]=k
if k == n*n: break
if i<=j+1 and i+j<n-1:
j+=1
elif i<j and i+j>=n-1:
i+=1
elif i>=j and i+j>n-1:
j-=1
elif i>j+1 and i+j<=n-1:
i-=1
return t
#test
res = helix(6)
for i in res:
print(*i)
|
StarcoderdataPython
|
3368230
|
<reponame>Toby-masuku/analysePredict
from . import group_6_module
|
StarcoderdataPython
|
1628210
|
<reponame>ben-hayes/node-hidden-markov-model-tf<filename>test/gaussian.py
import os.path as path
import json
import tensorflow as tf
import tensorflow_probability as tfp
from .tool_generate_data import GenerateData
thisdir = path.dirname(path.realpath(__file__))
# Make data
generator = GenerateData()
states, emissions = generator.data()
# Compute emission probabilities
tf.enable_eager_execution()
data_tf = tf.constant(emissions)
distributions = tfp.distributions.MultivariateNormalFullCovariance(
loc=tf.constant(generator.mu),
covariance_matrix=tf.constant(generator.Sigma)
)
emissions_pdf = distributions.prob(tf.expand_dims(data_tf, -2))
# Save input and output
with open(path.join(thisdir, 'gaussian.json'), 'w') as fp:
json.dump({
'config': generator.config,
'input': {
'mu': generator.mu.tolist(),
'Sigma': generator.Sigma.tolist(),
'emissions': emissions.tolist()
},
'output': emissions_pdf.numpy().tolist()
}, fp)
|
StarcoderdataPython
|
3219201
|
<gh_stars>0
"""
Authors:
kjmasumo
bowerw2
This is our implementation of the Viterbi algorithm for part of speech tagging.
"""
# Implement the Viterbi algorithm.
# Takes in four parameters: 1) a set of unique tags, 2) the provided sentence as a list of words, 3) a transition
# probability distribution (indexed by previous tag and exposing a .prob() method), and 4) an emission probability
# distribution (indexed by tag). Returns a dictionary with the first key 'predicted_tags' mapping to the tags
# predicted for the given sentence and the second key 'probability' mapping to the probability of the most likely
# tag sequence.
def viterbi(tags, sent, transition, emission):
lower_sent = [word.lower() for word in sent]
# In the Stanford pseudo-code, tag_probs is 'viterbi' and actual_tags is 'backpointer'
tag_probs = [{}]
actual_tags = [{}]
# Initialization step
for tag in tags:
# Multiply the probability that the first tag comes after a "." by the probability of the observation given
# the tag. Also sentences start with "."
tag_probs[0][tag] = transition["."].prob(tag) * emission[tag].prob(lower_sent[0])
actual_tags[0][tag] = None
# Recursion step
for index in range(1, len(lower_sent)):
# Initialize tag probability dictionary (this_tag_prob) and backpointer dictionary (this_actual_tag)
this_tag_prob = {}
this_actual_tag = {}
# Retrieve the probability dictionary for the previous observation.
prev_tag_prob = tag_probs[-1]
for tag in tags:
# Determine the probability of each tag occurring and retrieve the most likely previous tag path given the
# current tag.
best_prev = max(prev_tag_prob.keys(),
key=lambda prev_tag: prev_tag_prob[prev_tag] * transition[prev_tag].prob(tag) *
emission[tag].prob(lower_sent[index]))
this_actual_tag[tag] = best_prev
# Using the most likely previous tag determine the probability of the current tag occurring.
this_tag_prob[tag] = prev_tag_prob[best_prev] * transition[best_prev].prob(tag) * \
emission[tag].prob(lower_sent[index])
tag_probs.append(this_tag_prob)
actual_tags.append(this_actual_tag)
# Termination step
prev_tag_prob = tag_probs[-1]
# Repeat what was done previously but now looking for "." to mark the end of the sentence.
best_prev = max(prev_tag_prob.keys(),
key=lambda prev_tag: prev_tag_prob[prev_tag] * transition[prev_tag].prob("."))
best_tags_prob = prev_tag_prob[best_prev] * transition[best_prev].prob(".")
# best_tags is the list of tags or hidden states that will be returned
best_tags = [".", best_prev]
# Go backwards through actual_tags to figure out best tag for each word
# and populate best_tags
actual_tags.reverse()
this_best_tag = best_prev
for tag in actual_tags:
best_tags.append(tag[this_best_tag])
this_best_tag = tag[this_best_tag]
# Reverse best_tags to match pos tags with word order
best_tags.reverse()
return {"predicted_tags": best_tags, "probability": best_tags_prob}
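# Illustrative usage sketch (added for clarity; not part of the original assignment).
# The transition/emission arguments are assumed to be dict-like objects whose values expose a
# .prob() method (e.g. nltk's ConditionalProbDist); a tiny uniform stand-in is used here.
class _UniformDist:
    def __init__(self, p):
        self._p = p
    def prob(self, _outcome):
        return self._p
def _viterbi_smoke_test():
    tags = {"NN", "VB"}
    transition = {t: _UniformDist(0.5) for t in tags | {"."}}
    emission = {t: _UniformDist(0.1) for t in tags}
    return viterbi(tags, ["time", "flies"], transition, emission)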
|
StarcoderdataPython
|
3383365
|
import singer_sdk.typing as th
from tap_nhl.schemas.stream_schema_object import StreamSchemaObject
class LiveBoxscoreObject(StreamSchemaObject):
properties = th.PropertiesList(
th.Property("gameId", th.IntegerType),
th.Property("teams", th.ObjectType(
th.Property("away", th.ObjectType(
th.Property("team", th.ObjectType(
th.Property("id", th.IntegerType),
th.Property("name", th.StringType),
th.Property("link", th.StringType),
th.Property("abbreviation", th.StringType),
th.Property("triCode", th.StringType),
)),
th.Property("teamStats", th.ObjectType(
th.Property("teamSkaterStats", th.ObjectType(
th.Property("goals", th.IntegerType),
th.Property("pim", th.IntegerType),
th.Property("shots", th.IntegerType),
th.Property("powerPlayPercentage", th.StringType),
th.Property("powerPlayGoals", th.NumberType),
th.Property("powerPlayOpportunities", th.NumberType),
th.Property("faceOffWinPercentage", th.StringType),
th.Property("blocked", th.IntegerType),
th.Property("takeaways", th.IntegerType),
th.Property("giveaways", th.IntegerType),
th.Property("hits", th.IntegerType),
))
)),
th.Property("players", th.ArrayType(th.ObjectType(
# th.Property("player", th.ObjectType(
th.Property("person", th.ObjectType(
th.Property("id", th.IntegerType),
th.Property("fullName", th.StringType),
th.Property("link", th.StringType),
th.Property("shootsCatches", th.StringType),
th.Property("rosterStatus", th.StringType),
)),
th.Property("jerseyNumber", th.StringType),
th.Property("position", th.ObjectType(
th.Property("code", th.StringType),
th.Property("name", th.StringType),
th.Property("type", th.StringType),
th.Property("abbreviation", th.StringType),
)),
th.Property("stats", th.ObjectType(
th.Property("playerStats", th.ObjectType(
th.Property("timeOnIce", th.StringType),
th.Property("assists", th.IntegerType),
th.Property("goals", th.IntegerType),
th.Property("shots", th.IntegerType),
th.Property("hits", th.IntegerType),
th.Property("powerPlayGoals", th.IntegerType),
th.Property("powerPlayAssists", th.IntegerType),
th.Property("penaltyMinutes", th.IntegerType),
th.Property("faceOffWins", th.IntegerType),
th.Property("faceoffTaken", th.IntegerType),
th.Property("takeaways", th.IntegerType),
th.Property("giveaways", th.IntegerType),
th.Property("shortHandedGoals", th.IntegerType),
th.Property("shortHandedAssists", th.IntegerType),
th.Property("blocked", th.IntegerType),
th.Property("plusMinus", th.IntegerType),
th.Property("evenTimeOnIce", th.StringType),
th.Property("powerPlayTimeOnIce", th.StringType),
th.Property("shortHandedTimeOnIce", th.StringType),
th.Property("pim", th.IntegerType),
th.Property("saves", th.IntegerType),
th.Property("powerPlaySaves", th.IntegerType),
th.Property("shortHandedSaves", th.IntegerType),
th.Property("evenSaves", th.IntegerType),
th.Property("shortHandedShotsAgainst", th.IntegerType),
th.Property("evenShotsAgainst", th.IntegerType),
th.Property("powerPlayShotsAgainst", th.IntegerType),
th.Property("decision", th.StringType),
th.Property("savePercentage", th.NumberType),
th.Property("powerPlaySavePercentage", th.NumberType),
th.Property("evenStrengthSavePercentage", th.NumberType),
))
))
))),
# th.Property("goalies", th.ArrayType(th.IntegerType)),
# th.Property("skaters", th.ArrayType(th.IntegerType)),
th.Property("onIce", th.ArrayType(th.IntegerType)),
th.Property("onIcePlus", th.ArrayType(th.ObjectType(
th.Property("playerId", th.IntegerType),
th.Property("shiftDuration", th.IntegerType),
th.Property("stamina", th.IntegerType),
))),
th.Property("scratches", th.ArrayType(th.IntegerType)),
th.Property("penaltyBox", th.ArrayType(th.ObjectType(
th.Property("id", th.IntegerType),
th.Property("timeRemaining", th.StringType),
th.Property("active", th.BooleanType),
))),
th.Property("coaches", th.ArrayType(th.ObjectType(
th.Property("person", th.ObjectType(
th.Property("fullName", th.StringType),
th.Property("link", th.StringType),
)),
th.Property("position", th.ObjectType(
th.Property("code", th.StringType),
th.Property("name", th.StringType),
th.Property("type", th.StringType),
th.Property("abbreviation", th.StringType),
))
)))
)),
th.Property("home", th.ObjectType(
th.Property("team", th.ObjectType(
th.Property("id", th.IntegerType),
th.Property("name", th.StringType),
th.Property("link", th.StringType),
th.Property("abbreviation", th.StringType),
th.Property("triCode", th.StringType),
)),
th.Property("teamStats", th.ObjectType(
th.Property("teamSkaterStats", th.ObjectType(
th.Property("goals", th.IntegerType),
th.Property("pim", th.IntegerType),
th.Property("shots", th.IntegerType),
th.Property("powerPlayGoals", th.NumberType),
th.Property("powerPlayOpportunities", th.NumberType),
th.Property("faceOffWinPercentage", th.StringType),
th.Property("blocked", th.IntegerType),
th.Property("takeaways", th.IntegerType),
th.Property("giveaways", th.IntegerType),
th.Property("hits", th.IntegerType),
))
)),
th.Property("players", th.ArrayType(th.ObjectType(
th.Property("person", th.ObjectType(
th.Property("id", th.IntegerType),
th.Property("fullName", th.StringType),
th.Property("link", th.StringType),
th.Property("shootsCatches", th.StringType),
th.Property("rosterStatus", th.StringType),
)),
th.Property("jerseyNumber", th.StringType),
th.Property("position", th.ObjectType(
th.Property("code", th.StringType),
th.Property("name", th.StringType),
th.Property("type", th.StringType),
th.Property("abbreviation", th.StringType),
)),
th.Property("stats", th.ObjectType(
th.Property("playerStats", th.ObjectType(
th.Property("timeOnIce", th.StringType),
th.Property("assists", th.IntegerType),
th.Property("goals", th.IntegerType),
th.Property("shots", th.IntegerType),
th.Property("hits", th.IntegerType),
th.Property("powerPlayPercentage", th.StringType),
th.Property("powerPlayGoals", th.IntegerType),
th.Property("powerPlayAssists", th.IntegerType),
th.Property("penaltyMinutes", th.IntegerType),
th.Property("faceOffWins", th.IntegerType),
th.Property("faceoffTaken", th.IntegerType),
th.Property("takeaways", th.IntegerType),
th.Property("giveaways", th.IntegerType),
th.Property("shortHandedGoals", th.IntegerType),
th.Property("shortHandedAssists", th.IntegerType),
th.Property("blocked", th.IntegerType),
th.Property("plusMinus", th.IntegerType),
th.Property("evenTimeOnIce", th.StringType),
th.Property("powerPlayTimeOnIce", th.StringType),
th.Property("shortHandedTimeOnIce", th.StringType),
th.Property("pim", th.IntegerType),
th.Property("saves", th.IntegerType),
th.Property("powerPlaySaves", th.IntegerType),
th.Property("shortHandedSaves", th.IntegerType),
th.Property("evenSaves", th.IntegerType),
th.Property("shortHandedShotsAgainst", th.IntegerType),
th.Property("evenShotsAgainst", th.IntegerType),
th.Property("powerPlayShotsAgainst", th.IntegerType),
th.Property("decision", th.StringType),
th.Property("savePercentage", th.NumberType),
th.Property("powerPlaySavePercentage", th.NumberType),
th.Property("evenStrengthSavePercentage", th.NumberType),
))
))
))),
# th.Property("goalies", th.ArrayType(th.IntegerType)),
# th.Property("skaters", th.ArrayType(th.IntegerType)),
th.Property("onIce", th.ArrayType(th.IntegerType)),
th.Property("onIcePlus", th.ArrayType(th.ObjectType(
th.Property("playerId", th.IntegerType),
th.Property("shiftDuration", th.IntegerType),
th.Property("stamina", th.IntegerType),
))),
th.Property("scratches", th.ArrayType(th.IntegerType)),
th.Property("penaltyBox", th.ArrayType(th.ObjectType(
th.Property("id", th.IntegerType),
th.Property("timeRemaining", th.StringType),
th.Property("active", th.BooleanType),
))),
th.Property("coaches", th.ArrayType(th.ObjectType(
th.Property("person", th.ObjectType(
th.Property("fullName", th.StringType),
th.Property("link", th.StringType),
)),
th.Property("position", th.ObjectType(
th.Property("code", th.StringType),
th.Property("name", th.StringType),
th.Property("type", th.StringType),
th.Property("abbreviation", th.StringType),
))
)))
)),
)),
# th.Property("officials", th.ArrayType(th.ObjectType(
# th.Property("official", th.ObjectType(
# th.Property("id", th.IntegerType),
# th.Property("fullName", th.StringType),
# th.Property("link", th.StringType),
# )),
# th.Property("officialType", th.StringType)
# )))
)
|
StarcoderdataPython
|
149203
|
import os
from PIL import Image, ImageFile
from tqdm import tqdm
ImageFile.LOAD_TRUNCATED_IMAGES = True
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
from scene_classification.utils import scan_all_files
class TestDataset(Dataset):
def __init__(self, imgdir, transform):
self.imgdir = imgdir
self.image_list = scan_all_files(self.imgdir)
self.transform = transform
def __len__(self):
return len(self.image_list)
def __getitem__(self, idx):
input_path = self.image_list[idx].rstrip()
img = Image.open(input_path).convert('RGB')
img = self.transform(img)
return img, os.path.basename(input_path)
if __name__ == '__main__':
crop_size = (320, 256)
test_tfms = transforms.Compose([
transforms.Resize(crop_size),
# transforms.Resize(int(crop_size / 0.875)),
# transforms.CenterCrop(crop_size),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
imgdir = '/Users/01384153/sf_wpf/dataset/test_dataset/zhengjianzhao'
dataset = TestDataset(imgdir=imgdir, transform=test_tfms)
testloader = torch.utils.data.DataLoader(dataset, batch_size=96, shuffle=False, num_workers=24)
for data in tqdm(testloader):
# get the inputs and assign them to cuda
inputs = data
print(inputs)
# # inputs = inputs.to(device).half() # uncomment for half precision model
# inputs = inputs.cuda()
# labels = labels.cuda()
#
# outputs = model(inputs)
# _, predicted = torch.max(outputs.data, 1)
# loss = criterion(outputs, labels)
# loss.backward()
# optimizer.step()
|
StarcoderdataPython
|
1606826
|
<reponame>threefoldtech/jumpscaleX_libs_extra
from Jumpscale import j
# from .CapacityPlanner import CapacityPlanner
JSBASE = j.baseclasses.object
DIR_ITEMS = j.clients.threefold_directory.capacity
class Models:
pass
class FarmerFactory(JSBASE):
def __init__(self):
# self.__jslocation__ = "_j.tools.threefoldgrid"
JSBASE.__init__(self)
self._zerotier_client = None
self._zerotier_net_sysadmin = None
# self.zerotier_net_tfgrid = self.zerotier_client.network_get("") #TODO:*1
self._iyo = None
self._jwt = None
# self.capacity_planner = CapacityPlanner()
self.zdb = None
self._models = None
self._bcdb = None
@property
def zerotier_client(self):
if not self._zerotier_client:
self._zerotier_client = j.clients.zerotier.get("sysadmin")
return self._zerotier_client
@property
def zerotier_net_sysadmin(self):
if not self._zerotier_net_sysadmin:
self._zerotier_net_sysadmin = self.zerotier_client.network_get(
"1d71939404587f3c"
) # don't change the nr is fixed
return self._zerotier_net_sysadmin
@property
def iyo(self):
if not self._iyo:
self._iyo = j.clients.itsyouonline.get()
return self._iyo
@property
def jwt(self):
if not self._jwt:
self._jwt = self.iyo.jwt_get(refreshable=True, scope="user:memberof:threefold.sysadmin")
return self._jwt
@property
def bcdb(self):
if self.zdb is None:
raise j.exceptions.Base("you need to set self.zdb with a zerodb connection")
if self._bcdb is None:
self._bcdb = j.data.bcdb.get(self.zdb)
return self._bcdb
@property
def models(self):
if self.zdb is None:
raise j.exceptions.Base("you need to set self.zdb with a zerodb connection")
if self._models is None:
models_path = j.clients.git.getContentPathFromURLorPath(
"https://github.com/threefoldtech/digital_me/tree/development_simple/packages/threefold/models"
)
self.bcdb.models_add(models_path, overwrite=True)
self._models = Models()
self._models.nodes = self.bcdb.model_get(url="threefold.grid.node")
self._models.farmers = self.bcdb.model_get(url="threefold.grid.farmer")
self._models.reservations = self.bcdb.model_get(url="threefold.grid.reservation")
self._models.threebots = self.bcdb.model_get(url="threefold.grid.threebot")
self._models.webgateways = self.bcdb.model_get(url="threefold.grid.webgateway")
self.capacity_planner.models = self._models
return self._models
@property
def nodes_active_sysadmin_nr(self):
"""
how many nodes with ZOS have been found in sysadmin network
:return:
"""
        # Use an ORM comparison (== True) rather than Python's `is`, which would evaluate to a plain bool.
        nr_zos_sysadmin = len(self.models.nodes.index.select().where(self.models.nodes.index.up_zos == True))  # noqa: E712
        print("Found nr of nodes which can be managed over ZOS: %s" % nr_zos_sysadmin)
return nr_zos_sysadmin
@staticmethod
def _tf_dir_node_find(ipaddr=None, node_id=None):
for item in DIR_ITEMS:
if ipaddr and "robot_address" in item and ipaddr in item["robot_address"]:
return item
if node_id and node_id.lower() == item["node_id"].lower():
return item
@staticmethod
def robot_get(node):
"""
:param node:
:return: robot connection for node (model) specified
"""
if not node.noderobot_ipaddr:
return None
if not node.node_zos_id:
return None
j.clients.zrobot.get(instance=node.node_zos_id, data={"url": node.noderobot_ipaddr})
robot = j.clients.zrobot.robots[node.node_zos_id]
return robot
def farmer_get_from_dir(self, name, return_none_if_not_exist=False):
res = self.models.farmers.index.select().where(self.models.farmers.index.name == name).execute()
if len(res) > 0:
o = self.models.farmers.get(res[0].id)
else:
if return_none_if_not_exist:
return
o = self.models.farmers.new()
return o
def farmers_load(self):
"""
will get all farmers from tf directory & load in BCDB
"""
farmers = j.clients.threefold_directory.farmers
for farmer in farmers:
if "name" not in farmer:
continue
obj = self.farmer_get_from_dir(farmer["name"])
obj.name = farmer["name"]
for wallet_addr in farmer["wallet_addresses"]:
if wallet_addr not in obj.wallets:
obj.wallets.append(wallet_addr)
obj.iyo_org = farmer["iyo_organization"]
self.models.farmers.set(obj)
def node_get_from_zerotier(self, node_addr, return_none_if_not_exist=False):
"""
get the node starting from address in zerotier
:param node_addr:
:param return_none_if_not_exist:
:return:
"""
res = self.models.nodes.index.select().where(self.models.nodes.index.node_zerotier_id == node_addr).execute()
if len(res) > 0:
o = self.models.nodes.get(res[0].id)
else:
if return_none_if_not_exist:
return
o = self.models.nodes.new()
return o
def node_get_from_tfdir(self, node_host_id, return_none_if_not_exist=False):
"""
get the node starting from tf directory property
:param node_host_id:
:param return_none_if_not_exist:
:return:
"""
res = self.models.nodes.index.select().where(self.models.nodes.index.node_zos_id == node_host_id).execute()
if len(res) > 0:
o = self.models.nodes.get(res[0].id)
else:
if return_none_if_not_exist:
return
o = self.models.nodes.new()
return o
def zerotier_scan(self, reset=False):
"""
        will do a scan of the full zerotier sysadmin network; this can take a long time
:return:
js_shell 'j.tools.threefold_farmer.zerotier_scan()'
"""
for node in self.zerotier_net_sysadmin.members_list():
online = node.data["online"] # online from zerotier
# online_past_sec = int(j.data.time.epoch - node.data["lastOnline"] / 1000)
ipaddr = node.data["config"]["ipAssignments"][0]
if online:
o = self.node_get_from_zerotier(node.address)
o.sysadmin_ipaddr = ipaddr
o.node_zerotier_id = node.address
self.node_check(o, reset=reset)
else:
o = self.node_get_from_zerotier(node.address, return_none_if_not_exist=True)
if o is not None:
# means existed in DB
self.node_check(o, reset=reset)
def tf_dir_scan(self, reset=False):
"""
walk over all nodes found in tfdir
do ping test over pub zerotier grid network
:return:
"""
for item in j.clients.threefold_directory.capacity:
node = self.node_get_from_tfdir(item["node_id"])
self.node_check(node, reset=reset)
def _fail_save(self):
if not self._bcdb:
self.zdb = j.servers.zdb.test_instance_start(reset=False)
self._bcdb = j.data.bcdb.get(self.zdb, reset=False)
def load(self, reset=False):
"""
load the info from different paths into database
kosmos 'j.tools.threefoldgrid.load(reset=True)'
:param reset:
:return:
"""
self.zdb = j.servers.zdb.test_instance_start(reset=reset)
self._bcdb = j.data.bcdb.get(self.zdb, reset=reset) # to make sure we reset the index
self.farmers_load()
self.zerotier_scan(reset=reset)
# self.tf_dir_scan(reset=reset)
|
StarcoderdataPython
|
1768928
|
<reponame>ManderaGeneral/generalbrowser
from unittest import TestCase
class Test(TestCase):
def test(self):
pass
|
StarcoderdataPython
|
3278932
|
<filename>src/combinational_element_factory.py
from __future__ import division
import os, math
me = os.path.dirname(__file__)
from block_constants import *
from Element import CombinationalElement
from pymclevel.schematic import MCSchematic
from pymclevel.box import BoundingBox
from pymclevel import alphaMaterials
form_tall = MCSchematic(filename=os.path.join(me, "..", "res", "generic_boolean_blank.schematic"))
form_short = MCSchematic(filename=os.path.join(me, "..", "res", "generic_boolean_short_blank.schematic"))
formBox_tall = BoundingBox((0, 0, 0), (form_tall.Width, form_tall.Height, form_tall.Length))
formBox_short = BoundingBox((0, 0, 0), (form_short.Width, form_short.Height, form_short.Length))
def generate(comb_equation, use_input_color_key = None, use_output_color_key = None):
inputs = comb_equation.inputs
minterms = comb_equation.minterms
form = form_tall if len(minterms) > 5 else form_short
formBox = formBox_tall if len(minterms) > 5 else formBox_short
implicantLimit = 13 if len(minterms) > 5 else 5
while len(minterms) % implicantLimit != 0:
minterms.append({})
numXCopies = int(math.ceil(len(inputs) / 4))
sizeX = numXCopies * form.Width + 2
numYCopies = int(math.ceil(len(minterms) / implicantLimit))
sizeY = numYCopies * form.Height + 3
sizeZ = form.Length + 1
# print sizeX, sizeY, sizeZ
level = MCSchematic(shape=(sizeX, sizeY, sizeZ))
box = BoundingBox((0, 0, 0), (sizeX, sizeY, sizeZ))
# ================================================================================================
# Paste the schematic the number of times we know we'll need
pasteX = 0
for i in range(numXCopies):
pasteY = 1
for i in range(numYCopies):
level.copyBlocksFrom(form, formBox, (pasteX, pasteY, 0))
pasteY += form.Height
pasteX += form.Width
# Fill the bottom plane with a ground
# level.fillBlocks(BoundingBox((0, 0, 0), (sizeX, 1, sizeZ)), alphaMaterials.BlockofIron)
# Build X-ways across each row corresponding to each term
cx = 0
cy = 2
cz = 1
numTerms = 0
side = CLOSE_SIDE
relative_input_locations = {}
for termIndex in range(len(minterms)):
term = minterms[termIndex]
cx = 0
for i in inputs:
if i in term.keys():
mat = TORCH if term[i] else REDSTONE
else:
mat = AIR
data = TORCH_POINTING_NEG_Z if (cz == 1) else TORCH_POINTING_POS_Z
level.setBlockAt(cx, cy, cz, mat)
level.setBlockDataAt(cx, cy, cz, data)
if termIndex == 0:
sx = cx
sy = cy - 2
sz = cz + 4
for iter_sz in [sz, sz+1, sz+2, sz+3]:
level.setBlockAt(sx, sy, iter_sz, WOOL)
data = WOOL_BLACK if use_input_color_key == None else use_input_color_key[i]
level.setBlockDataAt(sx, sy, iter_sz, data)
relative_input_locations[i] = [sx, sy, sz+2]
cx += 2
# Build the slice of the side scaffolding that goes on this row's height level:
# -----------------------------------------------------------------------------
prevCy = cy
prevCz = cz
cx = box.width - 2
if side == CLOSE_SIDE:
cz -= 1
cy -= 1
elif side == FAR_SIDE:
cz += 1
cy -= 1
if len(term) > 0:
level.setBlockAt(cx, cy, cz, TORCH)
level.setBlockDataAt(cx, cy, cz, TORCH_POINTING_POS_X)
cx += 1
cy -= 1
if numTerms in [0, 1]:
level.setBlockAt(cx, cy, cz, DOUBLE_SLAB)
level.setBlockDataAt(cx, cy, cz, DOUBLE_SLAB_STONE)
else:
level.setBlockAt(cx, cy, cz, SLAB)
level.setBlockDataAt(cx, cy, cz, STONE_SLAB_TOP)
cy += 1
level.setBlockAt(cx, cy, cz, REDSTONE)
if side == CLOSE_SIDE:
cz += 1
elif side == FAR_SIDE:
cz -= 1
level.setBlockAt(cx, cy, cz, SLAB)
level.setBlockDataAt(cx, cy, cz, STONE_SLAB_TOP)
cy += 1
level.setBlockAt(cx, cy, cz, REDSTONE)
if side == CLOSE_SIDE:
currentCloseTowerTopY = cy
currentCloseTowerTopZ = cz
elif side == FAR_SIDE:
currentFarTowerTopY = cy
currentFarTowerTopZ = cz
cy = prevCy
cz = prevCz
# -----------------------------------------------------------------------------
# Switch sides
side = FAR_SIDE if (side == CLOSE_SIDE) else CLOSE_SIDE
# The z location alternates depending on the side
if side == CLOSE_SIDE: cz = 1
if side == FAR_SIDE: cz = 8
# Keep track of the number of terms
numTerms += 1
# JUMP LOGIC
# Normal case: cy goes up by one, we are working term by term up one paste of the schematic
# Special case: We have done 13 terms, and need to 'jump' to the next paste of the schematic
# This requires some special connecting and bridging.
# ------------------------------------------------------------------------------------------
if numTerms == implicantLimit:
sx = box.width - 1
sy = currentCloseTowerTopY
sz = currentCloseTowerTopZ
sz += 1
level.setBlockAt(sx, sy, sz, WOOL)
level.setBlockDataAt(sx, sy, sz, WOOL_BLACK)
sz += 1
level.setBlockAt(sx, sy, sz, TORCH)
level.setBlockDataAt(sx, sy, sz, TORCH_POINTING_POS_Z)
sy += 1
for itr_sz in [sz, sz-1, sz-2]:
level.setBlockAt(sx, sy, itr_sz, WOOL)
level.setBlockDataAt(sx, sy, itr_sz, WOOL_BLACK)
sy += 1
level.setBlockAt(sx, sy, sz, TORCH)
level.setBlockDataAt(sx, sy, sz, TORCH_ON_GROUND)
sz -= 1
level.setBlockAt(sx, sy, sz, REDSTONE)
sz -= 1
level.setBlockAt(sx, sy, sz, REPEATER)
# If we are finished with the whole thing, make the lead the exposes
# The signal to the rest of the world
if termIndex == len(minterms) - 1:
sz += 2
sy += 1
data = WOOL_BLACK if use_output_color_key == None else use_output_color_key[comb_equation.name]
for iter_sz in range(sz, sz+7, 1):
level.setBlockAt(sx, sy, iter_sz, WOOL)
level.setBlockDataAt(sx, sy, iter_sz, data)
sy += 1
level.setBlockAt(sx, sy, iter_sz, REDSTONE)
sy -= 1
sz += 7
level.setBlockAt(sx, sy, sz, WOOL)
level.setBlockDataAt(sx, sy, sz, data)
sy += 1
level.setBlockAt(sx, sy, sz, REPEATER)
level.setBlockDataAt(sx, sy, sz, REPEATER_TOWARD_POS_Z)
lead_location = [sx, sy, sz]
# -----------------------------------------------------
sx = box.width - 1
sy = currentFarTowerTopY
sz = currentFarTowerTopZ
sz -= 1
level.setBlockAt(sx, sy, sz, WOOL)
level.setBlockDataAt(sx, sy, sz, WOOL_BLACK)
sy += 1
level.setBlockAt(sx, sy, sz, 75)
level.setBlockDataAt(sx, sy, sz, 5)
sz += 1
level.setBlockAt(sx, sy, sz, WOOL)
level.setBlockDataAt(sx, sy, sz, WOOL_BLACK)
sz -= 1
sy += 1
level.setBlockAt(sx, sy, sz, WOOL)
level.setBlockDataAt(sx, sy, sz, WOOL_BLACK)
sz += 1
level.setBlockAt(sx, sy, sz, REDSTONE)
sz += 1
level.setBlockAt(sx, sy, sz, WOOL)
level.setBlockDataAt(sx, sy, sz, WOOL_BLACK)
sy += 1
level.setBlockAt(sx, sy, sz, TORCH)
level.setBlockDataAt(sx, sy, sz, TORCH_ON_GROUND)
# Now reset the variables for working up the next paste:
cy += 4
numTerms = 0
side = CLOSE_SIDE
cz = 1
else:
cy += 1
# level.setBlockAt(0, 0, 0, 20)
# level.setBlockAt(box.width-1, box.height-1, box.length-1, 20)
# # Flip the entire schematic around to help make the fitting routine more 'sane'
# # Also adjust location variables (like the locations of the lead and inputs) to
# # reflect the Z-flip
#
# level.flipEastWest()
# lead_location[2] = sizeZ - 1 - lead_location[2]
# for ril in relative_input_locations.values():
# ril[2] = sizeZ - 1 - ril[2]
# level.setBlockAt(*lead_location, blockID = 35)
# level.setBlockDataAt(*lead_location, newdata = 0)
#
# for ril in relative_input_locations.values():
# level.setBlockAt(*ril, blockID = GLASS)
ret = CombinationalElement(level)
ret.relative_output_locations = {comb_equation.name : lead_location}
ret.relative_input_locations = relative_input_locations
ret.size = (sizeX, sizeY, sizeZ)
return ret
|
StarcoderdataPython
|
1671488
|
from django.db.models import base, Model
from django.utils.translation import get_language
class TranslatableMeta(base.ModelBase):
"""
Metaclass that looks for a special field called __translatable__ which is a dictionary containing keys as the name
of the translatable properties, and the values being lambda functions that generate the field.
Example:
__translatable__ = {
'name': lambda l: models.CharField(
_("Name in {LANGUAGE_NAME}".format(**l)),
max_length=256,
default='',
blank=True,
),
}
The metaclass will read those fields, and generate suffixed fields that match what languages are available
in the settings.
Example: (Settings contain: ar, fa, fr, el, en)
name -> name_ar
name_fa
name_fr
name_el
name_en
For each translatable property, this metaclass also creates a dynamic property named after the original field
defined in the __translatable__ field (in the case above, the new property will be called "name").
    This special property will use the Django i18n tools to detect the user's language; if a value for that
    language is found on the object, it returns the translated value, otherwise it falls back to the English version.
    This metaclass will also search for a property named either "name" or "title", and if one is available,
    it will make __str__ return that special property.
"""
def __new__(cls, name, bases, attrs):
super_new = super(TranslatableMeta, cls).__new__
def name_in_default_language(self):
"""
Function that is injected in the __str__ function if the model has a "Name" property
:param self: object that inherits this metaclass
:return: The value from the name property.
"""
return self.name
def title_in_default_language(self):
"""
Function that is injected in the __str__ function if the model doesn't have a "Name" property,
but it does have a "Title" property.
:param self: object that inherits this metaclass
:return: The value of the title property
"""
return self.title
def fallback_field(field):
"""
            Function that generates a property wrapping the translated field. The generated property has no
            _<ISO> suffix; the value it returns and the field it writes to depend on the language the user has
            currently selected.
:param field: name of the field to be wrapped
:return: property object that can be added to the metaclass
"""
fallback_name = '%s_%s' % (field, 'en')
def getter(self):
"""
Getter function that detects the language used by the request and returns the value in that language.
Falls back to english.
:param self: object that inherits this metaclass
:return: translated property in the language selected in the request
"""
language = get_language()
field_name = '%s_%s' % (field, language[:2])
if hasattr(self, field_name) and getattr(self, field_name):
return getattr(self, field_name)
return getattr(self, fallback_name) # Always fall back to english
def setter(self, value):
"""
Setter function that detects the language of the request and sets the value of the appropriate property.
Falls back to english
:param self: object that inherits this metaclass
:param value: value to be set in property
:return: None
"""
language = get_language()
field_name = '%s_%s' % (field, language[:2])
if hasattr(self, field_name):
setattr(self, field_name, value)
else:
# falling back setter to english
setattr(self, fallback_name, value)
return property(getter, setter)
if '__translatable__' in attrs:
__translatable__ = attrs['__translatable__']
new_attributes = dict()
            # There are issues importing some settings from the top of models.py
from django.conf import settings
# Enumerates the translatable items defined in the subclass
for attribute_name, attribute_lambda in __translatable__.items():
# Enumerates the languages in the settings
for language_code, language_name in settings.LANGUAGES:
# The information below is passed down to the lambda functions
                    # the use of lambda functions is deliberate, to avoid the complexity of copying objects
language_dictionary = {'LANGUAGE_CODE': language_code, 'LANGUAGE_NAME': language_name}
new_attributes.update({
"{}_{}".format(attribute_name, language_code): attribute_lambda(language_dictionary)
})
# Adding a dynamic property that detects the current language to the mix
new_attributes.update({attribute_name: fallback_field(attribute_name)})
new_attributes.update(attrs)
attrs = new_attributes
# If the attributes contain either name or title, make one of those the property returned by __str__
if 'name' in __translatable__:
attrs['__str__'] = name_in_default_language
elif 'title' in __translatable__:
attrs['__str__'] = title_in_default_language
# Remove the __translatable__ field to make the runtime object a little less crowded
del attrs['__translatable__']
new_ = super_new(cls, name, bases, attrs)
return new_
"""
The class below is a Frankenstein construct that fools the migration engine into ignoring it.
"""
TranslatableModel = TranslatableMeta(
"TranslatableModel",
(Model,),
{
'__module__': TranslatableMeta.__module__,
'Meta': type("Meta", (object,), {'abstract': True})
}
)
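# Illustrative usage sketch (added for clarity; the model below is hypothetical and not part of
# the original module). Defining it inside a function avoids registering a model at import time;
# it assumes a configured Django project.
def _translatable_usage_example():
    from django.db import models
    from django.utils.translation import gettext_lazy as _
    class ExampleArticle(TranslatableModel):
        __translatable__ = {
            'title': lambda l: models.CharField(
                _("Title in {LANGUAGE_NAME}".format(**l)),
                max_length=256,
                default='',
                blank=True,
            ),
        }
        class Meta:
            app_label = 'example'
    # The metaclass generates title_<code> fields for every settings.LANGUAGES entry and a
    # language-aware `title` property that falls back to English; __str__ returns that title.
    return ExampleArticle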
|
StarcoderdataPython
|
119637
|
<reponame>fstwn/Cockatoo<filename>modules/networkx/algorithms/tests/test_mixing_degree.py
#!/usr/bin/env python
from nose.tools import *
from nose import SkipTest
import networkx
import networkx.algorithms.mixing as mixing
class TestDegreeMixing(object):
def setUp(self):
self.P4=networkx.path_graph(4)
self.D=networkx.DiGraph()
self.D.add_edges_from([(0, 2), (0, 3), (1, 3), (2, 3)])
self.M=networkx.MultiGraph()
self.M.add_path(list(range(4)))
self.M.add_edge(0,1)
self.S=networkx.Graph()
self.S.add_edges_from([(0,0),(1,1)])
def test_node_degree_xy_undirected(self):
xy=sorted(mixing.node_degree_xy(self.P4))
xy_result=sorted([(1,2),
(2,1),
(2,2),
(2,2),
(1,2),
(2,1)])
assert_equal(xy,xy_result)
def test_node_degree_xy_directed(self):
xy=sorted(mixing.node_degree_xy(self.D))
xy_result=sorted([(2,1),
(2,3),
(1,3),
(1,3)])
assert_equal(xy,xy_result)
def test_node_degree_xy_multigraph(self):
xy=sorted(mixing.node_degree_xy(self.M))
xy_result=sorted([(2,3),
(2,3),
(3,2),
(3,2),
(2,3),
(3,2),
(1,2),
(2,1)])
assert_equal(xy,xy_result)
def test_node_degree_xy_selfloop(self):
xy=sorted(mixing.node_degree_xy(self.S))
xy_result=sorted([(2,2),
(2,2)])
assert_equal(xy,xy_result)
def test_degree_mixing_dict_undirected(self):
d=mixing.degree_mixing_dict(self.P4)
d_result={1:{2:2},
2:{1:2,2:2},
}
assert_equal(d,d_result)
def test_degree_mixing_dict_directed(self):
d=mixing.degree_mixing_dict(self.D)
print(d)
d_result={1:{3:2},
2:{1:1,3:1},
3:{}
}
assert_equal(d,d_result)
def test_degree_mixing_dict_multigraph(self):
d=mixing.degree_mixing_dict(self.M)
d_result={1:{2:1},
2:{1:1,3:3},
3:{2:3}
}
assert_equal(d,d_result)
class TestDegreeMixingMatrix(object):
@classmethod
def setupClass(cls):
global np
global npt
try:
import numpy as np
import numpy.testing as npt
except ImportError:
raise SkipTest('NumPy not available.')
def setUp(self):
self.P4=networkx.path_graph(4)
self.D=networkx.DiGraph()
self.D.add_edges_from([(0, 2), (0, 3), (1, 3), (2, 3)])
self.M=networkx.MultiGraph()
self.M.add_path(list(range(4)))
self.M.add_edge(0,1)
self.S=networkx.Graph()
self.S.add_edges_from([(0,0),(1,1)])
def test_degree_mixing_matrix_undirected(self):
a_result=np.array([[0,0,0],
[0,0,2],
[0,2,2]]
)
a=mixing.degree_mixing_matrix(self.P4,normalized=False)
npt.assert_equal(a,a_result)
a=mixing.degree_mixing_matrix(self.P4)
npt.assert_equal(a,a_result/float(a_result.sum()))
def test_degree_mixing_matrix_directed(self):
a_result=np.array([[0,0,0,0],
[0,0,0,2],
[0,1,0,1],
[0,0,0,0]]
)
a=mixing.degree_mixing_matrix(self.D,normalized=False)
npt.assert_equal(a,a_result)
a=mixing.degree_mixing_matrix(self.D)
npt.assert_equal(a,a_result/float(a_result.sum()))
def test_degree_mixing_matrix_multigraph(self):
a_result=np.array([[0,0,0,0],
[0,0,1,0],
[0,1,0,3],
[0,0,3,0]]
)
a=mixing.degree_mixing_matrix(self.M,normalized=False)
npt.assert_equal(a,a_result)
a=mixing.degree_mixing_matrix(self.M)
npt.assert_equal(a,a_result/float(a_result.sum()))
def test_degree_mixing_matrix_selfloop(self):
a_result=np.array([[0,0,0],
[0,0,0],
[0,0,2]]
)
a=mixing.degree_mixing_matrix(self.S,normalized=False)
npt.assert_equal(a,a_result)
a=mixing.degree_mixing_matrix(self.S)
npt.assert_equal(a,a_result/float(a_result.sum()))
def test_degree_assortativity_undirected(self):
r=mixing.degree_assortativity(self.P4)
npt.assert_almost_equal(r,-1.0/2,decimal=4)
def test_degree_assortativity_directed(self):
r=mixing.degree_assortativity(self.D)
npt.assert_almost_equal(r,-0.57735,decimal=4)
def test_degree_assortativity_multigraph(self):
r=mixing.degree_assortativity(self.M)
npt.assert_almost_equal(r,-1.0/7.0,decimal=4)
class TestDegreeMixingMatrixPearsonr(object):
@classmethod
def setupClass(cls):
global np
global npt
try:
import numpy as np
import numpy.testing as npt
except ImportError:
raise SkipTest('NumPy not available.')
try:
import scipy
except ImportError:
raise SkipTest('SciPy not available.')
def setUp(self):
self.P4=networkx.path_graph(4)
self.D=networkx.DiGraph()
self.D.add_edges_from([(0, 2), (0, 3), (1, 3), (2, 3)])
self.M=networkx.MultiGraph()
self.M.add_path(list(range(4)))
self.M.add_edge(0,1)
self.S=networkx.Graph()
self.S.add_edges_from([(0,0),(1,1)])
def test_degree_assortativity_undirected(self):
r=mixing.degree_pearsonr(self.P4)
npt.assert_almost_equal(r,-1.0/2,decimal=4)
def test_degree_assortativity_directed(self):
r=mixing.degree_pearsonr(self.D)
npt.assert_almost_equal(r,-0.57735,decimal=4)
def test_degree_assortativity_multigraph(self):
r=mixing.degree_pearsonr(self.M)
npt.assert_almost_equal(r,-1.0/7.0,decimal=4)
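# Illustrative sketch (added; not part of the original test module). The mixing
# helpers exercised above can also be called directly; the expected value here
# mirrors test_degree_assortativity_undirected:
#
#     G = networkx.path_graph(4)
#     r = mixing.degree_assortativity(G)  # approximately -0.5 for the 4-node path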
|
StarcoderdataPython
|
1740378
|
import json
import argparse
from typing import List, Dict
import glob
import os
import pathlib
import pdb
import subprocess
from io import StringIO
import torch
from spacy.tokenizer import Tokenizer
from spacy.lang.en import English
import logging
from tqdm import tqdm
from matplotlib import pyplot as plt
import numpy as np
import torch.autograd.profiler as profiler
from torch.nn import functional as F
import pandas as pd
import kornia
from encoders import LSTMEncoder
from language_only import LanguageEncoder
from language_embedders import RandomEmbedder
from unet_module import BaseUNet, UNetWithLanguage, UNetWithBlocks
from unet_shared import SharedUNet
from mlp import MLP
from data import DatasetReader
from train_language_encoder import get_free_gpu, load_data, get_vocab, LanguageTrainer, FlatLanguageTrainer
class LanguageAloneTrainer(FlatLanguageTrainer):
def __init__(self,
train_data: List,
val_data: List,
encoder: LanguageEncoder,
optimizer: torch.optim.Optimizer,
num_epochs: int,
num_blocks: int,
device: torch.device,
checkpoint_dir: str,
num_models_to_keep: int,
generate_after_n: int,
depth: int = 7,
best_epoch: int = -1,
zero_weight: float = 0.05):
super(LanguageAloneTrainer, self).__init__(train_data,
val_data,
encoder,
optimizer,
num_epochs,
num_blocks,
device,
checkpoint_dir,
num_models_to_keep,
generate_after_n,
depth,
best_epoch)
self.xent_loss_fxn = torch.nn.CrossEntropyLoss()
def train_and_validate_one_epoch(self, epoch):
print(f"Training epoch {epoch}...")
self.encoder.train()
skipped = 0
for b, batch_instance in tqdm(enumerate(self.train_data)):
self.optimizer.zero_grad()
lang_outputs = self.encoder(batch_instance)
loss = self.compute_loss(batch_instance, lang_outputs)
loss.backward()
self.optimizer.step()
print(f"skipped {skipped} examples")
print(f"Validating epoch {epoch}...")
total = 0
total_block_acc = 0.0
self.encoder.eval()
for b, dev_batch_instance in tqdm(enumerate(self.val_data)):
block_acc = self.validate(dev_batch_instance, epoch, b, 0)
total_block_acc += block_acc
total += 1
mean_block_acc = total_block_acc / total
print(f"Epoch {epoch} has block acc {mean_block_acc * 100}")
return mean_block_acc, 0.0
def compute_loss(self, inputs, lang_outputs):
pred_next_block_logits = lang_outputs["pred_block_logits"]
true_next_block_idxs = inputs["block_to_move"]
true_next_block_idxs = true_next_block_idxs.to(self.device).long().reshape(-1)
block_loss = self.xent_loss_fxn(pred_next_block_logits, true_next_block_idxs)
return block_loss
def validate(self, batch_instance, epoch_num, batch_num, instance_num):
self.encoder.eval()
#pdb.set_trace()
lang_outputs= self.encoder(batch_instance)
block_accuracy = self.compute_block_accuracy(batch_instance, lang_outputs)
return block_accuracy
def main(args):
if args.binarize_blocks:
args.num_blocks = 1
device = "cpu"
if args.cuda is not None:
free_gpu_id = get_free_gpu()
if free_gpu_id > -1:
device = f"cuda:{free_gpu_id}"
device = torch.device(device)
print(f"On device {device}")
test = torch.ones((1))
test = test.to(device)
# load the data
dataset_reader = DatasetReader(args.train_path,
args.val_path,
None,
batch_by_line = args.traj_type != "flat",
traj_type = args.traj_type,
batch_size = args.batch_size,
max_seq_length = args.max_seq_length,
do_filter = args.do_filter,
top_only = args.top_only,
binarize_blocks = args.binarize_blocks)
checkpoint_dir = pathlib.Path(args.checkpoint_dir)
if not args.test:
print(f"Reading data from {args.train_path}")
train_vocab = dataset_reader.read_data("train")
try:
os.mkdir(checkpoint_dir)
except FileExistsError:
pass
with open(checkpoint_dir.joinpath("vocab.json"), "w") as f1:
json.dump(list(train_vocab), f1)
else:
print(f"Reading vocab from {checkpoint_dir}")
with open(checkpoint_dir.joinpath("vocab.json")) as f1:
train_vocab = json.load(f1)
print(f"Reading data from {args.val_path}")
dev_vocab = dataset_reader.read_data("dev")
print(f"got data")
# construct the vocab and tokenizer
nlp = English()
tokenizer = Tokenizer(nlp.vocab)
print(f"constructing model...")
# get the embedder from args
if args.embedder == "random":
embedder = RandomEmbedder(tokenizer, train_vocab, args.embedding_dim, trainable=True)
else:
raise NotImplementedError(f"No embedder {args.embedder}")
# get the encoder from args
if args.encoder == "lstm":
encoder = LSTMEncoder(input_dim = args.embedding_dim,
hidden_dim = args.encoder_hidden_dim,
num_layers = args.encoder_num_layers,
dropout = args.dropout,
bidirectional = args.bidirectional)
else:
        raise NotImplementedError(f"No encoder {args.encoder}")
    # construct the model
encoder = LanguageEncoder(embedder,
encoder,
device )
if args.cuda is not None:
encoder= encoder.cuda(device)
print(encoder)
# construct optimizer
optimizer = torch.optim.Adam(encoder.parameters())
#optimizer = torch.optim.SGD(encoder.parameters(), lr = 0.01 )
best_epoch = -1
if not args.test:
if not args.resume:
try:
os.mkdir(args.checkpoint_dir)
except FileExistsError:
# file exists
try:
assert(len(glob.glob(os.path.join(args.checkpoint_dir, "*.th"))) == 0)
except AssertionError:
raise AssertionError(f"Output directory {args.checkpoint_dir} non-empty, will not overwrite!")
else:
# resume from pre-trained
state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath("best.th"))
encoder.load_state_dict(state_dict, strict=True)
# get training info
best_checkpoint_data = json.load(open(pathlib.Path(args.checkpoint_dir).joinpath("best_training_state.json")))
print(f"best_checkpoint_data {best_checkpoint_data}")
best_epoch = best_checkpoint_data["epoch"]
# save arg config to checkpoint_dir
with open(pathlib.Path(args.checkpoint_dir).joinpath("config.json"), "w") as f1:
json.dump(args.__dict__, f1)
# construct trainer
trainer = LanguageAloneTrainer(train_data = dataset_reader.data["train"],
val_data = dataset_reader.data["dev"],
encoder = encoder,
optimizer = optimizer,
num_epochs = args.num_epochs,
num_blocks = args.num_blocks,
device = device,
checkpoint_dir = args.checkpoint_dir,
num_models_to_keep = args.num_models_to_keep,
generate_after_n = 110,
depth = 0,
best_epoch = best_epoch,
zero_weight = 0)
trainer.train()
else:
# test-time, load best model
print(f"loading model weights from {args.checkpoint_dir}")
state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath("best.th"))
encoder.load_state_dict(state_dict, strict=True)
trainer = LanguageAloneTrainer(train_data = dataset_reader.data["train"],
val_data = dataset_reader.data["dev"],
encoder = encoder,
optimizer = optimizer,
num_epochs = args.num_epochs,
num_blocks = args.num_blocks,
device = device,
checkpoint_dir = args.checkpoint_dir,
num_models_to_keep = args.num_models_to_keep,
generate_after_n = 110,
depth = 0,
best_epoch = best_epoch,
zero_weight = 0)
print(f"evaluating")
        trainer.evaluate()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# training
parser.add_argument("--test", action="store_true", help="load model and test")
parser.add_argument("--resume", action="store_true", help="resume training a model")
# data
parser.add_argument("--train-path", type=str, help="path to train data")
parser.add_argument("--val-path", type=str, help = "path to dev data" )
parser.add_argument("--num-blocks", type=int, default=20)
parser.add_argument("--binarize-blocks", action="store_true", help="flag to treat block prediction as binary task instead of num-blocks-way classification")
parser.add_argument("--traj-type", type=str, default="flat", choices = ["flat", "trajectory"])
parser.add_argument("--batch-size", type=int, default = 32)
parser.add_argument("--max-seq-length", type=int, default = 65)
parser.add_argument("--do-filter", action="store_true", help="set if we want to restrict prediction to the block moved")
parser.add_argument("--top-only", action="store_true", help="set if we want to train/predict only the top-most slice of the top-down view")
# language embedder
parser.add_argument("--embedder", type=str, default="random", choices = ["random", "glove"])
parser.add_argument("--embedding-dim", type=int, default=300)
# language encoder
parser.add_argument("--encoder", type=str, default="lstm", choices = ["lstm", "transformer"])
parser.add_argument("--encoder-hidden-dim", type=int, default=128)
parser.add_argument("--encoder-num-layers", type=int, default=2)
parser.add_argument("--bidirectional", action="store_true")
# block mlp
parser.add_argument("--compute-block-dist", action="store_true")
parser.add_argument("--mlp-hidden-dim", type=int, default = 128)
parser.add_argument("--mlp-num-layers", type=int, default = 3)
# misc
parser.add_argument("--dropout", type=float, default=0.2)
parser.add_argument("--cuda", type=int, default=None)
parser.add_argument("--checkpoint-dir", type=str, default="models/language_pretrain")
parser.add_argument("--num-models-to-keep", type=int, default = 5)
parser.add_argument("--num-epochs", type=int, default=3)
args = parser.parse_args()
main(args)
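# Illustrative invocation sketch (added; the script name and data paths are
# placeholders, not values from the original repository). Every flag below is
# defined in the argparse section above:
#
#     python language_alone_trainer.py \
#         --train-path data/train.json --val-path data/dev.json \
#         --embedder random --encoder lstm --bidirectional \
#         --batch-size 32 --num-epochs 3 --cuda 0 \
#         --checkpoint-dir models/language_pretrain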
|
StarcoderdataPython
|
170348
|
<filename>rail/likelihood.py
"""
A class to represent a Likelihood
"""
from collections import UserDict
from matplotlib import pyplot as plt
import numpy as np
class Likelihood(UserDict):
"""
A class to represent a Likelihood
"""
def __init__(self, lam: float) -> None:
self.data = {}
if lam < 0:
raise ValueError(
"Likelihood value lam must be greater than or equal to zero."
)
self.data["name"] = str(lam)
self.data["lam"] = lam
    def plot(self, axes=None) -> tuple:
        """
        A method to plot the likelihood as a histogram of Poisson samples
        """
        # Draw onto the supplied axes if given, otherwise the current axes.
        if axes is None:
            axes = plt.gca()
        s = np.random.poisson(self.data["lam"], 10000)
        axes.set_title("%s (histogram)" % (self.data["name"]))
        axes.set_ylabel("relative frequency")
        axes.set_xlabel("likelihood")
        return axes.hist(s, 14)
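# Minimal usage sketch (added; not part of the original module): draws a
# histogram of Poisson samples for an assumed rate parameter of 3.0.
if __name__ == "__main__":
    likelihood = Likelihood(lam=3.0)
    likelihood.plot()
    plt.show()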
|
StarcoderdataPython
|
49963
|
# Generated by Django 2.0.3 on 2020-01-31 07:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app1', '0003_auto_20200129_0337'),
]
operations = [
migrations.AlterField(
model_name='flight',
name='stripImage',
field=models.ImageField(upload_to=''),
),
]
|
StarcoderdataPython
|
164115
|
from rest_framework import serializers
from procurebd_api.models import *
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = ProfileUser
#fields = ('name', 'ranking')
fields = '__all__'
class ItemSerializer(serializers.ModelSerializer):
class Meta:
model = Item
fields ='__all__'
class VendorSerializer(serializers.ModelSerializer):
class Meta:
model = Vendor
fields ='__all__'
class OrderSerializer(serializers.ModelSerializer):
class Meta:
model = Order
fields ='__all__'
class ReportSerializer(serializers.ModelSerializer):
class Meta:
model = Order
fields ='__all__'
class TransactionSerializer(serializers.ModelSerializer):
class Meta:
model = Transaction
fields ='__all__'
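# Illustrative usage sketch (added; the queryset and concrete fields are
# assumptions, since the models are defined in procurebd_api.models):
#
#     serializer = ItemSerializer(Item.objects.first())
#     serializer.data  # a dict of every Item field, per fields = '__all__'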
|
StarcoderdataPython
|