id (stringlengths 1-7) | text (stringlengths 6-1.03M) | dataset_id (stringclasses: 1 value)
---|---|---|
183047
|
# Generated by Django 2.1.8 on 2019-04-15 09:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('froide_crowdfunding', '0004_contribution_public'),
]
operations = [
migrations.AddField(
model_name='crowdfunding',
name='public_interest',
field=models.TextField(blank=True),
),
]
|
StarcoderdataPython
|
3329082
|
import json
from base64 import b64encode
from types import ModuleType
from requests import session
from . import resources
from .constants import URL, Stage
from .utils import capitalize_camel_case
from .version import VERSION
RESOURCE_PREFIX = "_resource_"
RESOURCE_CLASSES = {}
for name, module in resources.__dict__.items():
capitalized_name = capitalize_camel_case(name)
is_module = isinstance(module, ModuleType)
is_in_module = capitalized_name in getattr(module, "__dict__", {})
if is_module and is_in_module:
RESOURCE_CLASSES[name] = module.__dict__[capitalized_name]
class Glovo:
base_url = None
def __init__(self, api_key, api_secret, stage=Stage.PRODUCTION):
"""Initialize a Glovo client object with session.
Also includes optional auth handler and options.
"""
self.api_key = api_key
self.api_secret = api_secret
self.stage = stage
self.session = session()
self._set_auth_string()
self._set_client_headers()
self._set_base_url()
for name, klass in RESOURCE_CLASSES.items():
setattr(self, RESOURCE_PREFIX + name, klass(self))
@staticmethod
def _get_version():
return ".".join(VERSION)
@staticmethod
def _update_request(data, options):
"""Update The resource data and header options."""
data = json.dumps(data)
if "headers" not in options:
options["headers"] = {}
options["headers"].update({"Content-type": "application/json"})
return data, options
def _set_auth_string(self):
raw_auth_string = "{0}:{1}".format(self.api_key, self.api_secret).encode(
"utf-8"
)
self.auth_string = b64encode(raw_auth_string).decode("utf-8")
def _set_client_headers(self):
self.session.headers.update(
{
"User-Agent": "Globo-API-Python/{0}".format(self._get_version()),
"Authorization": "Basic {0}".format(self.auth_string),
"Content-type": "application/json",
"Accept": "application/json",
}
)
def _set_base_url(self):
prefix = URL.PREFIX[self.stage]
self.base_url = URL.BASE_FORMAT.format(prefix=prefix)
def _set_stage(self, stage):
self.stage = stage
self._set_base_url()
def enable_test_mode(self):
self._set_stage(Stage.TEST)
def disable_test_mode(self):
self._set_stage(Stage.PRODUCTION)
def request(self, method, path, **options):
"""Dispatch a request to the Glovo HTTP API."""
url = "{}{}".format(self.base_url, path)
response = getattr(self.session, method)(url, **options)
json_response = response.json()
return {"status": response.status_code, "data": json_response}
def get(self, path, params, **options):
"""Parse GET request options and dispatches a request."""
return self.request("get", path, params=params, **options)
# PATCH method is never used on Glovo resources
# def patch(self, path, data, **options):
# """Parse PATCH request options and dispatches a request."""
# data, options = self._update_request(data, options)
# return self.request("patch", path, data=data, **options)
def post(self, path, data, **options):
"""Parse POST request options and dispatches a request."""
data, options = self._update_request(data, options)
return self.request("post", path, data=data, **options)
# DELETE method is never used on Glovo resources
# def delete(self, path, data, **options):
# """Parse DELETE request options and dispatches a request."""
# data, options = self._update_request(data, options)
# return self.request("delete", path, data=data, **options)
# PUT method is never used on Glovo resources
# def put(self, path, data, **options):
# """Parse PUT request options and dispatches a request."""
# data, options = self._update_request(data, options)
# return self.request("put", path, data=data, **options)
def __getattr__(self, name):
# This method is called when normal attribute lookup for a property
# named `name` fails. In that situation, if the property name does not
# start with `RESOURCE_PREFIX` ...
if not name.startswith(RESOURCE_PREFIX):
    # ... we retry the lookup with the prefixed version of the
    # attribute name.
return getattr(self, RESOURCE_PREFIX + name)
return super(Glovo, self).__getattribute__(name)
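# Illustrative usage sketch: a minimal sense of how this client is driven. The
# resource name `orders` below is a hypothetical placeholder; the real resource
# classes come from the sibling `resources` package, which is not shown here.
#
#     client = Glovo(api_key="my-key", api_secret="my-secret")
#     client.enable_test_mode()            # switch base_url to the Stage.TEST prefix
#     result = client.get("/orders/some-id", params={})
#     print(result["status"], result["data"])
#
#     # Attribute access such as `client.orders` is resolved by __getattr__ above
#     # to the `_resource_orders` instance created in __init__.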
|
StarcoderdataPython
|
3270178
|
import databench
class Parameters(databench.Analysis):
@databench.on
def test_fn(self, first_param, second_param=100):
"""Echo params."""
yield self.emit('test_fn', (first_param, second_param))
@databench.on
def test_action(self):
"""process an action without a message"""
yield self.emit('test_action_ack')
@databench.on
def test_state(self, key, value):
"""Store some test data."""
yield self.set_state({key: value})
@databench.on
def test_set_data(self, key, value):
"""Store some test data."""
yield self.data.set(key, value)
@databench.on
def test_class_data(self, key, value):
"""Store key-value in class data."""
yield self.class_data.set(key, value)
|
StarcoderdataPython
|
3244840
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Source file for floating builder testcases."""
import calendar
import datetime
import itertools
import os
import time
import unittest
import test_env # pylint: disable=W0611,W0403
import mock
from master import floating_builder as fb
def _to_timestamp(dt):
# Calculate the offset between local timezone and UTC.
current_time = time.mktime(dt.timetuple())
offset = (datetime.datetime.fromtimestamp(current_time) -
datetime.datetime.utcfromtimestamp(current_time))
return calendar.timegm((dt - offset).timetuple())
class _FakeSlaveStatus(object):
def __init__(self, name):
self.name = name
self.connect_times = []
self.last_message_received = None
def lastMessageReceived(self):
return self.last_message_received
class _FakeSlave(object):
def __init__(self, slavename):
self.slavename = slavename
self.slave_status = None
self.offline = False
def _set_last_seen(self, now, **kwargs):
td = datetime.timedelta(**kwargs)
self.slave_status = _FakeSlaveStatus(self.slavename)
self.slave_status.last_message_received = _to_timestamp(now + td)
def __str__(self):
return self.slavename
class _FakeBuilder(object):
def __init__(self, name, slaves):
self.name = name
self._all_slaves = slaves
self.botmaster = mock.MagicMock()
self.builder_status = mock.MagicMock()
self.builder_status.getSlaves.side_effect = lambda: [
s.slave_status for s in self._all_slaves
if s.slave_status]
self._online_slaves = ()
self._busy_slaves = ()
def __repr__(self):
return self.name
@property
def slaves(self):
return [_FakeSlaveBuilder(s, self)
for s in self._all_slaves
if s.slavename in self._online_slaves]
@property
def slavebuilders(self):
"""Returns the list of slavebuilders that would be handed to
NextSlaveFunc.
This is the set of slaves that are available for scheduling. We derive
this by returning all slaves that are both online and not busy.
"""
return self._get_slave_builders(lambda s:
s.slavename in self._online_slaves and
s.slavename not in self._busy_slaves)
def _get_slave_builders(self, fn):
return [_FakeSlaveBuilder(slave, self)
for slave in self._all_slaves
if fn(slave)]
def set_online_slaves(self, *slavenames):
self._online_slaves = set(slavenames)
def set_busy_slaves(self, *slavenames):
self._busy_slaves = set(slavenames)
class _FakeSlaveBuilder(object):
def __init__(self, slave, builder):
self.slave = slave
self.builder = builder
def __repr__(self):
return '{%s/%s}' % (self.builder.name, self.slave.slavename)
class FloatingBuilderTest(unittest.TestCase):
def setUp(self):
self._mocks = (
mock.patch('master.floating_builder._get_now'),
mock.patch('master.floating_builder.PokeBuilderTimer.reset'),
)
for patcher in self._mocks:
patcher.start()
# Mock current date/time.
self.now = datetime.datetime(2016, 1, 1, 8, 0, 0) # 1/1/2016 @8:00
fb._get_now.side_effect = lambda: self.now
# Mock PokeBuilderTimer to record when the poke builder was set, but not
# actually schedule any reactor magic.
self.poke_delta = None
def record_poke_delta(delta):
self.poke_delta = delta
fb.PokeBuilderTimer.reset.side_effect = record_poke_delta
self._slaves = dict((s, _FakeSlave(s)) for s in (
'primary-a', 'primary-b', 'floating-a', 'floating-b',
))
self.builder = _FakeBuilder(
'Test Builder',
[s[1] for s in sorted(self._slaves.iteritems())],
)
def tearDown(self):
for patcher in reversed(self._mocks):
patcher.stop()
def testJustStartedNoPrimariesOnlineWaits(self):
fs = fb.FloatingSet()
fs.AddPrimary('primary-a')
fs.AddFloating('floating-a', 'floating-b')
fnsf = fs.NextSlaveFunc(datetime.timedelta(seconds=10))
self.builder.set_online_slaves('floating-a', 'floating-b')
nsb = fnsf(self.builder, self.builder.slavebuilders)
self.assertIsNone(nsb)
self.assertEqual(self.poke_delta, datetime.timedelta(seconds=10))
self.now += datetime.timedelta(seconds=11)
nsb = fnsf(self.builder, self.builder.slavebuilders)
self.assertIsNotNone(nsb)
self.assertEqual(nsb.slave.slavename, 'floating-a')
def testPrimaryBuilderIsSelectedWhenAvailable(self):
fs = fb.FloatingSet()
fs.AddPrimary('primary-a')
fs.AddFloating('floating-a', 'floating-b')
fnsf = fs.NextSlaveFunc(datetime.timedelta(seconds=10))
self.builder.set_online_slaves('primary-a', 'floating-a', 'floating-b')
nsb = fnsf(self.builder, self.builder.slavebuilders)
self.assertIsNotNone(nsb)
self.assertEqual(nsb.slave.slavename, 'primary-a')
def testPrimaryBuilderIsSelectedWhenOneIsAvailableAndOneIsBusy(self):
fs = fb.FloatingSet()
fs.AddPrimary('primary-a', 'primary-b')
fs.AddFloating('floating-a', 'floating-b')
fnsf = fs.NextSlaveFunc(datetime.timedelta(seconds=10))
self.builder.set_online_slaves('primary-a', 'primary-b', 'floating-a',
'floating-b')
self.builder.set_busy_slaves('primary-a')
nsb = fnsf(self.builder, self.builder.slavebuilders)
self.assertIsNotNone(nsb)
self.assertEqual(nsb.slave.slavename, 'primary-b')
def testNoBuilderIsSelectedWhenPrimariesAreOfflineWithinGrace(self):
fs = fb.FloatingSet()
fs.AddPrimary('primary-a', 'primary-b')
fs.AddFloating('floating-a', 'floating-b')
fnsf = fs.NextSlaveFunc(datetime.timedelta(seconds=10))
self.now += datetime.timedelta(seconds=30)
self.builder.set_online_slaves('floating-a')
self._slaves['primary-b']._set_last_seen(self.now, seconds=-1)
nsb = fnsf(self.builder, self.builder.slavebuilders)
self.assertIsNone(nsb)
self.assertEqual(self.poke_delta, datetime.timedelta(seconds=9))
def testFloatingBuilderIsSelectedWhenPrimariesAreOfflineForAWhile(self):
fs = fb.FloatingSet()
fs.AddPrimary('primary-a', 'primary-b')
fs.AddFloating('floating-a', 'floating-b')
fnsf = fs.NextSlaveFunc(datetime.timedelta(seconds=10))
self.now += datetime.timedelta(seconds=30)
self.builder.set_online_slaves('floating-a')
nsb = fnsf(self.builder, self.builder.slavebuilders)
self.assertIsNotNone(nsb)
self.assertEqual(nsb.slave.slavename, 'floating-a')
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
3229461
|
<filename>venv/lib/python3.7/site-packages/scapy/modules/p0f.py
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) <NAME> <<EMAIL>>
# This program is published under a GPLv2 license
"""
Clone of p0f passive OS fingerprinting
"""
from __future__ import absolute_import
from __future__ import print_function
import time
import struct
import os
import socket
import random
from scapy.data import KnowledgeBase
from scapy.config import conf
from scapy.compat import raw
from scapy.layers.inet import IP, TCP, TCPOptions
from scapy.packet import NoPayload, Packet
from scapy.error import warning, Scapy_Exception, log_runtime
from scapy.volatile import RandInt, RandByte, RandNum, RandShort, RandString
from scapy.sendrecv import sniff
from scapy.modules import six
from scapy.modules.six.moves import map, range
if conf.route is None:
# unused import, only to initialize conf.route
import scapy.route # noqa: F401
conf.p0f_base = "/etc/p0f/p0f.fp"
conf.p0fa_base = "/etc/p0f/p0fa.fp"
conf.p0fr_base = "/etc/p0f/p0fr.fp"
conf.p0fo_base = "/etc/p0f/p0fo.fp"
###############
# p0f stuff #
###############
# File format (according to p0f.fp) :
#
# wwww:ttt:D:ss:OOO...:QQ:OS:Details
#
# wwww - window size
# ttt - initial TTL
# D - don't fragment bit (0=unset, 1=set)
# ss - overall SYN packet size
# OOO - option value and order specification
# QQ - quirks list
# OS - OS genre
# details - OS description
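# An example entry in that format (illustrative only, not copied from a real p0f.fp):
#
#   5840:64:1:60:M1460,S,T,N,W0:.:Linux:2.6 (illustrative)
#
# i.e. window size 5840, initial TTL 64, DF bit set, 60-byte SYN, options
# MSS=1460 / SAckOK / Timestamp / NOP / WScale=0, and no quirks.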
class p0fKnowledgeBase(KnowledgeBase):
def __init__(self, filename):
KnowledgeBase.__init__(self, filename)
# self.ttl_range=[255]
def lazy_init(self):
try:
f = open(self.filename)
except IOError:
warning("Can't open base %s", self.filename)
return
try:
self.base = []
for line in f:
if line[0] in ["#", "\n"]:
continue
line = tuple(line.split(":"))
if len(line) < 8:
continue
def a2i(x):
if x.isdigit():
return int(x)
return x
li = [a2i(e) for e in line[1:4]]
# if li[0] not in self.ttl_range:
# self.ttl_range.append(li[0])
# self.ttl_range.sort()
self.base.append((line[0], li[0], li[1], li[2], line[4],
line[5], line[6], line[7][:-1]))
except Exception:
warning("Can't parse p0f database (new p0f version ?)")
self.base = None
f.close()
p0f_kdb, p0fa_kdb, p0fr_kdb, p0fo_kdb = None, None, None, None
def p0f_load_knowledgebases():
global p0f_kdb, p0fa_kdb, p0fr_kdb, p0fo_kdb
p0f_kdb = p0fKnowledgeBase(conf.p0f_base)
p0fa_kdb = p0fKnowledgeBase(conf.p0fa_base)
p0fr_kdb = p0fKnowledgeBase(conf.p0fr_base)
p0fo_kdb = p0fKnowledgeBase(conf.p0fo_base)
p0f_load_knowledgebases()
def p0f_selectdb(flags):
# tested flags: S, R, A
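# For reference: in the TCP flags byte FIN=0x01, SYN=0x02, RST=0x04, PSH=0x08
# and ACK=0x10, so masking with 0x16 keeps only the SYN, RST and ACK bits
# before the comparisons below.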
if flags & 0x16 == 0x2:
# SYN
return p0f_kdb
elif flags & 0x16 == 0x12:
# SYN/ACK
return p0fa_kdb
elif flags & 0x16 in [0x4, 0x14]:
# RST RST/ACK
return p0fr_kdb
elif flags & 0x16 == 0x10:
# ACK
return p0fo_kdb
else:
return None
def packet2p0f(pkt):
pkt = pkt.copy()
pkt = pkt.__class__(raw(pkt))
while pkt.haslayer(IP) and pkt.haslayer(TCP):
pkt = pkt.getlayer(IP)
if isinstance(pkt.payload, TCP):
break
pkt = pkt.payload
if not isinstance(pkt, IP) or not isinstance(pkt.payload, TCP):
raise TypeError("Not a TCP/IP packet")
# if pkt.payload.flags & 0x7 != 0x02: #S,!F,!R
# raise TypeError("Not a SYN or SYN/ACK packet")
db = p0f_selectdb(pkt.payload.flags)
# t = p0f_kdb.ttl_range[:]
# t += [pkt.ttl]
# t.sort()
# ttl=t[t.index(pkt.ttl)+1]
ttl = pkt.ttl
ss = len(pkt)
# from p0f/config.h : PACKET_BIG = 100
if ss > 100:
if db == p0fr_kdb:
# p0fr.fp: "Packet size may be wildcarded. The meaning of
# wildcard is, however, hardcoded as 'size >
# PACKET_BIG'"
ss = '*'
else:
ss = 0
if db == p0fo_kdb:
# p0fo.fp: "Packet size MUST be wildcarded."
ss = '*'
ooo = ""
mss = -1
qqT = False
qqP = False
# qqBroken = False
ilen = (pkt.payload.dataofs << 2) - 20 # from p0f.c
for option in pkt.payload.options:
ilen -= 1
if option[0] == "MSS":
ooo += "M" + str(option[1]) + ","
mss = option[1]
# FIXME: qqBroken
ilen -= 3
elif option[0] == "WScale":
ooo += "W" + str(option[1]) + ","
# FIXME: qqBroken
ilen -= 2
elif option[0] == "Timestamp":
if option[1][0] == 0:
ooo += "T0,"
else:
ooo += "T,"
if option[1][1] != 0:
qqT = True
ilen -= 9
elif option[0] == "SAckOK":
ooo += "S,"
ilen -= 1
elif option[0] == "NOP":
ooo += "N,"
elif option[0] == "EOL":
ooo += "E,"
if ilen > 0:
qqP = True
else:
if isinstance(option[0], str):
ooo += "?%i," % TCPOptions[1][option[0]]
else:
ooo += "?%i," % option[0]
# FIXME: ilen
ooo = ooo[:-1]
if ooo == "":
ooo = "."
win = pkt.payload.window
if mss != -1:
if mss != 0 and win % mss == 0:
win = "S" + str(win / mss)
elif win % (mss + 40) == 0:
win = "T" + str(win / (mss + 40))
win = str(win)
qq = ""
if db == p0fr_kdb:
if pkt.payload.flags & 0x10 == 0x10:
# p0fr.fp: "A new quirk, 'K', is introduced to denote
# RST+ACK packets"
qq += "K"
# The two next cases should also be only for p0f*r*, but although
# it's not documented (or I have not noticed), p0f seems to
# support the '0' and 'Q' quirks on any databases (or at the least
# "classical" p0f.fp).
if pkt.payload.seq == pkt.payload.ack:
# p0fr.fp: "A new quirk, 'Q', is used to denote SEQ number
# equal to ACK number."
qq += "Q"
if pkt.payload.seq == 0:
# p0fr.fp: "A new quirk, '0', is used to denote packets
# with SEQ number set to 0."
qq += "0"
if qqP:
qq += "P"
if pkt.id == 0:
qq += "Z"
if pkt.options != []:
qq += "I"
if pkt.payload.urgptr != 0:
qq += "U"
if pkt.payload.reserved != 0:
qq += "X"
if pkt.payload.ack != 0:
qq += "A"
if qqT:
qq += "T"
if db == p0fo_kdb:
if pkt.payload.flags & 0x20 != 0:
# U
# p0fo.fp: "PUSH flag is excluded from 'F' quirk checks"
qq += "F"
else:
if pkt.payload.flags & 0x28 != 0:
# U or P
qq += "F"
if db != p0fo_kdb and not isinstance(pkt.payload.payload, NoPayload):
# p0fo.fp: "'D' quirk is not checked for."
qq += "D"
# FIXME : "!" - broken options segment: not handled yet
if qq == "":
qq = "."
return (db, (win, ttl, pkt.flags.DF, ss, ooo, qq))
def p0f_correl(x, y):
d = 0
# wwww can be "*" or "%nn". "Tnn" and "Snn" should work fine with
# the x[0] == y[0] test.
d += (x[0] == y[0] or y[0] == "*" or (y[0][0] == "%" and x[0].isdigit() and (int(x[0]) % int(y[0][1:])) == 0)) # noqa: E501
# ttl
d += (y[1] >= x[1] and y[1] - x[1] < 32)
for i in [2, 5]:
d += (x[i] == y[i] or y[i] == '*')
# '*' has a special meaning for ss
d += x[3] == y[3]
xopt = x[4].split(",")
yopt = y[4].split(",")
if len(xopt) == len(yopt):
same = True
for i in range(len(xopt)):
if not (xopt[i] == yopt[i] or
(len(yopt[i]) == 2 and len(xopt[i]) > 1 and
yopt[i][1] == "*" and xopt[i][0] == yopt[i][0]) or
(len(yopt[i]) > 2 and len(xopt[i]) > 1 and
yopt[i][1] == "%" and xopt[i][0] == yopt[i][0] and
int(xopt[i][1:]) % int(yopt[i][2:]) == 0)):
same = False
break
if same:
d += len(xopt)
return d
@conf.commands.register
def p0f(pkt):
"""Passive OS fingerprinting: which OS emitted this TCP packet ?
p0f(packet) -> accuracy, [list of guesses]
"""
db, sig = packet2p0f(pkt)
if db:
pb = db.get_base()
else:
pb = []
if not pb:
warning("p0f base empty.")
return []
# s = len(pb[0][0])
r = []
max = len(sig[4].split(",")) + 5
for b in pb:
d = p0f_correl(sig, b)
if d == max:
r.append((b[6], b[7], b[1] - pkt[IP].ttl))
return r
def prnp0f(pkt):
"""Calls p0f and returns a user-friendly output"""
# we should print which DB we use
try:
r = p0f(pkt)
except Exception:
return
if r == []:
r = ("UNKNOWN", "[" + ":".join(map(str, packet2p0f(pkt)[1])) + ":?:?]", None) # noqa: E501
else:
r = r[0]
uptime = None
try:
uptime = pkt2uptime(pkt)
except Exception:
pass
if uptime == 0:
uptime = None
res = pkt.sprintf("%IP.src%:%TCP.sport% - " + r[0] + " " + r[1])
if uptime is not None:
res += pkt.sprintf(" (up: " + str(uptime / 3600) + " hrs)\n -> %IP.dst%:%TCP.dport% (%TCP.flags%)") # noqa: E501
else:
res += pkt.sprintf("\n -> %IP.dst%:%TCP.dport% (%TCP.flags%)")
if r[2] is not None:
res += " (distance " + str(r[2]) + ")"
print(res)
@conf.commands.register
def pkt2uptime(pkt, HZ=100):
"""Calculate the date the machine which emitted the packet booted using TCP timestamp # noqa: E501
pkt2uptime(pkt, [HZ=100])"""
if not isinstance(pkt, Packet):
raise TypeError("Not a TCP packet")
if isinstance(pkt, NoPayload):
raise TypeError("Not a TCP packet")
if not isinstance(pkt, TCP):
return pkt2uptime(pkt.payload)
for opt in pkt.options:
if opt[0] == "Timestamp":
# t = pkt.time - opt[1][0] * 1.0/HZ
# return time.ctime(t)
t = opt[1][0] / HZ
return t
raise TypeError("No timestamp option")
def p0f_impersonate(pkt, osgenre=None, osdetails=None, signature=None,
extrahops=0, mtu=1500, uptime=None):
"""Modifies pkt so that p0f will think it has been sent by a
specific OS. If osdetails is None, then we randomly pick up a
personality matching osgenre. If osgenre and signature are also None,
we use a local signature (using p0f_getlocalsigs). If signature is
specified (as a tuple), we use the signature.
For now, only TCP Syn packets are supported.
Some specifications of the p0f.fp file are not (yet) implemented."""
pkt = pkt.copy()
# pkt = pkt.__class__(raw(pkt))
while pkt.haslayer(IP) and pkt.haslayer(TCP):
pkt = pkt.getlayer(IP)
if isinstance(pkt.payload, TCP):
break
pkt = pkt.payload
if not isinstance(pkt, IP) or not isinstance(pkt.payload, TCP):
raise TypeError("Not a TCP/IP packet")
db = p0f_selectdb(pkt.payload.flags)
if osgenre:
pb = db.get_base()
if pb is None:
pb = []
pb = [x for x in pb if x[6] == osgenre]
if osdetails:
pb = [x for x in pb if x[7] == osdetails]
elif signature:
pb = [signature]
else:
pb = p0f_getlocalsigs()[db]
if db == p0fr_kdb:
# 'K' quirk <=> RST+ACK
if pkt.payload.flags & 0x4 == 0x4:
pb = [x for x in pb if 'K' in x[5]]
else:
pb = [x for x in pb if 'K' not in x[5]]
if not pb:
raise Scapy_Exception("No match in the p0f database")
pers = pb[random.randint(0, len(pb) - 1)]
# options (we start with options because of MSS)
# Take the options already set as "hints" to use in the new packet if we
# can. MSS, WScale and Timestamp can all be wildcarded in a signature, so
# we'll use the already-set values if they're valid integers.
orig_opts = dict(pkt.payload.options)
int_only = lambda val: val if isinstance(val, six.integer_types) else None
mss_hint = int_only(orig_opts.get('MSS'))
wscale_hint = int_only(orig_opts.get('WScale'))
ts_hint = [int_only(o) for o in orig_opts.get('Timestamp', (None, None))]
options = []
if pers[4] != '.':
for opt in pers[4].split(','):
if opt[0] == 'M':
# MSS might have a maximum size because of window size
# specification
if pers[0][0] == 'S':
maxmss = (2**16 - 1) // int(pers[0][1:])
else:
maxmss = (2**16 - 1)
# disregard hint if out of range
if mss_hint and not 0 <= mss_hint <= maxmss:
mss_hint = None
# If we have to randomly pick up a value, we cannot use
# scapy RandXXX() functions, because the value has to be
# set in case we need it for the window size value. That's
# why we use random.randint()
if opt[1:] == '*':
if mss_hint is not None:
options.append(('MSS', mss_hint))
else:
options.append(('MSS', random.randint(1, maxmss)))
elif opt[1] == '%':
coef = int(opt[2:])
if mss_hint is not None and mss_hint % coef == 0:
options.append(('MSS', mss_hint))
else:
options.append((
'MSS', coef * random.randint(1, maxmss // coef)))
else:
options.append(('MSS', int(opt[1:])))
elif opt[0] == 'W':
if wscale_hint and not 0 <= wscale_hint < 2**8:
wscale_hint = None
if opt[1:] == '*':
if wscale_hint is not None:
options.append(('WScale', wscale_hint))
else:
options.append(('WScale', RandByte()))
elif opt[1] == '%':
coef = int(opt[2:])
if wscale_hint is not None and wscale_hint % coef == 0:
options.append(('WScale', wscale_hint))
else:
options.append((
'WScale', coef * RandNum(min=1, max=(2**8 - 1) // coef))) # noqa: E501
else:
options.append(('WScale', int(opt[1:])))
elif opt == 'T0':
options.append(('Timestamp', (0, 0)))
elif opt == 'T':
# Determine first timestamp.
if uptime is not None:
ts_a = uptime
elif ts_hint[0] and 0 < ts_hint[0] < 2**32:
# Note: if first ts is 0, p0f registers it as "T0" not "T",
# hence we don't want to use the hint if it was 0.
ts_a = ts_hint[0]
else:
ts_a = random.randint(120, 100 * 60 * 60 * 24 * 365)
# Determine second timestamp.
if 'T' not in pers[5]:
ts_b = 0
elif ts_hint[1] and 0 < ts_hint[1] < 2**32:
ts_b = ts_hint[1]
else:
# FIXME: RandInt() here does not work (bug (?) in
# TCPOptionsField.m2i often raises "OverflowError:
# long int too large to convert to int" in:
# oval = struct.pack(ofmt, *oval)"
# Actually, this is enough to often raise the error:
# struct.pack('I', RandInt())
ts_b = random.randint(1, 2**32 - 1)
options.append(('Timestamp', (ts_a, ts_b)))
elif opt == 'S':
options.append(('SAckOK', ''))
elif opt == 'N':
options.append(('NOP', None))
elif opt == 'E':
options.append(('EOL', None))
elif opt[0] == '?':
if int(opt[1:]) in TCPOptions[0]:
optname = TCPOptions[0][int(opt[1:])][0]
optstruct = TCPOptions[0][int(opt[1:])][1]
options.append((optname,
struct.unpack(optstruct,
RandString(struct.calcsize(optstruct))._fix()))) # noqa: E501
else:
options.append((int(opt[1:]), ''))
# FIXME: qqP not handled
else:
warning("unhandled TCP option " + opt)
pkt.payload.options = options
# window size
if pers[0] == '*':
pkt.payload.window = RandShort()
elif pers[0].isdigit():
pkt.payload.window = int(pers[0])
elif pers[0][0] == '%':
coef = int(pers[0][1:])
pkt.payload.window = coef * RandNum(min=1, max=(2**16 - 1) // coef)
elif pers[0][0] == 'T':
pkt.payload.window = mtu * int(pers[0][1:])
elif pers[0][0] == 'S':
# needs MSS set
mss = [x for x in options if x[0] == 'MSS']
if not mss:
raise Scapy_Exception("TCP window value requires MSS, and MSS option not set") # noqa: E501
pkt.payload.window = mss[0][1] * int(pers[0][1:])
else:
raise Scapy_Exception('Unhandled window size specification')
# ttl
pkt.ttl = pers[1] - extrahops
# DF flag
pkt.flags |= (2 * pers[2])
# FIXME: ss (packet size) not handled (how ? may be with D quirk
# if present)
# Quirks
if pers[5] != '.':
for qq in pers[5]:
# FIXME: not handled: P, I, X, !
# T handled with the Timestamp option
if qq == 'Z':
pkt.id = 0
elif qq == 'U':
pkt.payload.urgptr = RandShort()
elif qq == 'A':
pkt.payload.ack = RandInt()
elif qq == 'F':
if db == p0fo_kdb:
pkt.payload.flags |= 0x20 # U
else:
pkt.payload.flags |= random.choice([8, 32, 40]) # P/U/PU
elif qq == 'D' and db != p0fo_kdb:
pkt /= conf.raw_layer(load=RandString(random.randint(1, 10))) # XXX p0fo.fp # noqa: E501
elif qq == 'Q':
pkt.payload.seq = pkt.payload.ack
# elif qq == '0': pkt.payload.seq = 0
# if db == p0fr_kdb:
# '0' quirk is actually not only for p0fr.fp (see
# packet2p0f())
if '0' in pers[5]:
pkt.payload.seq = 0
elif pkt.payload.seq == 0:
pkt.payload.seq = RandInt()
while pkt.underlayer:
pkt = pkt.underlayer
return pkt
def p0f_getlocalsigs():
"""This function returns a dictionary of signatures indexed by p0f
db (e.g., p0f_kdb, p0fa_kdb, ...) for the local TCP/IP stack.
You need to have your firewall at least accepting the TCP packets
from/to a high port (30000 <= x <= 40000) on your loopback interface.
Please note that the generated signatures come from the loopback
interface and may (are likely to) be different than those generated on
"normal" interfaces."""
pid = os.fork()
port = random.randint(30000, 40000)
if pid > 0:
# parent: sniff
result = {}
def addresult(res):
# TODO: wildcard window size in some cases? and maybe some
# other values?
if res[0] not in result:
result[res[0]] = [res[1]]
else:
if res[1] not in result[res[0]]:
result[res[0]].append(res[1])
# XXX could we try with a "normal" interface using other hosts
iface = conf.route.route('127.0.0.1')[0]
# each packet is seen twice: S + RA, S + SA + A + FA + A
# XXX are the packets also seen twice on non Linux systems ?
count = 14
pl = sniff(iface=iface, filter='tcp and port ' + str(port), count=count, timeout=3) # noqa: E501
for pkt in pl:
for elt in packet2p0f(pkt):
addresult(elt)
os.waitpid(pid, 0)
elif pid < 0:
log_runtime.error("fork error")
else:
# child: send
# XXX erk
time.sleep(1)
s1 = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
# S & RA
try:
s1.connect(('127.0.0.1', port))
except socket.error:
pass
# S, SA, A, FA, A
s1.bind(('127.0.0.1', port))
s1.connect(('127.0.0.1', port))
# howto: get an RST w/o ACK packet
s1.close()
os._exit(0)
return result
|
StarcoderdataPython
|
1776476
|
import unittest
from solutions.TST import one
class TestSum(unittest.TestCase):
def test_sum(self):
self.assertEqual(one.get(), 1)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
154487
|
from requests.auth import HTTPBasicAuth
def apply_updates(doc, update_dict):
# updates the doc with items from the dict
# returns whether or not any updates were made
should_save = False
for key, value in update_dict.items():
if getattr(doc, key, None) != value:
setattr(doc, key, value)
should_save = True
return should_save
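# Illustrative example: apply_updates mutates `doc` in place and reports whether
# anything changed, e.g.:
#
#     class Doc(object):            # any object with plain attributes works
#         name = "old"
#         count = 1
#
#     doc = Doc()
#     apply_updates(doc, {"name": "new", "count": 1})   # -> True  (name changed)
#     apply_updates(doc, {"name": "new", "count": 1})   # -> False (nothing to change)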
class EndpointMixin(object):
@classmethod
def from_config(cls, config):
return cls(config.url, config.username, config.password)
def _auth(self):
return HTTPBasicAuth(self.username, self.password)
def _urlcombine(self, base, target):
return '{base}{target}'.format(base=base, target=target)
|
StarcoderdataPython
|
39030
|
<filename>hermione/module_templates/__IMPLEMENTED_BASE__/src/ml/preprocessing/preprocessing.py
import pandas as pd
from ml.preprocessing.normalization import Normalizer
from category_encoders import *
import logging
logging.getLogger().setLevel(logging.INFO)
class Preprocessing:
"""
Class to perform data preprocessing before training
"""
def clean_data(self, df: pd.DataFrame):
"""
Perform data cleansing.
Parameters
----------
df : pd.DataFrame
    Dataframe to be processed
Returns
-------
pd.DataFrame
    Cleaned data frame
"""
logging.info("Cleaning data")
df_copy = df.copy()
df_copy['Pclass'] = df_copy.Pclass.astype('object')
df_copy = df_copy.dropna()
return df_copy
def categ_encoding(self, df: pd.DataFrame):
"""
Perform encoding of the categorical variables
Parameters
----------
df : pd.DataFrame
    Dataframe to be processed
Returns
-------
pd.DataFrame
    Data frame with categorical variables one-hot encoded
"""
logging.info("Category encoding")
df_copy = df.copy()
df_copy = pd.get_dummies(df_copy)
return df_copy
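# Illustrative usage sketch: a minimal run on a tiny, made-up Titanic-like frame;
# only the 'Pclass' column is actually required by clean_data above, the other
# columns are illustrative.
if __name__ == "__main__":
    example = pd.DataFrame({
        'Pclass': [1, 3, 2],
        'Sex': ['male', 'female', 'male'],
        'Age': [22.0, 38.0, None],
    })
    prep = Preprocessing()
    cleaned = prep.clean_data(example)       # casts Pclass to object and drops the row with the missing Age
    encoded = prep.categ_encoding(cleaned)   # one-hot encodes the object columns
    logging.info("Encoded columns: %s", list(encoded.columns))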
|
StarcoderdataPython
|
1608688
|
<filename>tests/ea/plotters/progress/conftest.py
import pandas as pd
import pytest
import stk
from .case_data import CaseData
def _get_topology_graph() -> stk.polymer.Linear:
return stk.polymer.Linear(
building_blocks=(
stk.BuildingBlock('BrCCBr', [stk.BromoFactory()]),
),
repeating_unit='A',
num_repeating_units=2,
)
def get_generation(*fitness_values):
v1, v2, v3, *_ = fitness_values
topology_graph = _get_topology_graph()
return (
stk.MoleculeRecord(
topology_graph=topology_graph,
).with_fitness_value(v1),
stk.MoleculeRecord(
topology_graph=topology_graph,
).with_fitness_value(v2),
stk.MoleculeRecord(
topology_graph=topology_graph,
).with_fitness_value(v3),
stk.MoleculeRecord(
topology_graph=topology_graph,
)
)
@pytest.fixture(
scope='session',
params=(
lambda: CaseData(
plotter=stk.ProgressPlotter(
generations=(
stk.Generation(
molecule_records=get_generation(0, 1, 2),
mutation_records=(),
crossover_records=(),
),
stk.Generation(
molecule_records=get_generation(10, 20, 30),
mutation_records=(),
crossover_records=(),
),
stk.Generation(
molecule_records=get_generation(40, 50, 60),
mutation_records=(),
crossover_records=(),
),
stk.Generation(
molecule_records=get_generation(40, 50, 60),
mutation_records=(),
crossover_records=(),
),
stk.Generation(
molecule_records=get_generation(70, 80, 90),
mutation_records=(),
crossover_records=(),
),
),
get_property=lambda record: record.get_fitness_value(),
y_label='Fitness Value',
filter=lambda record:
record.get_fitness_value() is not None,
),
plot_data=pd.DataFrame({
'Generation': [0]*3 + [1]*3 + [2]*3 + [3]*3 + [4]*3,
'Fitness Value': [
2., 1., 0.,
30., 20., 10.,
60., 50., 40.,
60., 50., 40.,
90., 80., 70.,
],
'Type': ['Max', 'Mean', 'Min']*5
}),
),
),
)
def case_data(request) -> CaseData:
return request.param()
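# Note on the expected plot_data above: each generation built by get_generation()
# holds three records with fitness values plus one record whose fitness value is
# never set; the fixture's `filter` (fitness is not None) is there to drop that
# record, so every generation contributes exactly three finite values and the
# expected DataFrame lists them as Max/Mean/Min rows (e.g. generation 0 with
# fitness values 0, 1, 2 yields Max=2, Mean=1, Min=0).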
|
StarcoderdataPython
|
174501
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import edward as ed
from edward.models import Normal, Empirical
from scipy.special import erf
import importlib
import utils
importlib.reload(utils)
from utils import *
class hmc_model:
def __init__(self, activation_fn, data_noise,
b_0_var=1., w_0_var=1., u_var=1., g_var=1.,
hidden_size = 100,
step_size=0.001, n_steps=40, n_samples=1000, burn_in=200, n_predict=50, deep_NN = False):
''' Create an object that will be a Bayesian NN with inference done by HMC. '''
self.name_ = 'hmc_NN_h' + str(hidden_size)
self.activation_fn = activation_fn
self.data_noise = data_noise
self.hidden_size = hidden_size
self.deep_NN = deep_NN
# inference params
self.step_size = step_size # size of steps
self.n_steps = n_steps # no steps in between samples
self.n_samples = n_samples # no samples to collect
self.burn_in = burn_in # drop this number of burn in samples
self.n_predict = n_predict # take this number of when doing predictions
if self.n_samples < self.burn_in:
raise Exception('no. samples is less than burn in samples!')
if self.deep_NN == True:
print('going deep...')
# variance for step fn, relu, erf
self.b_0_var = b_0_var # first layer bias variance
self.w_0_var = w_0_var # first layer weight variance
# variance for rbf - we use williams 1996 notation
# i.e. node = exp(-(x-u)^2 / 2*var_g)
self.g_var = g_var # param of rbf fn (fixed)
self.u_var = u_var # var of centers, as -> inf, goes to stationary cov dist
return
def train(self, X_train, y_train, X_val, is_print=True):
''' set up BNN and run HMC inference '''
def neural_network(X):
# set up the BNN structure using tf
if self.activation_fn == 'relu':
h = tf.maximum(tf.matmul(X, W_0) + b_0,0) # relu
elif self.activation_fn == 'Lrelu':
a=0.2
h = tf.maximum(tf.matmul(X, W_0) + b_0, a*(tf.matmul(X, W_0) + b_0)) # leaky relu
elif self.activation_fn == 'erf':
h = tf.erf(tf.matmul(X, W_0) + b_0)
elif self.activation_fn == 'tanh':
h = tf.tanh(tf.matmul(X, W_0) + b_0)
# h = tf.tanh(1.23*tf.matmul(X, W_0) + b_0) # add 1.23 for close to GP erf
elif self.activation_fn == 'sigmoid':
h = tf.sigmoid(tf.matmul(X, W_0) + b_0)
elif self.activation_fn == 'softplus':
self.c = 2. # if this is bigger -> relu behaviour, but less 'soft'
h = tf.divide(tf.log(tf.exp(tf.multiply(tf.matmul(X, W_0) + b_0, self.c)) + 1), self.c)
elif self.activation_fn == 'rbf':
self.beta_2 = 1/(2*self.g_var)
h = tf.exp(-self.beta_2*tf.square(X - W_0))
h = tf.matmul(h, W_1) #+ b_1
return tf.reshape(h, [-1])
def neural_network_deep(X):
# set up the BNN structure using tf
if self.activation_fn == 'relu':
h1 = tf.maximum(tf.matmul(X, W_0) + b_0,0) # relu
h = tf.maximum(tf.matmul(h1, W_1) + b_1,0) # relu
elif self.activation_fn == 'Lrelu':
a=0.2
h1 = tf.maximum(tf.matmul(X, W_0) + b_0, a*(tf.matmul(X, W_0) + b_0)) # leaky relu
h = tf.maximum(tf.matmul(h1, W_1) + b_1, a*(tf.matmul(h1, W_1) + b_1)) # leaky relu
elif self.activation_fn == 'erf':
h1 = tf.erf(tf.matmul(X, W_0) + b_0)
h = tf.erf(tf.matmul(h1, W_1) + b_1)
else:
raise Exception('tp: activation not implemented')
h = tf.matmul(h, W_2) #+ b_2
return tf.reshape(h, [-1])
if self.activation_fn == 'relu' or self.activation_fn == 'softplus' or self.activation_fn == 'Lrelu':
init_stddev_0_w = np.sqrt(self.w_0_var) # /d_in
init_stddev_0_b = np.sqrt(self.b_0_var) # /d_in
init_stddev_1_w = 1.0/np.sqrt(self.hidden_size) #*np.sqrt(10) # 2nd layer init. dist
elif self.activation_fn == 'tanh' or self.activation_fn == 'erf':
init_stddev_0_w = np.sqrt(self.w_0_var) # 1st layer init. dist for weights
init_stddev_0_b = np.sqrt(self.b_0_var) # for bias
init_stddev_1_w = 1.0/np.sqrt(self.hidden_size) # 2nd layer init. dist
elif self.activation_fn == 'rbf':
init_stddev_0_w = np.sqrt(self.u_var) # centres = sig_u
init_stddev_0_b = np.sqrt(self.g_var) # fixed /beta
init_stddev_1_w = 1.0/np.sqrt(self.hidden_size) # 2nd layer init. dist
n = X_train.shape[0]
X_dim = X_train.shape[1]
y_dim = 1 #y_train.shape[1]
with tf.name_scope("model"):
W_0 = Normal(loc=tf.zeros([X_dim, self.hidden_size]), scale=init_stddev_0_w*tf.ones([X_dim, self.hidden_size]),
name="W_0")
if self.deep_NN == False:
W_1 = Normal(loc=tf.zeros([self.hidden_size, y_dim]), scale=init_stddev_1_w*tf.ones([self.hidden_size, y_dim]),
name="W_1")
b_0 = Normal(loc=tf.zeros(self.hidden_size), scale=init_stddev_0_b*tf.ones(self.hidden_size),
name="b_0")
b_1 = Normal(loc=tf.zeros(1), scale=tf.ones(1),
name="b_1")
else:
W_1 = Normal(loc=tf.zeros([self.hidden_size, self.hidden_size]), scale=init_stddev_1_w*tf.ones([self.hidden_size, y_dim]),
name="W_1")
b_0 = Normal(loc=tf.zeros(self.hidden_size), scale=init_stddev_0_b*tf.ones(self.hidden_size),
name="b_0")
W_2 = Normal(loc=tf.zeros([self.hidden_size, y_dim]), scale=init_stddev_1_w*tf.ones([self.hidden_size, y_dim]),
name="W_2")
b_1 = Normal(loc=tf.zeros(self.hidden_size), scale=init_stddev_1_w*tf.ones(self.hidden_size),
name="b_1")
b_2 = Normal(loc=tf.zeros(1), scale=tf.ones(1),
name="b_2")
X = tf.placeholder(tf.float32, [n, X_dim], name="X")
if self.deep_NN == False:
y = Normal(loc=neural_network(X), scale=np.sqrt(self.data_noise) * tf.ones(n), name="y")
else:
y = Normal(loc=neural_network_deep(X), scale=np.sqrt(self.data_noise) * tf.ones(n), name="y")
# inference
if self.deep_NN == False:
qW_0 = Empirical(tf.Variable(tf.zeros([self.n_samples, X_dim, self.hidden_size])))
qW_1 = Empirical(tf.Variable(tf.zeros([self.n_samples, self.hidden_size, y_dim])))
qb_0 = Empirical(tf.Variable(tf.zeros([self.n_samples, self.hidden_size])))
qb_1 = Empirical(tf.Variable(tf.zeros([self.n_samples, y_dim])))
else:
qW_0 = Empirical(tf.Variable(tf.zeros([self.n_samples, X_dim, self.hidden_size])))
qW_1 = Empirical(tf.Variable(tf.zeros([self.n_samples, self.hidden_size, self.hidden_size])))
qW_2 = Empirical(tf.Variable(tf.zeros([self.n_samples, self.hidden_size, y_dim])))
qb_0 = Empirical(tf.Variable(tf.zeros([self.n_samples, self.hidden_size])))
qb_1 = Empirical(tf.Variable(tf.zeros([self.n_samples, self.hidden_size])))
qb_2 = Empirical(tf.Variable(tf.zeros([self.n_samples, y_dim])))
# get some priors
### !!! TODO, turn this into a proper function
# X_pred = X_val.astype(np.float32).reshape((X_val.shape[0], 1))
# self.y_priors = tf.stack([nn_predict(X_pred, W_0.sample(), W_1.sample(),b_0.sample(), b_1.sample())
# for _ in range(10)])
# Neal 2012
# Too large a stepsize will result in a very low acceptance rate for states
# proposed by simulating trajectories. Too small a stepsize will either waste
# computation time, by the same factor as the stepsize is too small, or (worse)
# will lead to slow exploration by a random walk,
# https://stats.stackexchange.com/questions/304942/how-to-set-step-size-in-hamiltonian-monte-carlo
# If ϵ is too large, then there will be large discretisation error and low acceptance, if ϵ
# is too small then more expensive leapfrog steps will be required to move large distances.
# Ideally we want the largest possible value of ϵ
# that gives reasonable acceptance probability. Unfortunately this may vary for different values of the target variable.
# A simple heuristic to set this may be to do a preliminary run with fixed L,
# gradually increasing ϵ until the acceptance probability is at an appropriate level.
# Setting the trajectory length by trial and error therefore seems necessary.
# For a problem thought to be fairly difficult, a trajectory with L = 100 might be a
# suitable starting point. If preliminary runs (with a suitable ε; see above) show that HMC
# reaches a nearly independent point after only one iteration, a smaller value of L might be
# tried next. (Unless these “preliminary” runs are actually sufficient, in which case there is
# of course no need to do more runs.) If instead there is high autocorrelation in the run
# with L = 100, runs with L = 1000 might be tried next
# It may also be advisable to randomly sample ϵ
# and L from suitable ranges to avoid the possibility of having paths that are close to periodic, as this would slow mixing.
if self.deep_NN == False:
inference = ed.HMC({W_0: qW_0, b_0: qb_0,
W_1: qW_1, b_1: qb_1},
data={X: X_train, y: y_train.ravel()})
else:
inference = ed.HMC({W_0: qW_0, b_0: qb_0,
W_1: qW_1, b_1: qb_1, W_2: qW_2, b_2: qb_2},
data={X: X_train, y: y_train.ravel()})
inference.run(step_size=self.step_size,n_steps=self.n_steps) # logdir='log'
# drop first chunk of burn in samples
if self.deep_NN == False:
self.qW_0_keep = qW_0.params[self.burn_in:].eval()
self.qW_1_keep = qW_1.params[self.burn_in:].eval()
self.qb_0_keep = qb_0.params[self.burn_in:].eval()
self.qb_1_keep = qb_1.params[self.burn_in:].eval()
else:
self.qW_0_keep = qW_0.params[self.burn_in:].eval()
self.qW_1_keep = qW_1.params[self.burn_in:].eval()
self.qb_0_keep = qb_0.params[self.burn_in:].eval()
self.qW_2_keep = qW_2.params[self.burn_in:].eval()
self.qb_1_keep = qb_1.params[self.burn_in:].eval()
self.qb_2_keep = qb_2.params[self.burn_in:].eval()
return
def predict(self, X_pred):
''' do predict on new data '''
def nn_predict_np(X, W_0, W_1, b_0, b_1):
if self.activation_fn == 'relu':
h = np.maximum(np.matmul(X, W_0) + b_0,0)
elif self.activation_fn == 'Lrelu':
a=0.2
h = np.maximum(np.matmul(X, W_0) + b_0,a*(np.matmul(X, W_0) + b_0))
elif self.activation_fn == 'erf':
h = erf(np.matmul(X, W_0) + b_0)
elif self.activation_fn == 'softplus':
h = np.log(1+np.exp(self.c*(np.matmul(X, W_0) + b_0) ))/self.c
elif self.activation_fn == 'tanh':
h = np.tanh(np.matmul(X, W_0) + b_0)
elif self.activation_fn == 'rbf':
h = np.exp(-self.beta_2*np.square(X - W_0))
h = np.matmul(h, W_1) #+ b_1
return np.reshape(h, [-1])
def nn_predict_np_deep(X, W_0, W_1, W_2, b_0, b_1, b_2):
if self.activation_fn == 'relu':
h1 = np.maximum(np.matmul(X, W_0) + b_0,0)
h = np.maximum(np.matmul(h1, W_1) + b_1,0)
elif self.activation_fn == 'Lrelu':
a=0.2
h1 = np.maximum(np.matmul(X, W_0) + b_0,a*(np.matmul(X, W_0) + b_0))
h = np.maximum(np.matmul(h1, W_1) + b_1, a*(np.matmul(h1, W_1) + b_1))
elif self.activation_fn == 'erf':
h1 = erf(np.matmul(X, W_0) + b_0)
h = erf(np.matmul(h1, W_1) + b_1)
else:
raise Exception('tp: other activations not implemented')
h = np.matmul(h, W_2) #+ b_2
return np.reshape(h, [-1])
# predictive sampling with burn in
y_preds=[]
print('\nsampling predictions...')
for _ in range(self.n_predict):
# if _%5 == 0:
# print('sampling:',_, 'of', self.n_predict)
if self.n_predict == self.qW_0_keep.shape[0]:
id = _
else:
id = np.random.randint(0,self.qW_0_keep.shape[0]) # sample from posterior
# if sample from same index it will be joint, this is why we don't do sample
# use np instead of tf to speed up!
if self.deep_NN == False:
temp = nn_predict_np(X_pred,self.qW_0_keep[id],self.qW_1_keep[id],self.qb_0_keep[id],self.qb_1_keep[id])
else:
temp = nn_predict_np_deep(X_pred,self.qW_0_keep[id],self.qW_1_keep[id],self.qW_2_keep[id],self.qb_0_keep[id],self.qb_1_keep[id],self.qb_2_keep[id])
y_preds.append(temp)
y_preds = np.array(y_preds)
y_pred_mu = np.mean(y_preds,axis=0)
y_pred_std = np.std(y_preds,axis=0)
y_pred_std = np.sqrt(np.square(y_pred_std) + self.data_noise) # add on data noise
y_pred_mu = np.atleast_2d(y_pred_mu).T
y_pred_std = np.atleast_2d(y_pred_std).T
self.y_pred_mu = y_pred_mu
self.y_pred_std = y_pred_std
return y_preds, y_pred_mu, y_pred_std
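# Illustrative usage sketch: the class is driven roughly as below, assuming numpy
# arrays X_train / y_train / X_val / X_grid supplied by the caller (2-D inputs,
# 1-D targets) and a working Edward + TensorFlow 1.x installation.
#
#     model = hmc_model(activation_fn='relu', data_noise=0.01, hidden_size=50,
#                       n_samples=500, burn_in=100, n_predict=50)
#     model.train(X_train, y_train, X_val)
#     y_samples, y_pred_mu, y_pred_std = model.predict(X_grid)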
|
StarcoderdataPython
|
3384052
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append("../")
import unittest
import paddle
from paddleslim.dist import merge
from layers import conv_bn_layer
from static_case import StaticCase
class TestMerge(StaticCase):
def test_merge(self):
student_main = paddle.static.Program()
student_startup = paddle.static.Program()
with paddle.static.program_guard(student_main, student_startup):
input = paddle.static.data(name="image", shape=[None, 3, 224, 224])
conv1 = conv_bn_layer(input, 8, 3, "conv1")
conv2 = conv_bn_layer(conv1, 8, 3, "conv2")
student_predict = conv1 + conv2
student_ops = []
for block in student_main.blocks:
for op in block.ops:
student_ops.append(op)
teacher_main = paddle.static.Program()
teacher_startup = paddle.static.Program()
with paddle.static.program_guard(teacher_main, teacher_startup):
input = paddle.static.data(name="image", shape=[None, 3, 224, 224])
conv1 = conv_bn_layer(input, 8, 3, "conv1")
conv2 = conv_bn_layer(conv1, 8, 3, "conv2")
sum1 = conv1 + conv2
conv3 = conv_bn_layer(sum1, 8, 3, "conv3")
conv4 = conv_bn_layer(conv3, 8, 3, "conv4")
sum2 = conv4 + sum1
conv5 = conv_bn_layer(sum2, 8, 3, "conv5")
teacher_predict = conv_bn_layer(conv5, 8, 3, "conv6")
teacher_ops = []
for block in teacher_main.blocks:
for op in block.ops:
teacher_ops.append(op)
place = paddle.CPUPlace()
data_name_map = {'image': 'image'}
merge(teacher_main, student_main, data_name_map, place)
merged_ops = []
for block in student_main.blocks:
for op in block.ops:
merged_ops.append(op)
self.assertTrue(len(student_ops) + len(teacher_ops) == len(merged_ops))
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
3324600
|
<filename>peacock/configure.py
#!/usr/bin/env python3
from build import ninja_common
build = ninja_common.Build("peacock")
build.generate(["regroup"], "peacock/build-regroup.sh", ["regroup.c"])
build.chicken_lib("fishbowl", [
"fishbowl/queue.scm",
"fishbowl/fishbowl.scm",
], where="peacock/fishbowl")
build.chicken_lib("peacock", [
"peacock.scm",
"peacock-internal.scm",
"peacock-util.scm",
"peacock-misc.scm"
], where="peacock", chicken_deps=["cuauv-shm", "fishbowl"])
build.chicken_exe("peck", [
"peck.scm",
], chicken_deps=["peacock"])
|
StarcoderdataPython
|
64252
|
# pylint: disable=wrong-import-position, wrong-import-order, invalid-name
"""
Invoke build script.
Show all tasks with::
invoke -l
.. seealso::
* http://pyinvoke.org
* https://github.com/pyinvoke/invoke
"""
###############################################################################
# Catch exceptions and go into ipython/ipdb
# import sys
# from IPython.core.debugger import Tracer # noqa
# from IPython.core import ultratb
# sys.excepthook = ultratb.FormattedTB(
# mode="Verbose", color_scheme="Linux", call_pdb=True, ostream=sys.__stdout__
# )
###############################################################################
import logging
from invoke import Collection, Context, Config
from invoke import task
from .constants import ROOT_DIR, PROJECT_BIN_DIR, DATA_DIR, SCRIPT_DIR
from . import local
from . import ci
LOGGER = logging.getLogger()
ns = Collection()
ns.add_collection(local)
ns.add_collection(ci)
# https://github.com/imbrra/logowanie/blob/38a1a38ea9f5b2494e5bc986df651ff9d713fda5/tasks/__init__.py
# TODO: THINK ABOUT USING THESE MODULES https://medium.com/hultner/how-to-write-bash-scripts-in-python-10c34a5c2df1
|
StarcoderdataPython
|
109655
|
<filename>hydra/file.py
import os
class File:
def __init__(self, name=None, location=None):
"""
Create and store information about name and location of file.
Note:
Do not pass the file name and location as parameters if
you want to create a 'main.db' file in your directory.
Examples:
>>> file = File('name', 'directory')
Args:
name (str, optional): Get file name.
location (str, optional): Get file location.
Attributes:
self.__name (str): Stores the file name; defaults to 'main' if none is
    given. Not accessible from outside the class.
self.__location (str): Stores the file directory; defaults to the current
    directory if none is given. Not accessible from outside the class.
self.__full_directory (str): Joins the file location and name into the
    full path of the SQLite file.
"""
# store the name of the file, default to 'main'
self.__name = name or 'main'
# store the location of the file, default to current directory
self.__location = location or self.current_directory()
# store full directory address of the file
self.__full_directory = f"{self.__location}/{self.__name}.db"
def __str__(self):
"""
String representation of the File class.
Note:
Use the instance directly in a print statement or in string concatenation
to get the full directory of the file as a string.
Returns:
Returns the full directory as a string.
"""
return self.__full_directory
@staticmethod
def current_directory():
"""
Get current project directory.
Note:
For windows users, it will change '\' to '/' automatically
because python refers to directories using forward slash.
Returns:
:return: Returns the current working directory.
"""
return str(os.getcwd().replace('\\', '/'))
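# Illustrative usage sketch:
if __name__ == "__main__":
    default_file = File()               # -> "<current working directory>/main.db"
    named_file = File('users', '/tmp')  # -> "/tmp/users.db"
    print(default_file, named_file)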
|
StarcoderdataPython
|
3276733
|
_msgs = []
def clear():
_msgs.clear()
def get():
return ",".join(_msgs)
def put(msg):
_msgs.append(msg)
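# Illustrative usage of this tiny message buffer:
if __name__ == "__main__":
    put("hello")
    put("world")
    print(get())   # -> "hello,world"
    clear()
    print(get())   # -> ""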
|
StarcoderdataPython
|
3296958
|
<reponame>neelpawarcmu/deep-learning-library<filename>homework-3/hw3p1/mytorch/gru_cell.py
import numpy as np
from activation import *
class GRUCell(object):
"""GRU Cell class."""
def __init__(self, in_dim, hidden_dim):
self.d = in_dim
self.h = hidden_dim
h = self.h
d = self.d
self.x_t = 0
self.Wrx = np.random.randn(h, d)
self.Wzx = np.random.randn(h, d)
self.Wnx = np.random.randn(h, d)
self.Wrh = np.random.randn(h, h)
self.Wzh = np.random.randn(h, h)
self.Wnh = np.random.randn(h, h)
self.bir = np.random.randn(h)
self.biz = np.random.randn(h)
self.bin = np.random.randn(h)
self.bhr = np.random.randn(h)
self.bhz = np.random.randn(h)
self.bhn = np.random.randn(h)
self.dWrx = np.zeros((h, d))
self.dWzx = np.zeros((h, d))
self.dWnx = np.zeros((h, d))
self.dWrh = np.zeros((h, h))
self.dWzh = np.zeros((h, h))
self.dWnh = np.zeros((h, h))
self.dbir = np.zeros((h))
self.dbiz = np.zeros((h))
self.dbin = np.zeros((h))
self.dbhr = np.zeros((h))
self.dbhz = np.zeros((h))
self.dbhn = np.zeros((h))
self.r_act = Sigmoid()
self.z_act = Sigmoid()
self.h_act = Tanh()
# Define other variables to store forward results for backward here
def init_weights(self, Wrx, Wzx, Wnx, Wrh, Wzh, Wnh, bir, biz, bin, bhr, bhz, bhn):
self.Wrx = Wrx
self.Wzx = Wzx
self.Wnx = Wnx
self.Wrh = Wrh
self.Wzh = Wzh
self.Wnh = Wnh
self.bir = bir
self.biz = biz
self.bin = bin
self.bhr = bhr
self.bhz = bhz
self.bhn = bhn
def __call__(self, x, h):
return self.forward(x, h)
def forward(self, x, h):
"""GRU cell forward.
Input
-----
x: (input_dim)
observation at current time-step.
h: (hidden_dim)
hidden-state at previous time-step.
Returns
-------
h_t: (hidden_dim)
hidden state at current time-step.
"""
self.x = x
self.hidden = h
# Add your code here.
# Define your variables based on the writeup using the corresponding
# names below.
#IDL lec15 slide22 + hw3p1 writeup pg 9
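# The four blocks below implement the standard GRU update, written here exactly
# as this code computes it (note the bias pairing used in this implementation):
#   r_t = sigmoid(Wrh.h + b_ir + Wrx.x + b_hr)
#   z_t = sigmoid(Wzh.h + b_iz + Wzx.x + b_hz)
#   n_t = tanh(Wnx.x + b_in + r_t * (Wnh.h + b_hn))
#   h_t = (1 - z_t) * n_t + z_t * h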
# # #slides
# # #block 1
term1 = np.dot(self.Wrh, h) + self.bir # (h,h) dot (h,) + (h,) = (h,)
term2 = np.dot(self.Wrx, x) + self.bhr # (h,d) dot (d,) + (h,) = (h,)
self.r = self.r_act(term1 + term2) # activation(h,) = (h,)
#block2
term1 = np.dot(self.Wzh, h) + self.biz # (h,h) dot (h,) + (h,) = (h,)
term2 = np.dot(self.Wzx, x) + self.bhz # (h,d) dot (d,) + (h,) = (h,)
self.z = self.r_act(term1 + term2) # activation(h,) = (h,)
#block3
term1 = np.dot(self.Wnx, x) + self.bin # (h,h) dot (h,) + (h,) = (h,)
term2 = self.r * (np.dot(self.Wnh, h) + self.bhn) # (h,) * (h,h) dot (h,) + (h,) = (h,)
# save inner state to compute derivative in backprop easily
self.n_state = np.dot(self.Wnh, h) + self.bhn
self.n = self.h_act(term1 + term2) # activation(h,) = (h,)
#block4
term1 = (1 - self.z) * self.n # (h,) * (h,) = (h,)
term2 = self.z * h # (h,) * (h,) = (h,)
self.h_t = term1 + term2 # (h,) + (h,) = (h,)
assert self.x.shape == (self.d,)
assert self.hidden.shape == (self.h,)
assert self.r.shape == (self.h,)
assert self.z.shape == (self.h,)
assert self.n.shape == (self.h,)
assert self.h_t.shape == (self.h,) # h_t is the final output of you GRU cell.
return self.h_t
def backward(self, delta):
"""GRU cell backward.
This must calculate the gradients wrt the parameters and return the
derivative wrt the inputs, xt and ht, to the cell.
Input
-----
delta: (hidden_dim) #### this is basically dh_t, derivative of an h_t from forward
summation of derivative wrt loss from next layer at
the same time-step and derivative wrt loss from same layer at
next time-step.
Returns
-------
dx: (1, input_dim)
derivative of the loss wrt the input x.
dh: (1, hidden_dim)
derivative of the loss wrt the input hidden h.
"""
# 1) Reshape self.x and self.h to (input_dim, 1) and (hidden_dim, 1) respectively
# when computing self.dWs...
# 2) Transpose all calculated dWs...
# 3) Compute all of the derivatives
# 4) Know that the autograder grades the gradients in a certain order, and the
# local autograder will tell you which gradient you are currently failing.
# ADDITIONAL TIP:
# Make sure the shapes of the calculated dWs and dbs match the
# initialized shapes accordingly
input_dim, = self.x.shape
hidden_dim, = self.hidden.shape
#input = 5, hidden = 2
#derivatives are row vectors and actuals are column vectors.
#to begin with, delta shape is good to go as given by them
#some changes to pdf for gru but the small subparts only, overall it is the same
#calculate derivatives as given in pdf and transpose in the end to get shape of der = shape of actual
#all things should be vectors like (1,5)
# just follow the ppt very carefully and you will get around 26 equations for fwd
#Follow the derivatives from the saved values in these equations
#delta is same as dh_t
print('Original shapes:')
for elemname in ('x', 'hidden', 'n', 'z', 'r', 'Wnx', 'bin', 'Wnh', 'bhn'):
    elem = getattr(self, elemname)
    print(f'{elemname} shape: {elem.shape}', end="\t")
self.x = self.x.reshape(1,-1)
self.hidden = self.hidden.reshape(1,-1)
self.r = self.r.reshape(1,-1)
self.z = self.z.reshape(1,-1)
self.n = self.n.reshape(1,-1)
print(f'x reshaped: {self.x.shape}')
print(f'hidden reshaped: {self.hidden.shape}')
print(f'r reshaped: {self.r.shape}')
print(f'z reshaped: {self.z.shape}')
print(f'n reshaped: {self.n.shape}')
# create 5 derivatives here itself for ease of troubleshooting
dx = np.zeros_like(self.x)
dh = np.zeros_like(self.hidden)
dn = np.zeros_like(self.n)
dz = np.zeros_like(self.z)
dr = np.zeros_like(self.z)
print(f'dx shape : {dx.shape}')
print(f'dh shape : {dh.shape}')
#block4
dz += delta * (-self.n + self.hidden) # (1,h) * (1,h) = (1,h)
dn += delta * (1 - self.z) # (1,h) * (1,h) = (1,h)
dh += delta * self.z # (1,h) * (1,h) = (1,h)
print(f'dn shape : {dn.shape}')
print(f'dz shape : {dz.shape}')
print(f'dh shape : {dh.shape}')
#block3
grad_activ_n = dn * (1-self.n**2) # (1,h)
r_grad_activ_n = grad_activ_n * self.r # (1,h)
self.dWnx += np.dot(grad_activ_n.T, self.x) # (h,1) dot (1,d) = (h,d)
dx += np.dot(grad_activ_n, self.Wnx) # (1,h) dot (h,d) = (1,d)
self.dbin += np.sum(grad_activ_n, axis=0) # (1,h)
dr += grad_activ_n * self.n_state.T # (1,h)
print(f'grad_activ_n shape : {grad_activ_n.shape}')
print(f'self.dWnx shape : {self.dWnx.shape}')
print(f'dx shape : {dx.shape}')
print(f'self.dbin shape : {self.dbin.shape}')
self.dWnh += np.dot(r_grad_activ_n.T, self.hidden) # (h,1) dot (1,h) = (h,d)
dh += np.dot(r_grad_activ_n, self.Wnh) # (h,1) dot (1,h) = (h,d)
self.dbhn += np.sum(r_grad_activ_n, axis=0) # (1,h)
print(f'r_grad_activ_n shape : {r_grad_activ_n.shape}')
print(f'self.dWnh shape : {self.dWnh.shape}')
print(f'dh shape : {dh.shape}')
print(f'self.dbhn shape : {self.dbhn.shape}')
#block2
grad_activ_z = dz * self.z * (1-self.z) # (1,h) * (1,h) * (1,h) = (1,h)
dx += np.dot(grad_activ_z, self.Wzx) # (1,h) dot (h,d) = (1,d)
self.dWzx += np.dot(grad_activ_z.T, self.x) # (h,1) dot (1,d) = (h,d)
self.dWzh += np.dot(grad_activ_z.T, self.hidden) # (h,1) dot (1,d) = (h,d)
dh += np.dot(grad_activ_z, self.Wzh) # (1,h) dot (h,d) = (1,d)
self.dbiz += np.sum(grad_activ_z, axis=0) # (1,h)
self.dbhz += np.sum(grad_activ_z, axis=0) # (1,h)
print(f'grad_activ_z shape : {grad_activ_z.shape}')
print(f'dx shape : {dx.shape}')
print(f'self.dWzx shape : {self.dWzx.shape}')
print(f'self.dWzh shape : {self.dWzh.shape}')
print(f'dh shape : {dh.shape}')
print(f'self.dbiz shape : {self.dbiz.shape}')
print(f'self.dbhz shape : {self.dbhz.shape}')
#block1
grad_activ_r = dr * self.r * (1-self.r) # (h,1) dot (1,d) = (h,d)
dx += np.dot(grad_activ_r, self.Wrx) # (h,1) dot (1,d) = (h,d)
self.dWrx += np.dot(grad_activ_r.T, self.x) # (h,1) dot (1,d) = (h,d)
self.dWrh += np.dot(grad_activ_r.T, self.hidden) # (h,1) dot (1,d) = (h,d)
dh += np.dot(grad_activ_r, self.Wrh) # (h,1) dot (1,d) = (h,d)
self.dbir += np.sum(grad_activ_r, axis=0) # (h,1) dot (1,d) = (h,d)
self.dbhr += np.sum(grad_activ_r, axis=0) # (h,1) dot (1,d) = (h,d)
print(f'grad_activ_r shape : {grad_activ_r.shape}')
print(f'dx shape : {dx.shape}')
print(f'self.dWrx shape : {self.dWrx.shape}')
print(f'self.dWrh shape : {self.dWrh.shape}')
print(f'dh shape : {dh.shape}')
print(f'self.dbir shape : {self.dbir.shape}')
print(f'self.dbhr shape : {self.dbhr.shape}')
print('passed all till here')
return dx, dh
#layer 1
dh_t = delta # (1,h)
d19 = delta # (1,h)
d18 = delta # (1,h)
dz += d19 * self.hidden.T # (1,h) * (1,h) = (h,)
dh_partA = d19 * self.z # (1,h) * (1,h) = (1,h)
d17 = d18 * self.n # (h,) * (h,) = (h,)
dn = d18 * (1-self.z)#z17 # (h,) * (h,) = (h,)
dz = -d17 # (h,) * (h,) = (h,)
# print(f'dh_t ie. delta shape: {delta.shape}')
# print(f'd19 shape: {d19.shape}')
# print(f'd18 shape: {d18.shape}')
# print(f'dz shape: {dz.shape}')
# print(f'dh_partA shape: {dh_partA.shape}')
d1 = delta # (h,)
d19 = delta # (h,)
d18 = delta # (h,)
dz = d19 * self.hidden # (h,) * (h,) = (h,)
dh_partA = d19 * self.z # (h,) * (h,) = (h,)
# print(f'dh_t ie. delta shape: {delta.shape}')
# print(f'd19 shape: {d19.shape}')
# print(f'd18 shape: {d18.shape}')
# print(f'dz shape: {dz.shape}')
# print(f'dh_partA shape: {dh_partA.shape}')
# dz_t = (- self.n + self.hidden) * delta # (1,h)
# dn_t = (1 - self.z) * delta # (1,h)
# dh_partA = (self.z) * delta # (1,h)
# print(f'dz_t shape: {dz_t.shape}')
# print(f'dn_t shape: {dn_t.shape}')
# print(f'dh_t_partA shape: {dh_partA.shape}')
# #layer 2
# dtanh_n = self.h_act.derivative() # (1,h)
# dnt_Wnx = np.dot(self.x.T, dtanh_n).T # (d,1) * (1,h) = (d,h).T = (h,d)
# self.dWnx = dn_t * dnt_Wnx # (1,h) * (h,d) = ????
# print(f'dtanh_n shape: {dtanh_n.shape}')
# print(f'dnt_Wnx shape: {dnt_Wnx.shape}')
# print(f'self.dWnx shape: {self.dWnx.shape}')
'''self.x = self.x.reshape(input_dim, 1)
self.hidden = self.hidden.reshape(hidden_dim, 1)
self.z = self.z.reshape(hidden_dim, 1)
self.n = self.n.reshape(hidden_dim, 1)
d0 = delta #d16, d15
d1 = self.z * d0 # (h,1) * (h,1) = #d13
d2 = self.hidden * d0 # (h,1) * (h,)
d3 = self.n * d0 # (h,1) * (h,)
d4 = -1 * d3 # (h,)
d5 = d2 + d4 # (h,)
d6 = (1-self.z)*d0 # (h,1) * (h,)
d7 = d5 * self.z * (1-self.z) # (h,)*(h,1)*(h,1)
d8 = d6 * (1-self.n**2) # (h,)*(h,1)
d9 = np.dot(d8, self.Wnx.T) #Uh # (h,)*(h,d) = (???)
d10 = np.dot(d8, self.Wnh.T) #Wh # ().(h,h)
d11 = np.dot(d7, self.Wzx.T) #Uz # ().()
d12 = np.dot(d7, self.Wzh.T) #Wz # ().()
d14 = d10 * self.r # ()*()
d15 = d10 * self.hidden # ()*()
d16 = d15 * self.r * (1-self.r) # ()*()*()
d13 = np.dot(d16, self.Wrx.T) #Ur # ().()
d17 = np.dot(d16, self.Wrh.T) #Wr # ().()
print('delta:', delta.shape)
print(f'd0: {delta.shape} = {d0.shape}')
print(f'd1: {self.z.shape} * {d0.shape} = {d1.shape}')
print(f'd2: {self.hidden.shape} * {d0.shape} = {d2.shape}')
print(f'd3: {self.n.shape} * {d0.shape} = {d3.shape}')
print(f'd4: {d3.shape} = {d4.shape}')
print(f'd5: {d2.shape} + {d4.shape} = {d5.shape}')
print(f'd6: {self.z.shape} * {d0.shape} = {d6.shape}')
print(f'd7: {d5.shape} * {self.z.shape} * {self.z.shape} = {d7.shape}')
print(f'd8: {d6.shape} * {self.n.shape} = {d8.shape}')
print(f'd9: {d8.shape} dot {self.Wnx.shape} = {d9.shape}')
print(f'd10: {d8.shape} dot {self.Wnh.T.shape} {d10.shape}')
print(f'd11: {d11.shape} dot {self.Wzx.shape}')
print(f'd12: {d7.shape} dot {self.Wzh.shape} = {d12.shape}')
print(f'd14: {d10.shape} * {self.r.shape} = {d14.shape}')
print(f'd15: {d10.shape} * {self.hidden.shape} = {d15.shape}')
print(f'd16: {d15.shape} * {self.r.shape} * {self.r.shape} = {d16.shape}')
print(f'd13: {d16.shape} dot {self.Wrx.shape} = {d13.shape}')
print(f'd17: {d17.shape} dot {self.Wrh.T.shape} = {d17.shape}')
dx = d9 + d11 + d13
dh = d12 + d14 + d1 + d17
self.dWrx = np.dot(self.x.T, d16) #dUr
self.dWzx = np.dot(self.x.T, d7) #dUz
self.dWnx = np.dot(self.x.T, d8) #dUh
self.dWrh = np.dot(self.hidden.T, d16) #dWr
self.dWzh = np.dot(self.hidden.T, d7) #dWz
self.dWnh = np.dot((self.hidden.T * self.r).T, d8) #dWh
print('x:', self.x.shape, 'dx:', dx.shape)
print('h:', self.hidden.shape, 'dh:', dh.shape)'''
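# Hedged addition (not part of the original cell code above): a finite-difference check is a
# common way to validate a handwritten GRU backward pass like the one implemented above.
# The `cell` object, its `forward(x, h)` / `backward(delta)` interface and the assumption
# that gradients such as dWnx are re-zeroed before each backward call are illustrative only.
def numerical_grad_check_Wnx(cell, x, h, eps=1e-6):
    """Compare the analytic dWnx against a central-difference estimate."""
    delta = np.ones((1, h.shape[-1]))             # upstream gradient of all ones
    cell.forward(x, h)
    cell.backward(delta)                          # fills cell.dWnx analytically
    analytic = np.array(cell.dWnx, copy=True)
    numeric = np.zeros_like(analytic)
    for i in range(analytic.shape[0]):
        for j in range(analytic.shape[1]):
            cell.Wnx[i, j] += eps
            plus = float(np.sum(cell.forward(x, h) * delta))
            cell.Wnx[i, j] -= 2 * eps
            minus = float(np.sum(cell.forward(x, h) * delta))
            cell.Wnx[i, j] += eps                 # restore the original weight
            numeric[i, j] = (plus - minus) / (2 * eps)
    return np.max(np.abs(analytic - numeric))     # should be close to 0 for a correct backward pass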
|
StarcoderdataPython
|
10538
|
'''
Notes: loc and iloc provide several capabilities
1. select one or more rows
2. select one or more columns
3. select the value of a single cell
For a DataFrame without explicit index or columns, iloc and loc behave the same;
the difference is that iloc selects by positional index, while loc selects by index label.
'''
import numpy as np
import pandas as pd
def test_1():
# select by row
pf = pd.DataFrame([[1, 2], [3, 4]])
iloc_0 = pf.iloc[0]
loc_0 = pf.loc[0]
assert pd.Series == type(iloc_0) == type(loc_0), 'loc error'
assert [1, 2
] == iloc_0.values.tolist() == loc_0.values.tolist(), 'loc 2 error'
# note the difference below: positional index vs index label
iloc_01 = pf.iloc[0:2]
loc_01 = pf.loc[0:1]
assert [[1, 2], [
3, 4
]] == iloc_01.values.tolist() == loc_01.values.tolist(), 'loc 3 error'
def test_2():
# select by column
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
iloc_0 = df.iloc[:, 0]
loc_0 = df.loc[:, 0]
assert pd.Series == type(iloc_0) == type(loc_0), 'loc2 1 error'
assert [
1, 4
] == iloc_0.values.tolist() == loc_0.values.tolist(), 'loc2 2 error'
loc_01 = df.loc[:, 0:1]
assert pd.DataFrame == type(loc_01), 'loc2 3 error'
assert [[1, 2], [4, 5]] == loc_01.values.tolist(), 'loc2 4 error'
def test_3():
# select a single cell
df = pd.DataFrame([[1, 2], [3, 4]])
iloc_00 = df.iloc[0, 0]
loc_00 = df.loc[0, 0]
assert np.int64 == type(iloc_00) == type(loc_00), 'loc3 1 error'
assert 1.0 == iloc_00 == loc_00, 'loc3 2 error'
def test_4():
# difference between loc and iloc once index or columns are set
df = pd.DataFrame([[1, 2], [3, 4]],
index=['day1', 'day2'],
columns=['grape', 'pineapple'])
# first row
iloc_0 = df.iloc[0]
loc_0 = df.loc['day1']
assert [
1, 2
] == iloc_0.values.tolist() == loc_0.values.tolist(), 'loc4 1 error'
# first column
iloc_col_0 = df.iloc[:, 0]
loc_col_0 = df.loc[:, 'grape']
assert [1, 3] == iloc_col_0.values.tolist() == loc_col_0.values.tolist(
), 'loc4 2 error'
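# Hedged illustrative sketch (not one of the original tests): with a shuffled integer index
# the positional/label difference shows up even without string labels.
def example_nondefault_integer_index():
    df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], index=[2, 0, 1])
    assert [1, 2] == df.iloc[0].values.tolist()   # first row by position
    assert [3, 4] == df.loc[0].values.tolist()    # row whose index label is 0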
|
StarcoderdataPython
|
3351441
|
"""empty message
Revision ID: 59f3082483dd
Revises: <PASSWORD>
Create Date: 2019-09-21 15:52:11.556530
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
# revision identifiers, used by Alembic.
revision = '<PASSWORD>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'twitch_id')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('twitch_id', sa.VARCHAR(length=128), autoincrement=False, nullable=False))
# ### end Alembic commands ###
|
StarcoderdataPython
|
4812928
|
import pandas as pd
import numpy as np
from scipy import sparse
import os
import sys
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, confusion_matrix, f1_score
import src.utils as utils
class HinDroid():
def __init__(self, B_mat, P_mat, metapaths):
self.B_mat = B_mat
self.P_mat = P_mat
self.metapaths = metapaths
self.kernels = self.construct_kernels(metapaths)
self.svms = [SVC(kernel='precomputed') for mp in metapaths]
def _kernel_func(self, metapath):
B_mat = self.B_mat
P_mat = self.P_mat
if metapath == 'AA':
f = lambda X, Y: np.dot(X, Y.T)
elif metapath == 'ABA':
f = lambda X, Y: np.dot(X, B_mat).dot(Y.T)
elif metapath == 'APA':
f = lambda X, Y: np.dot(X, P_mat).dot(Y.T)
elif metapath == 'APBPA':
f = lambda X, Y: np.dot(X, P_mat).dot(B_mat).dot(P_mat).dot(Y.T)
else:
raise NotImplementedError
return lambda X, Y: f(X, Y).todense()
def construct_kernels(self, metapaths):
kernels = []
for mp in metapaths:
kernels.append(self._kernel_func(mp))
return kernels
def _evaluate(self, X_train, X_test, y_train, y_test):
results = []
for mp, kernel, svm in zip(self.metapaths, self.kernels, self.svms):
print(f'Evaluating {mp}...', end='', file=sys.stderr, flush=True)
gram_train = kernel(X_train, X_train)
svm.fit(gram_train, y_train)
train_acc = svm.score(gram_train, y_train)
gram_test = kernel(X_test, X_train)
y_pred = svm.predict(gram_test)
test_acc = accuracy_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel()
results.append(pd.Series({
'train_acc': train_acc, 'test_acc': test_acc, 'f1': f1,
'TP': tp, 'FP': fp, 'TN': tn, 'FN': fn
}))
print('done', file=sys.stderr)
return results
def evaluate(self, X, y, test_size=0.33):
X = sparse.csr_matrix(X, dtype='uint32')
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=test_size)
results = self._evaluate(X_train, X_test, y_train, y_test)
results = [res.rename(mp) for res, mp in zip(results, self.metapaths)]
results = pd.DataFrame(results)
results.index.name = 'metapath'
return results
def run(**config):
PROC_DIR = utils.PROC_DIR
A_mat, B_mat, P_mat = [
sparse.load_npz(os.path.join(PROC_DIR, mat))
for mat in ['A.npz', 'B.npz', 'P.npz']
]
meta_fp = os.path.join(PROC_DIR, 'meta.csv')
meta = pd.read_csv(meta_fp, index_col=0)
print(meta.label.value_counts())
labels = (meta.label == 'class1').astype(int).values
metapaths = ['AA', 'APA', 'ABA', 'APBPA']
hin = HinDroid(B_mat, P_mat, metapaths)
results = hin.evaluate(A_mat, labels)
print(results)
out_csv = os.path.join(PROC_DIR, 'results.csv')
results.to_csv(out_csv)
# runs = []
# for i in range(10):
# results = hin.evaluate(A_mat, labels)
# print(results)
# runs.append(results)
# out_csv = os.path.join(PROC_DIR, f'results_{i}.csv')
# results.to_csv(out_csv)
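# Hedged sketch (not part of the pipeline above): a toy shape check for the 'ABA' metapath
# kernel. The matrices below are made up; only their roles mirror the real ones
# (A: apps x APIs, B: API x API), and the function name is illustrative.
def _toy_aba_kernel_shape_check():
    A = sparse.csr_matrix(np.random.randint(0, 2, size=(4, 6)), dtype='uint32')
    B = sparse.csr_matrix(np.random.randint(0, 2, size=(6, 6)), dtype='uint32')
    gram = A.dot(B).dot(A.T).todense()   # same math as the 'ABA' kernel above
    assert gram.shape == (4, 4)          # one row/column per app, as SVC(kernel='precomputed') expects
    return gram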
|
StarcoderdataPython
|
102355
|
# -*- coding: utf-8 -*-
# Copyright: <NAME> <<EMAIL>>
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
import zipfile, os
import unicodedata
from anki.utils import tmpfile, json
from anki.importing.anki2 import Anki2Importer
class AnkiPackageImporter(Anki2Importer):
def run(self):
# extract the deck from the zip file
self.zip = z = zipfile.ZipFile(self.file)
col = z.read("collection.anki2")
colpath = tmpfile(suffix=".anki2")
open(colpath, "wb").write(col)
self.file = colpath
# we need the media dict in advance, and we'll need a map of fname ->
# number to use during the import
self.nameToNum = {}
for k, v in json.loads(z.read("media")).items():
self.nameToNum[v] = k
# run anki2 importer
Anki2Importer.run(self)
# import static media
for file, c in self.nameToNum.items():
if not file.startswith("_") and not file.startswith("latex-"):
continue
path = os.path.join(self.col.media.dir(),
unicodedata.normalize("NFC", file))
if not os.path.exists(path):
open(path, "wb").write(z.read(c))
def _srcMediaData(self, fname):
if fname in self.nameToNum:
return self.zip.read(self.nameToNum[fname])
return None
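# Hedged illustration (not part of the importer): in an .apkg archive the "media" member is a
# JSON object mapping zip member names to original filenames, for example
#   {"0": "card-front.png", "1": "_styling.css"}
# so nameToNum above ends up as {"card-front.png": "0", "_styling.css": "1"}, which is what
# _srcMediaData() uses to locate a file inside the zip.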
|
StarcoderdataPython
|
129237
|
# repository: Giving-Tuesday/wbpy, file: wbpy/tests/indicator_data.py
# -*- coding: utf-8 -*-
import datetime
import wbpy
class TestData(object):
""" API response data for testing. """
def __init__(self):
self.dataset = wbpy.IndicatorDataset(self.response, self.url, self.date)
class Yearly(TestData):
url = "api.worldbank.org/countries/GB;AR;SA;HK/indicators/SP.POP.TOTL?format=json&per_page=1000&mrv=2"
date = datetime.date(2013, 9, 19)
response = [
{
"page": 1,
"pages": 1,
"per_page": "1000",
"total": 8
},
[
{
"indicator": {
"id": "SP.POP.TOTL",
"value": "Population, total"
},
"country": {
"id": "AR",
"value": "Argentina"
},
"value": "41086927",
"decimal": "0",
"date": "2012"
},
{
"indicator": {
"id": "SP.POP.TOTL",
"value": "Population, total"
},
"country": {
"id": "AR",
"value": "Argentina"
},
"value": "40728738",
"decimal": "0",
"date": "2011"
},
{
"indicator": {
"id": "SP.POP.TOTL",
"value": "Population, total"
},
"country": {
"id": "GB",
"value": "United Kingdom"
},
"value": "63227526",
"decimal": "0",
"date": "2012"
},
{
"indicator": {
"id": "SP.POP.TOTL",
"value": "Population, total"
},
"country": {
"id": "GB",
"value": "United Kingdom"
},
"value": "62752472",
"decimal": "0",
"date": "2011"
},
{
"indicator": {
"id": "SP.POP.TOTL",
"value": "Population, total"
},
"country": {
"id": "HK",
"value": "Hong Kong SAR, China"
},
"value": "7154600",
"decimal": "0",
"date": "2012"
},
{
"indicator": {
"id": "SP.POP.TOTL",
"value": "Population, total"
},
"country": {
"id": "HK",
"value": "Hong Kong SAR, China"
},
"value": "7071600",
"decimal": "0",
"date": "2011"
},
{
"indicator": {
"id": "SP.POP.TOTL",
"value": "Population, total"
},
"country": {
"id": "SA",
"value": "Saudi Arabia"
},
"value": "28287855",
"decimal": "0",
"date": "2012"
},
{
"indicator": {
"id": "SP.POP.TOTL",
"value": "Population, total"
},
"country": {
"id": "SA",
"value": "Saudi Arabia"
},
"value": "27761728",
"decimal": "0",
"date": "2011"
}
]
]
indicator = {
"id": "SP.POP.TOTL",
"name": "Population, total",
"source": {
"id": "2",
"value": "World Development Indicators"
},
"sourceNote": "Total population is based on the de facto definition of population, which counts all residents regardless of legal status or citizenship--except for refugees not permanently settled in the country of asylum, who are generally considered part of the population of their country of origin. The values shown are midyear estimates.",
"sourceOrganization": "(1) United Nations Population Division. World Population Prospects, (2) United Nations Statistical Division. Population and Vital Statistics Report (various years), (3) Census reports and other statistical publications from national statistical offices, (4) Eurostat: Demographic Statistics, (5) Secretariat of the Pacific Community: Statistics and Demography Programme, and (6) U.S. Census Bureau: International Database.",
"topics": [
{
"id": "8",
"value": "Health "
},
{
"id": "19",
"value": "Climate Change"
}
]
}
def __init__(self):
self.dataset = wbpy.IndicatorDataset(self.response, self.url, self.date)
class Monthly(TestData):
url = "api.worldbank.org/en/countries/ind;chn/indicators/DPANUSSPF?MRV=7&frequency=M&format=json"
date = datetime.date(2013, 9, 19)
response = [
{
"page": 1,
"pages": 1,
"per_page": "50",
"total": 14
},
[
{
"indicator": {
"id": "DPANUSSPF",
"value": "Exchange rate, old LCU per USD extended forward, period average"
},
"country": {
"id": "CN",
"value": "China"
},
"value": "6.12179545455",
"decimal": "0",
"date": "2013M08"
},
{
"indicator": {
"id": "DPANUSSPF",
"value": "Exchange rate, old LCU per USD extended forward, period average"
},
"country": {
"id": "CN",
"value": "China"
},
"value": "6.13418695652",
"decimal": "0",
"date": "2013M07"
},
{
"indicator": {
"id": "DPANUSSPF",
"value": "Exchange rate, old LCU per USD extended forward, period average"
},
"country": {
"id": "CN",
"value": "China"
},
"value": "6.13445",
"decimal": "0",
"date": "2013M06"
},
{
"indicator": {
"id": "DPANUSSPF",
"value": "Exchange rate, old LCU per USD extended forward, period average"
},
"country": {
"id": "CN",
"value": "China"
},
"value": "6.14102173913",
"decimal": "0",
"date": "2013M05"
},
{
"indicator": {
"id": "DPANUSSPF",
"value": "Exchange rate, old LCU per USD extended forward, period average"
},
"country": {
"id": "CN",
"value": "China"
},
"value": "6.18657272727",
"decimal": "0",
"date": "2013M04"
},
{
"indicator": {
"id": "DPANUSSPF",
"value": "Exchange rate, old LCU per USD extended forward, period average"
},
"country": {
"id": "CN",
"value": "China"
},
"value": "6.2159",
"decimal": "0",
"date": "2013M03"
},
{
"indicator": {
"id": "DPANUSSPF",
"value": "Exchange rate, old LCU per USD extended forward, period average"
},
"country": {
"id": "CN",
"value": "China"
},
"value": "6.233015",
"decimal": "0",
"date": "2013M02"
},
{
"indicator": {
"id": "DPANUSSPF",
"value": "Exchange rate, old LCU per USD extended forward, period average"
},
"country": {
"id": "IN",
"value": "India"
},
"value": "62.91897727273",
"decimal": "0",
"date": "2013M08"
},
{
"indicator": {
"id": "DPANUSSPF",
"value": "Exchange rate, old LCU per USD extended forward, period average"
},
"country": {
"id": "IN",
"value": "India"
},
"value": "59.8094",
"decimal": "0",
"date": "2013M07"
},
{
"indicator": {
"id": "DPANUSSPF",
"value": "Exchange rate, old LCU per USD extended forward, period average"
},
"country": {
"id": "IN",
"value": "India"
},
"value": "58.3845",
"decimal": "0",
"date": "2013M06"
},
{
"indicator": {
"id": "DPANUSSPF",
"value": "Exchange rate, old LCU per USD extended forward, period average"
},
"country": {
"id": "IN",
"value": "India"
},
"value": "54.99103043478",
"decimal": "0",
"date": "2013M05"
},
{
"indicator": {
"id": "DPANUSSPF",
"value": "Exchange rate, old LCU per USD extended forward, period average"
},
"country": {
"id": "IN",
"value": "India"
},
"value": "54.38226363636",
"decimal": "0",
"date": "2013M04"
},
{
"indicator": {
"id": "DPANUSSPF",
"value": "Exchange rate, old LCU per USD extended forward, period average"
},
"country": {
"id": "IN",
"value": "India"
},
"value": "54.42345238095",
"decimal": "0",
"date": "2013M03"
},
{
"indicator": {
"id": "DPANUSSPF",
"value": "Exchange rate, old LCU per USD extended forward, period average"
},
"country": {
"id": "IN",
"value": "India"
},
"value": "53.841375",
"decimal": "0",
"date": "2013M02"
}
]
]
indicator = {
"id": "DPANUSSPF",
"name": "Exchange rate, old LCU per USD extended forward, period average",
"source": {
"id": "15",
"value": "Global Economic Monitor"
},
"sourceNote": "Local currency units (LCU) per U.S. dollar, with values after a new currency's introduction presented in the old currency's terms",
"sourceOrganization": "World Bank staff calculations based on Datastream and IMF International Finance Statistics data.",
"topics": [
{ }
]
}
class Quarterly(TestData):
url = "api.worldbank.org/en/countries/es/indicators/NEER?MRV=10&frequency=Q"
date = datetime.date(2013, 9, 19)
response = [
{
"page": 1,
"pages": 1,
"per_page": "50",
"total": 10
},
[
{
"indicator": {
"id": "NEER",
"value": "Nominal Effecive Exchange Rate"
},
"country": {
"id": "ES",
"value": "Spain"
},
"value": "101.96915025708",
"decimal": "0",
"date": "2013Q3"
},
{
"indicator": {
"id": "NEER",
"value": "Nominal Effecive Exchange Rate"
},
"country": {
"id": "ES",
"value": "Spain"
},
"value": "101.63248639595",
"decimal": "0",
"date": "2013Q2"
},
{
"indicator": {
"id": "NEER",
"value": "Nominal Effecive Exchange Rate"
},
"country": {
"id": "ES",
"value": "Spain"
},
"value": "101.52582061816",
"decimal": "0",
"date": "2013Q1"
},
{
"indicator": {
"id": "NEER",
"value": "Nominal Effecive Exchange Rate"
},
"country": {
"id": "ES",
"value": "Spain"
},
"value": "100.18916509029",
"decimal": "0",
"date": "2012Q4"
},
{
"indicator": {
"id": "NEER",
"value": "Nominal Effecive Exchange Rate"
},
"country": {
"id": "ES",
"value": "Spain"
},
"value": "99.16917359022",
"decimal": "0",
"date": "2012Q3"
},
{
"indicator": {
"id": "NEER",
"value": "Nominal Effecive Exchange Rate"
},
"country": {
"id": "ES",
"value": "Spain"
},
"value": "100.11249906251",
"decimal": "0",
"date": "2012Q2"
},
{
"indicator": {
"id": "NEER",
"value": "Nominal Effecive Exchange Rate"
},
"country": {
"id": "ES",
"value": "Spain"
},
"value": "100.78249347922",
"decimal": "0",
"date": "2012Q1"
},
{
"indicator": {
"id": "NEER",
"value": "Nominal Effecive Exchange Rate"
},
"country": {
"id": "ES",
"value": "Spain"
},
"value": "101.79581836818",
"decimal": "0",
"date": "2011Q4"
},
{
"indicator": {
"id": "NEER",
"value": "Nominal Effecive Exchange Rate"
},
"country": {
"id": "ES",
"value": "Spain"
},
"value": "102.65581120157",
"decimal": "0",
"date": "2011Q3"
},
{
"indicator": {
"id": "NEER",
"value": "Nominal Effecive Exchange Rate"
},
"country": {
"id": "ES",
"value": "Spain"
},
"value": "103.22580645161",
"decimal": "0",
"date": "2011Q2"
}
]
]
indicator = {
"id": "NEER",
"name": "Nominal Effecive Exchange Rate",
"source": {
"id": "15",
"value": "Global Economic Monitor"
},
"sourceNote": "A measure of the value of a currency against a weighted average of several foreign currencies",
"sourceOrganization": "World Bank staff calculations based on Datastream and IMF International Finance Statistics data.",
"topics": [
{ }
]
}
|
StarcoderdataPython
|
1613661
|
"""
Easy PTVSD Module.
Contains any decorators or convenience functions for PTVSD.
"""
import ptvsd
class wait_and_break:
"""
Decorator to create ptvsd server, wait for attach, break into debugger, continue.
This pattern of using a class to make a decorator cleans up the double nested
functions needed to create a decorator that accepts arguments. Maybe a little hacky
but I believe this definitely follows the zen of python more than a double nested
decorator function.
Essentially, when the decorator is evaluated it returns a new wait_and_break object
that will be invoked with the decorated function as an argument and return the new
wrapped function.
"""
DEFAULT_SECRET = "my_secret"
DEFAULT_ADDRESS = ("0.0.0.0", 3000)
def __init__(self, secret=DEFAULT_SECRET, address=DEFAULT_ADDRESS):
"""Set default secret and address."""
self.secret = secret
self.address = address
def __call__(self, function):
"""Run ptvsd code and continue with decorated function."""
def wait_and_break_deco(*args, **kwargs):
ptvsd.enable_attach(self.secret, address=self.address)
ptvsd.wait_for_attach()
ptvsd.break_into_debugger()
return function(*args, **kwargs)
return wait_and_break_deco
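# Hedged usage sketch (not part of the original module): how the decorator might be applied.
# The function, its body and the address below are made up for illustration.
#
# @wait_and_break(secret="my_secret", address=("0.0.0.0", 3000))
# def handle_request(payload):
#     return transform(payload)   # execution pauses on entry until a debugger attaches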
|
StarcoderdataPython
|
174192
|
from django.core.validators import MinValueValidator, MaxValueValidator
from PIL import Image
# to use own user class
from django.conf import settings
from django.db import models
class Ticket(models.Model):
class Meta:
ordering = ["-time_created"]
title = models.CharField(max_length=128)
description = models.TextField(max_length=2048, blank=True)
user = models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
image = models.ImageField(null=True, blank=True)
time_created = models.DateTimeField(auto_now_add=True)
IMAGE_MAX_SIZE = (400, 400)
def resize_image(self):
try:
image = Image.open(self.image)
image.thumbnail(self.IMAGE_MAX_SIZE)
image.save(self.image.path)
except Exception as e:
print("no image to resize", e)
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
self.resize_image()
def __str__(self):
return f'{self.title}'
class Review(models.Model):
ticket = models.ForeignKey(to=Ticket, on_delete=models.CASCADE)
rating = models.PositiveSmallIntegerField(validators=[MinValueValidator(0), MaxValueValidator(5)])
user = models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
headline = models.CharField(max_length=128)
body = models.TextField(max_length=8192, blank=True)
time_created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return f'{self.headline}'
class UserFollows(models.Model):
user = models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name="following")
followed_user = models.ForeignKey(
to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name="followed_by"
)
class Meta:
# ensures we don't get multiple UserFollows instances
# for unique user-user_followed pairs
unique_together = (
"user",
"followed_user",
)
# we won't follow ourself
constraints = [
models.CheckConstraint(
name="%(app_label)s_%(class)s_prevent_self_follow",
check=~models.Q(user=models.F("followed_user")),
),
]
def __str__(self):
return f'{self.user} follows {self.followed_user}'
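# Hedged sketch (not part of the models module): how the self-follow CheckConstraint is expected
# to surface in application code; `some_user` is illustrative only.
#
# from django.db import IntegrityError
# try:
#     UserFollows.objects.create(user=some_user, followed_user=some_user)
# except IntegrityError:
#     pass  # rejected by the %(app_label)s_%(class)s_prevent_self_follow constraint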
|
StarcoderdataPython
|
3299300
|
import numpy as np
import numpy.linalg as LA
import scipy.io as sio # not working for me
import networkx as nx
import scipy as sp
import matplotlib.pyplot as plt
from matplotlib import cm
#from scipy.stats import entropy
from time import time
import random, math
# magic numbers
_smallnumber = 1E-6
class SNMF():
"""
Input:
-- V: m x n matrix, the dataset
Optional Input/Output:
-- l: penalty lambda (trade-off parameter between the regularization term and the loss term.)
-- w_init: basis matrix with size m x r
-- h_init: weight matrix with size r x n (r is the number of cluster)
-- Output: w, h
"""
def __init__(self, x, h_init = None, r = 2, batch_number = 10, max_iter = 100):
self.x = x.todense()
self.r = r
self.max_iter = max_iter
print("Constructor call: The matrix's row and column are: ", self.x.shape[0], self.x.shape[1], "total iteration: ", self.max_iter)
self.batch_number = batch_number
self.batch_number_range = self.x.shape[0]
self.mini_batch_size = math.ceil(x.shape[0] / self.batch_number)
self.batch_x = np.asmatrix(np.zeros((self.mini_batch_size,self.mini_batch_size)))
print("Constructor call: Batch number is : ", batch_number, " with mini_batch_size: ", self.mini_batch_size, "batch_x has shape:", self.batch_x.shape)
self.h = h_init
self.errors = np.zeros(self.max_iter)
def frobenius_norm(self):
""" Euclidean error between x and h * h.T """
if hasattr(self, 'h'): # if it has attributes w and h
error = LA.norm(self.x - self.h*self.h.T)
else:
error = None
return error
def bgd_solver(self, alpha = 0.001, eps = None, debug = None):
if(self.batch_number == 1): # normal MUR
for iter in range(self.max_iter):
self.errors[iter] = LA.norm(self.x - self.h * self.h.T)
# if (self.errors[iter] > 1) and (abs(self.errors[iter]-self.errors[iter-1]) < eps):
# # print("error1: ", self.errors[iter], "error2:", self.errors[iter-1])
# print("stop condition met at iteration: ", iter)
# return self.h
numerator = self.x*self.h
denominator = (((self.h*self.h.T)*self.h) + 2 ** -8)
self.h = np.multiply(self.h, np.divide(numerator, denominator))
# count = 0
# for i in range(self.h.shape[0]):
# for j in range(self.h.shape[1]):
# # print(self.h[i,j])
# if self.h[i,j]<0:
# count += 1
# print("(", i, ",", j, ")")
# print("negative numbers:", count)
else:
batch_h = np.asmatrix(np.zeros((self.mini_batch_size,self.r)))
for iter in range(self.max_iter): # stochastic MUR
self.errors[iter] = np.linalg.norm(self.x - self.h * self.h.T, 'fro') # record error
# if (self.errors[iter] > 1) and (abs(self.errors[iter]-self.errors[iter-1]) < eps):
# # print("error1: ", self.errors[iter], "error2:", self.errors[iter-1])
# print("stop condition met at iteration: ", iter)
# return self.h
tmp_list = self.generate_random_numbers(upper_bound = self.batch_number_range, num = self.mini_batch_size)
# print("tmp_list: ", tmp_list, "type: ", type(tmp_list), "length: ", len(tmp_list))
# an ugly matrix to create batch matrix
i = 0
while i < len(tmp_list):
j = i
batch_h[i,:] = self.h[tmp_list[i],:]
while j < len(tmp_list):
self.batch_x[i,j] = self.x[tmp_list[i],tmp_list[j]]
self.batch_x[j,i] = self.x[tmp_list[i],tmp_list[j]]
j += 1
i += 1
grad = 4 * (batch_h * batch_h.T * batch_h - self.batch_x * batch_h)
# print("grad", grad)
update = batch_h - alpha * grad
i = 0
while i < len(tmp_list):
j = 0
count = 0
while j < update.shape[1]:
if update[i,j] < 0:
update[i,j] = 0
count += 1
j += 1
self.h[tmp_list[i],:] = update[i,:]
i += 1
return self.h
def get_error_trend(self):
return self.errors
# generate a list of random number from range [0, range], with size num
def generate_random_numbers(self, upper_bound, num):
seq = list(range(0,upper_bound))
return random.sample(seq,num)
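# Hedged sketch (not part of the original script): minimal SNMF usage on a small random
# symmetric matrix, just to show the expected call pattern; all numbers are arbitrary.
def _toy_snmf_run():
    from scipy import sparse as sps               # local import keeps the sketch self-contained
    n, r = 20, 3
    M = np.random.rand(n, n)
    M = sps.csr_matrix((M + M.T) / 2)             # symmetric sparse input, like an adjacency matrix
    H0 = np.asmatrix(np.random.rand(n, r))
    model = SNMF(x=M, r=r, h_init=H0, batch_number=1, max_iter=50)
    model.bgd_solver(alpha=0.01)                  # batch_number == 1 uses plain multiplicative updates
    return model.frobenius_norm(), model.h.shape  # reconstruction error and (n, r)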
"""
----------------------------------------EmailEuCore----------------------------------------
"""
G = nx.Graph()
with open('email-v1005-e25571-c42/email-Eu-core.txt','r') as f:
for line in f:
line=line.split()#split the line up into a list - the first entry will be the node, the others his friends
# print(len(line), "cont", line[0])
if len(line)==1:#in case the node has no friends, we should still add him to the network
if line[0] not in G:
G.add_node(line[0])
else:#in case the node has friends, loop over all the entries in the list
focal_node = line[0]#pick your node
# print(line[1:])
for friend in line[1:]:#loop over the friends
if friend != focal_node:
G.add_edge(focal_node,friend)#add each edge to the graph
cluster_num = 42
t0 = time()
A = nx.adjacency_matrix(G) #(1005,1005)
t1 = time()
### Output degrees of this graph
# text_file1 = open('email-v1005-e25571-c42/degree.txt','w')
# for s in G.degree():
# for s1 in s:
# text_file1.write("%s " % (s1))
# text_file1.write("\n")
# text_file1.close()
# count = 0
# for n in range(A.shape[0]):
# if A[n,n] != 0:
# count += 1
# print(count," count ========")
initial_h = np.asmatrix(np.random.rand(A.shape[0], cluster_num)) # h's initialization, as a matrix
grid1 = A.todense() # initial x
grid2 = np.dot(initial_h,initial_h.T) # initial h
A_nmf = SNMF(x=A, r=cluster_num, h_init = initial_h, batch_number=10, max_iter=40) # call snmf's constructor
print("Staring error is: ", A_nmf.frobenius_norm())
print("Start running...")
t0 = time()
# result = A_nmf.bgd_solver(alpha = 0.01, eps = 0.000001)
result = A_nmf.bgd_solver(alpha = 0.01) # run gd, return h
t1 = time()
print('Final error is: ', A_nmf.frobenius_norm(), '\nTime taken: ', t1 - t0)
# cluster_result = np.zeros(result.shape) #(986, 42)
# print(cluster_result.shape,"type:", type(cluster_result))
text_file = open('email-v1005-e25571-c42/output.txt','w')
index = 0
for row in range(result.shape[0]):
max_id = 0
for col in range(1, result.shape[1]):
if result[row, col] > result[row, max_id]:  # track the column (cluster) with the largest membership value
max_id = col
text_file.write("%s %s\n" % (row, max_id))
# print("%s %s\n" % (row, max_id))
index = 0
text_file.close()
# print("final result for H has shape:", result.shape)
# print("clustering result", np.amax(result.T, axis=1)) # 0 for row, 1 for column
"""
----------------------------------------Dolphin----------------------------------------
"""
# # read .gml
# G = nx.read_gml('dolphins-v62-e159/dolphins.gml') # 62 vertices
# cluster_num = 2
# A = nx.adjacency_matrix(G) #(62,62) # get adjacency matrix
# # print("line 126, type of dense A:" , type(A.todense())) # <class 'scipy.sparse.csr.csr_matrix'>,
# # to dense: <class 'numpy.matrixlib.defmatrix.matrix'>
# initial_h = np.asmatrix(np.random.rand(A.shape[0], cluster_num)) # h's initialization, as a matrix
# # initial_h = np.asmatrix(np.ones((A.shape[0], cluster_num)))
# print(type(initial_h))
# # print("line 127, type of initial_h" , type(initial_h), "shape: ", initial_h.shape)
# # print("initial h [1]::::: ",initial_h[0,0])
# grid1 = A.todense() # initial x
# grid2 = np.dot(initial_h,initial_h.T) # initial h
# A_nmf = SNMF(x=A, r=cluster_num, h_init = initial_h, batch_number=10, max_iter=10000) # call snmf's constructor
# print("Staring error is: ", A_nmf.frobenius_norm())
# print("Start running...")
# t0 = time()
# # result = A_nmf.bgd_solver(alpha = 0.01, eps = 0.000001)
# result = A_nmf.bgd_solver(alpha = 0.01) # run gd, return h
# t1 = time()
# # print(result[0,0])
# print('Final error is: ', A_nmf.frobenius_norm(), 'Time taken: ', t1 - t0)
# dolphins = sio.loadmat('dolphins-v62-e159/dolphins_rlabels')
# label = dolphins['labels'].T
# correct_count = 0
# for i in range(result.shape[0]):
# if result[i,0] < result[i,1]:
# # print(label[i], "-- 1")
# if label[i] == 1:
# correct_count += 1
# else:
# # print(label[i], "-- 2")
# if label[i] == 2:
# correct_count += 1
# print("correct_count: ", correct_count)
"""
----------------------------------------PLOT----------------------------------------
"""
plt.plot(A_nmf.get_error_trend())
plt.show()
grid3 = np.dot(result,result.T) # result matrix
fig = plt.figure()
# subplot 1, initial matrix
ax = fig.add_subplot(141)
im = ax.imshow(grid1)
plt.colorbar(im)
# subplot 2, color map
ax = fig.add_subplot(142)
im = ax.imshow(grid2)
plt.colorbar(im)
ax = fig.add_subplot(143)
im = ax.imshow(grid3)
plt.colorbar(im)
# subplot 2, color map
ax = fig.add_subplot(144)
im = ax.imshow(result)
plt.colorbar(im)
plt.tight_layout()
plt.show()
|
StarcoderdataPython
|
1662827
|
# Sample local settings file
# Copy this to localsettings.py and edit settings as needed
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = False
# list of IPs that are allowed to see debug output in templates
INTERNAL_IPS = []
ALLOWED_HOSTS = []
# SECURITY WARNING: keep the secret key used in production secret!
# You can generate one here: http://www.miniwebtool.com/django-secret-key-generator/
SECRET_KEY = ']:<KEY>'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Exist DB Settings
EXISTDB_SERVER_URL = 'http://localhost:8080/exist/'
# exist admin account for testing
EXISTDB_SERVER_USER = "admin"
EXISTDB_SERVER_PASSWORD = ""
EXISTDB_ROOT_COLLECTION = "/ddi_data"
EXISTDB_TEST_COLLECTION = "/test/rsk_ddi_data"
# a bug in python xmlrpclib loses the timezone; override it here
# most likely, you want either tz.tzlocal() or tz.tzutc()
from dateutil import tz
EXISTDB_SERVER_TIMEZONE = tz.tzlocal()
# geonames username to use when geocoding locations at data-load time
GEONAMES_USERNAME = ''
# Logging
# https://docs.djangoproject.com/en/1.6/topics/logging/
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'basic': {
'format': '[%(asctime)s] %(levelname)s:%(name)s::%(message)s',
'datefmt': '%d/%b/%Y %H:%M:%S',
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'basic'
},
},
'loggers': {
'ddisearch': {
'handlers': ['console'],
'level': 'INFO',
}
}
}
|
StarcoderdataPython
|
3372513
|
# Django settings for project.
import os
import urlparse
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DEBUG = os.path.exists('.debug') or (os.environ.has_key('DEBUG') and os.environ['DEBUG'] == "1")
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('<NAME>', 'dontspamme'),
)
MANAGERS = ADMINS
import dj_database_url
DATABASES = {'default': dj_database_url.config(default='sqlite:////%s/dev.db' % BASE_DIR)}
if not DEBUG:
os.environ['MEMCACHE_SERVERS'] = os.environ.get('MEMCACHIER_SERVERS', '').replace(',', ';')
os.environ['MEMCACHE_USERNAME'] = os.environ.get('MEMCACHIER_USERNAME', '')
os.environ['MEMCACHE_PASSWORD'] = os.environ.get('MEMCACHIER_PASSWORD', '')
CACHES = {
'default': {
'BACKEND': 'django_pylibmc.memcached.PyLibMCCache',
'TIMEOUT': 500,
'BINARY': True,
'OPTIONS': { 'tcp_nodelay': True }
}
}
redis_url = urlparse.urlparse(os.environ.get('REDISTOGO_URL', ''))
SESSION_ENGINE = 'redis_sessions.session'
SESSION_REDIS_HOST = redis_url.hostname
SESSION_REDIS_PORT = redis_url.port
SESSION_REDIS_DB = 0
SESSION_REDIS_PASSWORD = redis_url.password
SESSION_REDIS_PREFIX = 'session'
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'ex7r9l_mt$^_9up(0iwjsgeasv8u(28c2_@hv&wddk4&u3jfeq'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
#'django.contrib.admin',
'tastypie',
'south',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
StarcoderdataPython
|
1748863
|
# By manish.17, contest: ITMO Academy. Двоичный поиск - 2, problem: (C) Very Easy Task
# https://codeforces.com/profile/manish.17
n, x, y = map(int, input().split())
alpha, omega = min(x, y), 10**18
if n == 1:
print(min(x, y))
quit()
while alpha < omega:
mid = (alpha + omega)//2
# mid is feasible if, after the first copy (min(x, y) seconds), the remaining n-1 copies fit within mid seconds
if (mid - min(x, y))//x + (mid - min(x, y))//y >= n-1:
omega = mid
else:
alpha = mid + 1
print(omega)
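# Hedged sketch (separate from the submission above): a linear scan over the same feasibility
# predicate, useful only as a cross-check of the binary search on small inputs.
def _linear_scan(n, x, y):
    if n == 1:
        return min(x, y)
    t = min(x, y)
    while (t - min(x, y)) // x + (t - min(x, y)) // y < n - 1:
        t += 1
    return t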
|
StarcoderdataPython
|
8810
|
import copy
import inspect
import json
import logging
import pytest
import re
import os
import shutil
import subprocess
import time
from datetime import datetime, timedelta
from configparser import ConfigParser, ExtendedInterpolation
from typing import Dict, List, Optional
from pyhttpd.certs import CertificateSpec
from .md_cert_util import MDCertUtil
from pyhttpd.env import HttpdTestSetup, HttpdTestEnv
from pyhttpd.result import ExecResult
log = logging.getLogger(__name__)
class MDTestSetup(HttpdTestSetup):
def __init__(self, env: 'HttpdTestEnv'):
super().__init__(env=env)
def make(self):
super().make(add_modules=["proxy_connect", "md"])
if "pebble" == self.env.acme_server:
self._make_pebble_conf()
def _make_pebble_conf(self):
our_dir = os.path.dirname(inspect.getfile(MDTestSetup))
conf_src_dir = os.path.join(our_dir, 'pebble')
conf_dest_dir = os.path.join(self.env.gen_dir, 'pebble')
if not os.path.exists(conf_dest_dir):
os.makedirs(conf_dest_dir)
for name in os.listdir(conf_src_dir):
src_path = os.path.join(conf_src_dir, name)
m = re.match(r'(.+).template', name)
if m:
self._make_template(src_path, os.path.join(conf_dest_dir, m.group(1)))
elif os.path.isfile(src_path):
shutil.copy(src_path, os.path.join(conf_dest_dir, name))
class MDTestEnv(HttpdTestEnv):
MD_S_UNKNOWN = 0
MD_S_INCOMPLETE = 1
MD_S_COMPLETE = 2
MD_S_EXPIRED = 3
MD_S_ERROR = 4
EMPTY_JOUT = {'status': 0, 'output': []}
DOMAIN_SUFFIX = "%d.org" % time.time()
LOG_FMT_TIGHT = '%(levelname)s: %(message)s'
@classmethod
def get_acme_server(cls):
return os.environ['ACME'] if 'ACME' in os.environ else "pebble"
@classmethod
def has_acme_server(cls):
return cls.get_acme_server() != 'none'
@classmethod
def has_acme_eab(cls):
return cls.get_acme_server() == 'pebble'
@classmethod
def is_pebble(cls) -> bool:
return cls.get_acme_server() == 'pebble'
@classmethod
def lacks_ocsp(cls):
return cls.is_pebble()
def __init__(self, pytestconfig=None, setup_dirs=True):
super().__init__(pytestconfig=pytestconfig,
local_dir=os.path.dirname(inspect.getfile(MDTestEnv)),
interesting_modules=["md"])
self._acme_server = self.get_acme_server()
self._acme_tos = "accepted"
self._acme_ca_pemfile = os.path.join(self.gen_dir, "apache/acme-ca.pem")
if "pebble" == self._acme_server:
self._acme_url = "https://localhost:14000/dir"
self._acme_eab_url = "https://localhost:14001/dir"
elif "boulder" == self._acme_server:
self._acme_url = "http://localhost:4001/directory"
self._acme_eab_url = None
else:
raise Exception(f"unknown ACME server type: {self._acme_server}")
self._acme_server_down = False
self._acme_server_ok = False
self._a2md_bin = os.path.join(self.bin_dir, 'a2md')
self._default_domain = f"test1.{self.http_tld}"
self._store_dir = "./md"
self.set_store_dir_default()
self.add_cert_specs([
CertificateSpec(domains=[f"expired.{self._http_tld}"],
valid_from=timedelta(days=-100),
valid_to=timedelta(days=-10)),
CertificateSpec(domains=["localhost"], key_type='rsa2048'),
])
self.httpd_error_log.set_ignored_lognos([
#"AH10045", # mod_md complains that there is no vhost for an MDomain
"AH10105", # mod_md does not find a vhost with SSL enabled for an MDomain
"AH10085" # mod_ssl complains about fallback certificates
])
if self.lacks_ocsp():
self.httpd_error_log.set_ignored_patterns([
re.compile(r'.*certificate with serial \S+ has no OCSP responder URL.*'),
])
if setup_dirs:
self._setup = MDTestSetup(env=self)
self._setup.make()
self.issue_certs()
self.clear_store()
def set_store_dir_default(self):
dirpath = "md"
if self.httpd_is_at_least("2.5.0"):
dirpath = os.path.join("state", dirpath)
self.set_store_dir(dirpath)
def set_store_dir(self, dirpath):
self._store_dir = os.path.join(self.server_dir, dirpath)
if self.acme_url:
self.a2md_stdargs([self.a2md_bin, "-a", self.acme_url, "-d", self._store_dir, "-C", self.acme_ca_pemfile, "-j"])
self.a2md_rawargs([self.a2md_bin, "-a", self.acme_url, "-d", self._store_dir, "-C", self.acme_ca_pemfile])
def get_apxs_var(self, name: str) -> str:
p = subprocess.run([self._apxs, "-q", name], capture_output=True, text=True)
if p.returncode != 0:
return ""
return p.stdout.strip()
@property
def acme_server(self):
return self._acme_server
@property
def acme_url(self):
return self._acme_url
@property
def acme_tos(self):
return self._acme_tos
@property
def a2md_bin(self):
return self._a2md_bin
@property
def acme_ca_pemfile(self):
return self._acme_ca_pemfile
@property
def store_dir(self):
return self._store_dir
def get_request_domain(self, request):
return "%s-%s" % (re.sub(r'[_]', '-', request.node.originalname), MDTestEnv.DOMAIN_SUFFIX)
def get_method_domain(self, method):
return "%s-%s" % (re.sub(r'[_]', '-', method.__name__.lower()), MDTestEnv.DOMAIN_SUFFIX)
def get_module_domain(self, module):
return "%s-%s" % (re.sub(r'[_]', '-', module.__name__.lower()), MDTestEnv.DOMAIN_SUFFIX)
def get_class_domain(self, c):
return "%s-%s" % (re.sub(r'[_]', '-', c.__name__.lower()), MDTestEnv.DOMAIN_SUFFIX)
# --------- cmd execution ---------
_a2md_args = []
_a2md_args_raw = []
def a2md_stdargs(self, args):
self._a2md_args = [] + args
def a2md_rawargs(self, args):
self._a2md_args_raw = [] + args
def a2md(self, args, raw=False) -> ExecResult:
preargs = self._a2md_args
if raw:
preargs = self._a2md_args_raw
log.debug("running: {0} {1}".format(preargs, args))
return self.run(preargs + args)
def check_acme(self):
if self._acme_server_ok:
return True
if self._acme_server_down:
pytest.skip(msg="ACME server not running")
return False
if self.is_live(self.acme_url, timeout=timedelta(seconds=0.5)):
self._acme_server_ok = True
return True
else:
self._acme_server_down = True
pytest.fail(msg="ACME server not running", pytrace=False)
return False
def get_ca_pem_file(self, hostname: str) -> Optional[str]:
pem_file = super().get_ca_pem_file(hostname)
if pem_file is None:
pem_file = self.acme_ca_pemfile
return pem_file
# --------- access local store ---------
def purge_store(self):
log.debug("purge store dir: %s" % self._store_dir)
assert len(self._store_dir) > 1
if os.path.exists(self._store_dir):
shutil.rmtree(self._store_dir, ignore_errors=False)
os.makedirs(self._store_dir)
def clear_store(self):
log.debug("clear store dir: %s" % self._store_dir)
assert len(self._store_dir) > 1
if not os.path.exists(self._store_dir):
os.makedirs(self._store_dir)
for dirpath in ["challenges", "tmp", "archive", "domains", "accounts", "staging", "ocsp"]:
shutil.rmtree(os.path.join(self._store_dir, dirpath), ignore_errors=True)
def clear_ocsp_store(self):
assert len(self._store_dir) > 1
dirpath = os.path.join(self._store_dir, "ocsp")
log.debug("clear ocsp store dir: %s" % dir)
if os.path.exists(dirpath):
shutil.rmtree(dirpath, ignore_errors=True)
def authz_save(self, name, content):
dirpath = os.path.join(self._store_dir, 'staging', name)
os.makedirs(dirpath)
open(os.path.join(dirpath, 'authz.json'), "w").write(content)
def path_store_json(self):
return os.path.join(self._store_dir, 'md_store.json')
def path_account(self, acct):
return os.path.join(self._store_dir, 'accounts', acct, 'account.json')
def path_account_key(self, acct):
return os.path.join(self._store_dir, 'accounts', acct, 'account.pem')
def store_domains(self):
return os.path.join(self._store_dir, 'domains')
def store_archives(self):
return os.path.join(self._store_dir, 'archive')
def store_stagings(self):
return os.path.join(self._store_dir, 'staging')
def store_challenges(self):
return os.path.join(self._store_dir, 'challenges')
def store_domain_file(self, domain, filename):
return os.path.join(self.store_domains(), domain, filename)
def store_archived_file(self, domain, version, filename):
return os.path.join(self.store_archives(), "%s.%d" % (domain, version), filename)
def store_staged_file(self, domain, filename):
return os.path.join(self.store_stagings(), domain, filename)
def path_fallback_cert(self, domain):
return os.path.join(self._store_dir, 'domains', domain, 'fallback-pubcert.pem')
def path_job(self, domain):
return os.path.join(self._store_dir, 'staging', domain, 'job.json')
def replace_store(self, src):
shutil.rmtree(self._store_dir, ignore_errors=False)
shutil.copytree(src, self._store_dir)
def list_accounts(self):
return os.listdir(os.path.join(self._store_dir, 'accounts'))
def check_md(self, domain, md=None, state=-1, ca=None, protocol=None, agreement=None, contacts=None):
domains = None
if isinstance(domain, list):
domains = domain
domain = domains[0]
if md:
domain = md
path = self.store_domain_file(domain, 'md.json')
with open(path) as f:
md = json.load(f)
assert md
if domains:
assert md['domains'] == domains
if state >= 0:
assert md['state'] == state
if ca:
assert md['ca']['url'] == ca
if protocol:
assert md['ca']['proto'] == protocol
if agreement:
assert md['ca']['agreement'] == agreement
if contacts:
assert md['contacts'] == contacts
def pkey_fname(self, pkeyspec=None):
if pkeyspec and not re.match(r'^rsa( ?\d+)?$', pkeyspec.lower()):
return "privkey.{0}.pem".format(pkeyspec)
return 'privkey.pem'
def cert_fname(self, pkeyspec=None):
if pkeyspec and not re.match(r'^rsa( ?\d+)?$', pkeyspec.lower()):
return "pubcert.{0}.pem".format(pkeyspec)
return 'pubcert.pem'
def check_md_complete(self, domain, pkey=None):
md = self.get_md_status(domain)
assert md
assert 'state' in md, "md is unexpected: {0}".format(md)
assert md['state'] is MDTestEnv.MD_S_COMPLETE, "unexpected state: {0}".format(md['state'])
assert os.path.isfile(self.store_domain_file(domain, self.pkey_fname(pkey)))
assert os.path.isfile(self.store_domain_file(domain, self.cert_fname(pkey)))
def check_md_credentials(self, domain):
if isinstance(domain, list):
domains = domain
domain = domains[0]
else:
domains = [domain]
# check private key, validate certificate, etc
MDCertUtil.validate_privkey(self.store_domain_file(domain, 'privkey.pem'))
cert = MDCertUtil(self.store_domain_file(domain, 'pubcert.pem'))
cert.validate_cert_matches_priv_key(self.store_domain_file(domain, 'privkey.pem'))
# check SANs and CN
assert cert.get_cn() == domain
# compare lists twice in opposite directions: SAN may not respect ordering
san_list = list(cert.get_san_list())
assert len(san_list) == len(domains)
assert set(san_list).issubset(domains)
assert set(domains).issubset(san_list)
# check valid dates interval
not_before = cert.get_not_before()
not_after = cert.get_not_after()
assert not_before < datetime.now(not_before.tzinfo)
assert not_after > datetime.now(not_after.tzinfo)
# --------- check utilities ---------
def check_json_contains(self, actual, expected):
# write all expected key:value bindings to a copy of the actual data ...
# ... assert it stays unchanged
test_json = copy.deepcopy(actual)
test_json.update(expected)
assert actual == test_json
def check_file_access(self, path, exp_mask):
actual_mask = os.lstat(path).st_mode & 0o777
assert oct(actual_mask) == oct(exp_mask)
def check_dir_empty(self, path):
assert os.listdir(path) == []
def get_http_status(self, domain, path, use_https=True):
r = self.get_meta(domain, path, use_https, insecure=True)
return r.response['status']
def get_cert(self, domain, tls=None, ciphers=None):
return MDCertUtil.load_server_cert(self._httpd_addr, self.https_port,
domain, tls=tls, ciphers=ciphers)
def get_server_cert(self, domain, proto=None, ciphers=None):
args = [
"openssl", "s_client", "-status",
"-connect", "%s:%s" % (self._httpd_addr, self.https_port),
"-CAfile", self.acme_ca_pemfile,
"-servername", domain,
"-showcerts"
]
if proto is not None:
args.extend(["-{0}".format(proto)])
if ciphers is not None:
args.extend(["-cipher", ciphers])
r = self.run(args)
# noinspection PyBroadException
try:
return MDCertUtil.parse_pem_cert(r.stdout)
except:
return None
def verify_cert_key_lenghts(self, domain, pkeys):
for p in pkeys:
cert = self.get_server_cert(domain, proto="tls1_2", ciphers=p['ciphers'])
if 0 == p['keylen']:
assert cert is None
else:
assert cert, "no cert returned for cipher: {0}".format(p['ciphers'])
assert cert.get_key_length() == p['keylen'], "key length, expected {0}, got {1}".format(
p['keylen'], cert.get_key_length()
)
def get_meta(self, domain, path, use_https=True, insecure=False):
schema = "https" if use_https else "http"
port = self.https_port if use_https else self.http_port
r = self.curl_get(f"{schema}://{domain}:{port}{path}", insecure=insecure)
assert r.exit_code == 0
assert r.response
assert r.response['header']
return r
def get_content(self, domain, path, use_https=True):
schema = "https" if use_https else "http"
port = self.https_port if use_https else self.http_port
r = self.curl_get(f"{schema}://{domain}:{port}{path}")
assert r.exit_code == 0
return r.stdout
def get_json_content(self, domain, path, use_https=True, insecure=False,
debug_log=True):
schema = "https" if use_https else "http"
port = self.https_port if use_https else self.http_port
url = f"{schema}://{domain}:{port}{path}"
r = self.curl_get(url, insecure=insecure, debug_log=debug_log)
if r.exit_code != 0:
log.error(f"curl get on {url} returned {r.exit_code}"
f"\nstdout: {r.stdout}"
f"\nstderr: {r.stderr}")
assert r.exit_code == 0, r.stderr
return r.json
def get_certificate_status(self, domain) -> Dict:
return self.get_json_content(domain, "/.httpd/certificate-status", insecure=True)
def get_md_status(self, domain, via_domain=None, use_https=True, debug_log=False) -> Dict:
if via_domain is None:
via_domain = self._default_domain
return self.get_json_content(via_domain, f"/md-status/{domain}",
use_https=use_https, debug_log=debug_log)
def get_server_status(self, query="/", via_domain=None, use_https=True):
if via_domain is None:
via_domain = self._default_domain
return self.get_content(via_domain, "/server-status%s" % query, use_https=use_https)
def await_completion(self, names, must_renew=False, restart=True, timeout=60,
via_domain=None, use_https=True):
try_until = time.time() + timeout
renewals = {}
names = names.copy()
while len(names) > 0:
if time.time() >= try_until:
return False
for name in names:
mds = self.get_md_status(name, via_domain=via_domain, use_https=use_https)
if mds is None:
log.debug("not managed by md: %s" % name)
return False
if 'renewal' in mds:
renewal = mds['renewal']
renewals[name] = True
if 'finished' in renewal and renewal['finished'] is True:
if (not must_renew) or (name in renewals):
log.debug(f"domain cert was renewed: {name}")
names.remove(name)
if len(names) != 0:
time.sleep(0.1)
if restart:
time.sleep(0.1)
return self.apache_restart() == 0
return True
def is_renewing(self, name):
stat = self.get_certificate_status(name)
return 'renewal' in stat
def await_renewal(self, names, timeout=60):
try_until = time.time() + timeout
while len(names) > 0:
if time.time() >= try_until:
return False
for name in names:
md = self.get_md_status(name)
if md is None:
log.debug("not managed by md: %s" % name)
return False
if 'renewal' in md:
names.remove(name)
if len(names) != 0:
time.sleep(0.1)
return True
def await_error(self, domain, timeout=60, via_domain=None, use_https=True, errors=1):
try_until = time.time() + timeout
while True:
if time.time() >= try_until:
return False
md = self.get_md_status(domain, via_domain=via_domain, use_https=use_https)
if md:
if 'state' in md and md['state'] == MDTestEnv.MD_S_ERROR:
return md
if 'renewal' in md and 'errors' in md['renewal'] \
and md['renewal']['errors'] >= errors:
return md
time.sleep(0.1)
return None
def await_file(self, fpath, timeout=60):
try_until = time.time() + timeout
while True:
if time.time() >= try_until:
return False
if os.path.isfile(fpath):
return True
time.sleep(0.1)
def check_file_permissions(self, domain):
md = self.a2md(["list", domain]).json['output'][0]
assert md
acct = md['ca']['account']
assert acct
self.check_file_access(self.path_store_json(), 0o600)
# domains
self.check_file_access(self.store_domains(), 0o700)
self.check_file_access(os.path.join(self.store_domains(), domain), 0o700)
self.check_file_access(self.store_domain_file(domain, 'privkey.pem'), 0o600)
self.check_file_access(self.store_domain_file(domain, 'pubcert.pem'), 0o600)
self.check_file_access(self.store_domain_file(domain, 'md.json'), 0o600)
# archive
self.check_file_access(self.store_archived_file(domain, 1, 'md.json'), 0o600)
# accounts
self.check_file_access(os.path.join(self._store_dir, 'accounts'), 0o755)
self.check_file_access(os.path.join(self._store_dir, 'accounts', acct), 0o755)
self.check_file_access(self.path_account(acct), 0o644)
self.check_file_access(self.path_account_key(acct), 0o644)
# staging
self.check_file_access(self.store_stagings(), 0o755)
def get_ocsp_status(self, domain, proto=None, cipher=None, ca_file=None):
stat = {}
args = [
"openssl", "s_client", "-status",
"-connect", "%s:%s" % (self._httpd_addr, self.https_port),
"-CAfile", ca_file if ca_file else self.acme_ca_pemfile,
"-servername", domain,
"-showcerts"
]
if proto is not None:
args.extend(["-{0}".format(proto)])
if cipher is not None:
args.extend(["-cipher", cipher])
r = self.run(args, debug_log=False)
ocsp_regex = re.compile(r'OCSP response: +([^=\n]+)\n')
matches = ocsp_regex.finditer(r.stdout)
for m in matches:
if m.group(1) != "":
stat['ocsp'] = m.group(1)
if 'ocsp' not in stat:
ocsp_regex = re.compile(r'OCSP Response Status:\s*(.+)')
matches = ocsp_regex.finditer(r.stdout)
for m in matches:
if m.group(1) != "":
stat['ocsp'] = m.group(1)
verify_regex = re.compile(r'Verify return code:\s*(.+)')
matches = verify_regex.finditer(r.stdout)
for m in matches:
if m.group(1) != "":
stat['verify'] = m.group(1)
return stat
def await_ocsp_status(self, domain, timeout=10, ca_file=None):
try_until = time.time() + timeout
while True:
if time.time() >= try_until:
break
stat = self.get_ocsp_status(domain, ca_file=ca_file)
if 'ocsp' in stat and stat['ocsp'] != "no response sent":
return stat
time.sleep(0.1)
raise TimeoutError(f"ocsp respopnse not available: {domain}")
def create_self_signed_cert(self, name_list, valid_days, serial=1000, path=None):
dirpath = path
if not path:
dirpath = os.path.join(self.store_domains(), name_list[0])
return MDCertUtil.create_self_signed_cert(dirpath, name_list, valid_days, serial)
|
StarcoderdataPython
|
1762499
|
"""
TF-explain Library
The library implements interpretability methods as Tensorflow 2.0
callbacks to ease neural network's understanding.
"""
__version__ = "0.2.1"
try:
import cv2
except ImportError:
raise ImportError(
"TF-explain requires Opencv. " "Install Opencv via `pip install opencv-python`"
) from None
try:
import tensorflow as tf
except ImportError:
raise ImportError(
"TF-explain requires TensorFlow 2.0 or higher. "
"Install TensorFlow via `pip install tensorflow`"
) from None
from . import core
from . import callbacks
from . import utils
|
StarcoderdataPython
|
3282396
|
from datetime import datetime, date, timedelta
import unittest
from businesstime import BusinessTime
from businesstime.holidays.usa import USFederalHolidays
class BusinessTimeTest(unittest.TestCase):
def setUp(self):
"""
Tests mostly based around January 2014, where two holidays, New Years Day
and MLK day, fall on the 1st and 20th, respectively.
January 2014
Su Mo Tu We Th Fr Sa
1 2 3 4
5 6 7 8 9 10 11
12 13 14 15 16 17 18
19 20 21 22 23 24 25
26 27 28 29 30 31
"""
self.bt = BusinessTime(holidays=USFederalHolidays())
def test_iterdays(self):
start = datetime(2014, 1, 16)
end = datetime(2014, 1, 22)
self.assertEqual(
tuple(self.bt.iterdays(start, end)),
(datetime(2014, 1, 16), datetime(2014, 1, 17), datetime(
2014, 1, 18), datetime(2014, 1, 19), datetime(2014, 1, 20),
datetime(2014, 1, 21)))
def test_iterdays_same_day(self):
start = datetime(2014, 1, 16, 12, 15)
end = datetime(2014, 1, 16, 12, 16)
self.assertEqual(
tuple(self.bt.iterdays(start, end)), (datetime(2014, 1, 16), ))
def test_iterdays_clears_time(self):
start = datetime(2014, 1, 16, 12, 12, 11)
end = datetime(2014, 1, 18, 15)
self.assertEqual(
tuple(self.bt.iterdays(start, end)),
(datetime(2014, 1, 16), datetime(2014, 1, 17)))
def test_iterweekdays(self):
start = datetime(2014, 1, 16)
end = datetime(2014, 1, 22)
self.assertEqual(
tuple(self.bt.iterweekdays(start, end)),
(datetime(2014, 1, 16), datetime(2014, 1, 17), datetime(
2014, 1, 20), datetime(2014, 1, 21)))
def test_iterbusinessdays(self):
start = datetime(2014, 1, 16)
end = datetime(2014, 1, 22)
self.assertEqual(
tuple(self.bt.iterbusinessdays(start, end)), (datetime(
2014, 1, 16), datetime(2014, 1, 17), datetime(2014, 1, 21)))
def test_iterbusinessdays_conforms_to_business_hours(self):
start = datetime(2014, 1, 16, 17, 1)
end = datetime(2014, 1, 23, 2)
self.assertEqual(
tuple(self.bt.iterbusinessdays(start, end)), (datetime(
2014, 1, 17), datetime(2014, 1, 21), datetime(2014, 1, 22)))
def test_isduringbusinessday(self):
self.assertTrue(
self.bt.isduringbusinesshours(datetime(2014, 1, 15, 12)))
self.assertFalse(self.bt.isduringbusinesshours(datetime(2014, 1, 15)))
self.assertFalse(
self.bt.isduringbusinesshours(datetime(2014, 1, 18, 11)))
self.assertFalse(
self.bt.isduringbusinesshours(datetime(2014, 1, 20, 11, 46, 43)))
def test_holidays_specified_as_list(self):
bd = BusinessTime(holidays=[date(2014, 1, 1)])
self.assertTrue(bd.isholiday(date(2014, 1, 1)))
self.assertFalse(bd.isholiday(date(2014, 1, 2)))
def test_no_holidays(self):
bt = BusinessTime()
self.assertFalse(bt.isholiday(date(2014, 1, 1)))
def test_businesstimedelta_after_during(self):
start = datetime(2014, 1, 16, 18, 30)
end = datetime(2014, 1, 22, 10, 0)
self.assertEqual(
self.bt.businesstimedelta(start, end), timedelta(days=2, hours=1))
def test_businesstimedelta_1_minute_after_during(self):
"""https://github.com/seatgeek/businesstime/issues/7"""
start = datetime(2015, 2, 23, 17, 0)
end = datetime(2015, 2, 24, 14, 20)
self.assertEqual(
self.bt.businesstimedelta(start, end),
timedelta(hours=5, minutes=20))
start = datetime(2015, 2, 23, 17, 1)
self.assertEqual(
self.bt.businesstimedelta(start, end),
timedelta(hours=5, minutes=20))
def test_businesstimedelta_nonbusiness_after(self):
start = datetime(2014, 1, 12, 12)
end = datetime(2014, 1, 17, 19, 30)
self.assertEqual(
self.bt.businesstimedelta(start, end), timedelta(days=4, hours=8))
def test_businesstimedelta_before_after(self):
start = datetime(2014, 1, 13, 4)
end = datetime(2014, 1, 17, 19, 30)
self.assertEqual(
self.bt.businesstimedelta(start, end), timedelta(days=4, hours=8))
def test_businesstimedelta_during_after(self):
start = datetime(2014, 1, 30, 12, 15)
end = datetime(2014, 1, 31, 19, 30)
self.assertEqual(
self.bt.businesstimedelta(start, end),
timedelta(days=1, hours=4, minutes=45))
def test_businesstimedelta_during_before(self):
start = datetime(2014, 8, 4, 11)
end = datetime(2014, 8, 6, 5)
self.assertEqual(
self.bt.businesstimedelta(start, end), timedelta(days=1, hours=6))
def test_businesstimedelta_before_before(self):
start = datetime(2014, 8, 4, 1)
end = datetime(2014, 8, 4, 5)
self.assertEqual(
self.bt.businesstimedelta(start, end), timedelta(days=0))
def test_businesstimedelta_after_after(self):
start = datetime(2014, 8, 4, 22)
end = datetime(2014, 8, 4, 23)
self.assertEqual(
self.bt.businesstimedelta(start, end), timedelta(days=0))
def test_businesstimedelta_during_nonbusiness(self):
start = datetime(2014, 1, 10, 16, 15)
end = datetime(2014, 1, 12, 12, 30)
self.assertEqual(
self.bt.businesstimedelta(start, end), timedelta(minutes=45))
def test_businesstimedelta_during_nonbusiness2(self):
start = datetime(2014, 1, 9, 16, 15)
end = datetime(2014, 1, 12, 12, 30)
self.assertEqual(
self.bt.businesstimedelta(start, end), timedelta(
days=1, minutes=45))
def test_businesstimedelta_after_nonbusiness(self):
start = datetime(2014, 1, 10, 17, 15)
end = datetime(2014, 1, 12, 12, 30)
self.assertEqual(self.bt.businesstimedelta(start, end), timedelta())
def test_businesstimedelta_during_during(self):
start = datetime(2014, 1, 2, 9, 12)
end = datetime(2014, 1, 3, 9, 10)
self.assertEqual(
self.bt.businesstimedelta(start, end),
timedelta(hours=7, minutes=58))
def test_businesstimedelta_during_during2(self):
start = datetime(2014, 1, 2, 9, 10)
end = datetime(2014, 1, 3, 9, 12)
self.assertEqual(
self.bt.businesstimedelta(start, end), timedelta(
days=1, minutes=2))
def test_businesstimedelta_during_during3(self):
start = datetime(2014, 1, 2, 9, 10)
end = datetime(2014, 1, 2, 9, 12)
self.assertEqual(
self.bt.businesstimedelta(start, end), timedelta(minutes=2))
def test_businesstimedelta_nonbusiness_nonbusiness(self):
start = datetime(2014, 1, 4, 9, 10)
end = datetime(2014, 1, 4, 9, 12)
self.assertEqual(self.bt.businesstimedelta(start, end), timedelta())
def test_businesstimedelta_exactly_one_day(self):
start = datetime(2014, 1, 7, 10)
end = datetime(2014, 1, 8, 10)
self.assertEqual(
self.bt.businesstimedelta(start, end), timedelta(days=1))
def test_businesstimedelta_exactly_one_day2(self):
"""
Test for https://github.com/seatgeek/businesstime/issues/3
"""
start = datetime(2014, 1, 7, 9)
end = datetime(2014, 1, 8, 9)
self.assertEqual(
self.bt.businesstimedelta(start, end), timedelta(days=1))
def test_businesstimedelta_during_during_reverse(self):
end = datetime(2014, 1, 2, 9, 12)
start = datetime(2014, 1, 3, 9, 10)
self.assertEqual(
self.bt.businesstimedelta(start, end),
timedelta(hours=-7, minutes=-58))
def test_businesstime_hours_exactly_one_day(self):
start = datetime(2014, 1, 16, 9, 0)
end = datetime(2014, 1, 17, 9, 0)
self.assertEqual(
self.bt.businesstime_hours(start, end), timedelta(hours=8))
def test_businesstime_hours_one_day(self):
start = datetime(2014, 1, 16, 9, 0)
end = datetime(2014, 1, 17, 15, 0)
self.assertEqual(
self.bt.businesstime_hours(start, end), timedelta(hours=14))
def test_businesstime_hours_one_day_reverse(self):
start = datetime(2014, 1, 17, 9, 0)
end = datetime(2014, 1, 16, 9, 0)
self.assertEqual(
self.bt.businesstime_hours(start, end), timedelta(hours=-8))
def test_businesstime_out_of_hours_start(self):
"""
Test for https://github.com/seatgeek/businesstime/issues/13
"""
start = datetime(2014, 8, 9, 9, 0)
end = datetime(2014, 8, 11, 17, 0)
self.assertEqual(
self.bt.businesstime_hours(start, end), timedelta(hours=8))
def test_businesstime_out_of_hours_start_end(self):
"""
Test for https://github.com/seatgeek/businesstime/issues/13
"""
start = datetime(2014, 8, 9, 9, 0)
end = datetime(2014, 8, 11, 23, 0)
self.assertEqual(
self.bt.businesstime_hours(start, end), timedelta(hours=8))
def test_businesstime_out_of_hours_end(self):
"""
Test for https://github.com/seatgeek/businesstime/issues/13
"""
start = datetime(2014, 8, 8, 9, 0)
end = datetime(2014, 8, 11, 23, 0)
self.assertEqual(
self.bt.businesstime_hours(start, end), timedelta(hours=16))
def test_businesstime_holidays_date_desc(self):
"""
Test for https://github.com/seatgeek/businesstime/issues/25
"""
bt_cal = BusinessTime(holidays=USFederalHolidays())
non_holiday = datetime(2018, 5, 31, 12, 0)
memorial_day_2017 = datetime(2017, 5, 29, 12, 0)
memorial_day_2018 = datetime(2018, 5, 28, 12, 0)
# Note that we test the later memorial day first, internally populating
# the holidays cache starting with memorial day 2018. We then verify
# that memorial day 2017 is properly classified as a holiday.
is_memorial_day_2018_holiday = bt_cal.isholiday(memorial_day_2018)
is_memorial_day_2017_holiday = bt_cal.isholiday(memorial_day_2017)
is_non_holiday_holiday = bt_cal.isholiday(non_holiday)
self.assertTrue(is_memorial_day_2017_holiday)
self.assertTrue(is_memorial_day_2018_holiday)
self.assertFalse(is_non_holiday_holiday)
def test_lots_of_holidays(self):
"""
Test for https://github.com/seatgeek/businesstime/issues/25
"""
bt_cal = BusinessTime(holidays=USFederalHolidays())
non_holiday = datetime(2018, 5, 31, 12, 0)
non_holiday2 = datetime(2018, 2, 3, 12, 0)
non_holiday3 = datetime(2018, 6, 4, 12, 0)
non_holiday4 = datetime(2018, 11, 21, 12, 0)
memorial_day = datetime(2018, 5, 28, 12, 0)
new_year_day = datetime(2018, 1, 1, 12, 0)
labor_day = datetime(2018, 9, 3, 12, 0)
christmas = datetime(2018, 12, 25, 12, 0)
self.assertFalse(bt_cal.isholiday(non_holiday))
self.assertTrue(bt_cal.isholiday(memorial_day))
self.assertTrue(bt_cal.isholiday(new_year_day))
self.assertFalse(bt_cal.isholiday(non_holiday2))
self.assertFalse(bt_cal.isholiday(non_holiday4))
self.assertTrue(bt_cal.isholiday(labor_day))
self.assertFalse(bt_cal.isholiday(non_holiday3))
self.assertTrue(bt_cal.isholiday(christmas))
|
StarcoderdataPython
|
1608608
|
<reponame>ricklentz/tdw
from typing import Optional, Union, Tuple
from pathlib import Path
import os
from platform import system
from subprocess import check_output, Popen, call
import re
from psutil import pid_exists
class AudioUtils:
"""
Utility class for recording audio in TDW using [fmedia](https://stsaz.github.io/fmedia/).
Usage:
```python
from tdw.audio_utils import AudioUtils
from tdw.controller import Controller
c = Controller()
initialize_trial() # Your code here.
# Begin recording audio. Automatically stop recording at 10 seconds.
AudioUtils.start(output_path="path/to/file.wav", until=(0, 10))
do_trial() # Your code here.
# Stop recording.
AudioUtils.stop()
```
"""
# The process ID of the audio recorder.
RECORDER_PID: Optional[int] = None
# The audio capture device.
DEVICE: Optional[str] = None
@staticmethod
def get_system_audio_device(device_name: str = None) -> str:
"""
:param device_name: The name of the audio capture device. If None, defaults to `"Stereo Mix"` (Windows and Linux) or `"iShowU Audio Capture"` (OS X).
:return: The audio device that can be used to capture system audio.
"""
# Set a default device name.
if device_name is None:
if system() == "Darwin":
device_name = "iShowU Audio Capture"
else:
device_name = "Stereo Mix"
devices = check_output(["fmedia", "--list-dev"]).decode("utf-8").split("Capture:")[1]
dev_search = re.search(f"device #(.*): {device_name}", devices, flags=re.MULTILINE)
assert dev_search is not None, "No suitable audio capture device found:\n" + devices
return dev_search.group(1)
@staticmethod
def start(output_path: Union[str, Path], until: Optional[Tuple[int, int]] = None, device_name: str = None) -> None:
"""
Start recording audio.
:param output_path: The path to the output file.
:param until: If not None, fmedia will record until `minutes:seconds`. The value must be a tuple of 2 integers. If None, fmedia will record until you send `AudioUtils.stop()`.
:param device_name: The name of the audio capture device. If None, defaults to `"Stereo Mix"` (Windows and Linux) or `"iShowU Audio Capture"` (OS X).
"""
if isinstance(output_path, str):
p = Path(output_path).resolve()
else:
p = output_path
# Create the directory.
if not p.parent.exists():
p.parent.mkdir(parents=True)
# Set the capture device.
if AudioUtils.DEVICE is None:
AudioUtils.DEVICE = AudioUtils.get_system_audio_device(device_name=device_name)
fmedia_call = ["fmedia",
"--record",
f"--dev-capture={AudioUtils.DEVICE}",
f"--out={str(p.resolve())}",
"--globcmd=listen"]
# Automatically stop recording.
if until is not None:
fmedia_call.append(f"--until={str(until[0]).zfill(2)}:{str(until[1]).zfill(2)}")
with open(os.devnull, "w+") as f:
AudioUtils.RECORDER_PID = Popen(fmedia_call,
stderr=f).pid
@staticmethod
def stop() -> None:
"""
Stop recording audio (if any fmedia process is running).
"""
if AudioUtils.RECORDER_PID is not None:
with open(os.devnull, "w+") as f:
call(['fmedia', '--globcmd=quit'], stderr=f, stdout=f)
AudioUtils.RECORDER_PID = None
@staticmethod
def is_recording() -> bool:
"""
:return: True if the fmedia recording process still exists.
"""
return AudioUtils.RECORDER_PID is not None and pid_exists(AudioUtils.RECORDER_PID)
|
StarcoderdataPython
|
1793245
|
from django.http import HttpResponse
from django.shortcuts import render
from shoppingcartproject.productmodels import productmodel
from django.contrib import messages
def displayproduct(request):
return render(request, 'products.html')
|
StarcoderdataPython
|
1753143
|
import argparse
import os
import random
import shutil
import time
import warnings
import numpy as np
from progress.bar import (Bar, IncrementalBar)
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import folder2lmdb
import CustomBatchSampler
import cv2
from models.voc.mbv2_yolo import yolo
from models.voc.yolo_loss import *
from utils import Bar, Logger, AverageMeter
from utils.eval_mAP import *
from pprint import PrettyPrinter
import yaml
import nni
from nni.utils import merge_parameter
from nni.trial import get_sequence_id
from nni.trial import get_trial_id
pp = PrettyPrinter()
from torch.utils.tensorboard import SummaryWriter
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
def main(args):
#print('NNI_OUTPUT_DIR',os.environ["NNI_OUTPUT_DIR"])
#writer = SummaryWriter(os.environ["NNI_OUTPUT_DIR"]+'/tensorboard/')
if 'NNI_OUTPUT_DIR' not in os.environ:
writer = SummaryWriter('tensorboard/')
else:
writer = SummaryWriter(os.environ["NNI_OUTPUT_DIR"]+'/tensorboard/')
    with open('models/voc/config.yaml', 'r') as f:
        config = yaml.safe_load(f)
    with open('data/voc_data.yaml', 'r') as f:
        dataset_path = yaml.safe_load(f)
    if args.ignore_thresh_1 is not None:
        config["yolo"]["ignore_thresh"][0] = args.ignore_thresh_1
    if args.ignore_thresh_2 is not None:
        config["yolo"]["ignore_thresh"][1] = args.ignore_thresh_2
    if args.iou_thresh is not None:
        config["yolo"]["iou_thresh"] = args.iou_thresh
    if args.expand_scale is not None:
        config["expand_scale"] = args.expand_scale
    if args.mosaic_num is not None:
        config["mosaic_num"] = args.mosaic_num
    if args.iou_weighting is not None:
        config["iou_weighting"] = args.iou_weighting
print(config)
best_acc = 0 # best test accuracy
#args = parser.parse_args()
start_epoch = 0
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
image_folder = folder2lmdb.ImageFolderLMDB
train_dataset = image_folder(
db_path=dataset_path["trainval_dataset_path"]["lmdb"],
transform_size=config["train_img_size"],
phase='train',batch_size = config["batch_size"],
expand_scale=config["expand_scale"]
)
test_dataset = image_folder(
db_path=dataset_path["test_dataset_path"]["lmdb"],
transform_size=[[config["img_w"],config["img_h"]]],
phase='test',batch_size = config["batch_size"]
)
BatchSampler = CustomBatchSampler.GreedyBatchSampler
sampler = BatchSampler (
torch.utils.data.sampler.RandomSampler(train_dataset),
batch_size=config["batch_size"],
drop_last=False,sample=config["mosaic_num"])
train_loader = torch.utils.data.DataLoader(
train_dataset,batch_sampler = sampler,
num_workers=4, pin_memory=True,collate_fn=train_dataset.collate_fn,
worker_init_fn=seed_worker)
test_loader = torch.utils.data.DataLoader(
test_dataset, config["batch_size"], shuffle=False,
num_workers=4, pin_memory=True,collate_fn=test_dataset.collate_fn)
model = yolo(config=config)
#model_for_graph = yolo_graph(config=config)
#input = torch.randn(1, 3, 352, 352)
#writer.add_graph(model_for_graph,input)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.cuda()
# Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
biases = list()
not_biases = list()
params = model.parameters()
optimizer = optim.AdamW(params=params,lr = args.learning_rate,weight_decay= args.weight_decay)
if not os.path.exists(args.checkpoint):
os.makedirs(args.checkpoint)
title = 'voc-training-process'
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
print(args.resume)
assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
args.checkpoint = os.path.dirname(args.resume)
checkpoint = torch.load(args.resume)
best_acc = checkpoint['best_acc']
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
model.yolo_losses[0].val_conf = checkpoint['conf']
model.yolo_losses[1].val_conf = checkpoint['conf']
logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
#for param_group in optimizer.param_groups:
# param_group['lr'] = args.lr
else:
logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
logger.set_names(['Epoch ', 'Loss ', 'Precision ', 'Time ', 'IOU ', 'Learning Rate'])
test_acc = 0
if args.evaluate:
for epoch in range(1):
test_acc = test(test_loader, model, optimizer, epoch , config)
return
#ls = len(args.warm_up)
for epoch in range(start_epoch, args.epochs):
if epoch in args.warm_up:
adjust_learning_rate(optimizer, 0.5)
for epoch in range(start_epoch, args.epochs):
# train for one epoch
if epoch in args.warm_up:
adjust_learning_rate(optimizer, 2)
if epoch in args.schedule:
#load_best_checkpoint(model=model, save_path=args.save_path)
save_checkpoint({
'epoch': epoch ,
'model': model.state_dict(),
'acc': test_acc,
'best_acc': best_acc,
'optimizer' : optimizer.state_dict(),
'conf' : model.yolo_losses[0].val_conf,
}, False,model,config, checkpoint=args.checkpoint,filename='epoch%d_checkpoint.pth.tar'%epoch,export_path = args.export)
adjust_learning_rate(optimizer, 0.5)
print('adjusted to current lr: '
'{}'.format([param_group['lr'] for param_group in optimizer.param_groups]))
log = False
if epoch%2 == 0 :
log = True
st = time.time()
print('\nEpoch: [%3d | %3d] LR: %f | loss | cnt | iou | obj | no_obj | class | recall | cnt2 | iou2 | obj2 | no_obj2 | class2 | recall2 |' \
% (epoch, args.epochs, optimizer.param_groups[0]['lr']))
train_loss,iou = train(train_loader, model, optimizer, epoch,sampler)
writer.add_scalar('Loss/train', train_loss, epoch)
writer.add_scalar('iou/train', iou, epoch)
if not log :
test_acc = test(test_loader, model, optimizer, epoch , config)
nni.report_intermediate_result(test_acc)
logger.append([epoch + 1, train_loss , test_acc, time.time()-st,iou, optimizer.param_groups[0]['lr']])
# save model
is_best = test_acc > best_acc
best_acc = max(test_acc, best_acc)
save_checkpoint({
'epoch': epoch + 1,
'model': model.state_dict(),
'acc': test_acc,
'best_acc': best_acc,
'optimizer' : optimizer.state_dict(),
'conf' : model.yolo_losses[0].val_conf,
}, is_best,model,config, checkpoint=args.checkpoint,export_path = args.export)
writer.add_scalar('Accuracy/test', test_acc, epoch+ 1)
nni.report_final_result(best_acc)
def train(train_loader, model, optimizer,epoch,sampler):
model.train()
bar = IncrementalBar('Training', max=len(sampler),width=12)
#batch_time = AverageMeter()
#data_time = AverageMeter()
losses = AverageMeter()
recall = [AverageMeter(),AverageMeter()]
iou = [AverageMeter(),AverageMeter()]
obj = [AverageMeter(),AverageMeter()]
no_obj = [AverageMeter(),AverageMeter()]
conf_loss = [AverageMeter(),AverageMeter()]
cls_loss = [AverageMeter(),AverageMeter()]
cls_score = [AverageMeter(),AverageMeter()]
count = [AverageMeter(),AverageMeter()]
#end = time.time()
for batch_idx, (images,targets,total_num) in enumerate(train_loader):
#print('\n1-',sum(sampler.get_mosaic_array()),'\n')
#print('1-',sampler.mosaic_array,'\n')
#print(targets)
#data_time.update(time.time() - end)
bs = images.size(0)
#print(images.shape)
#print(i,targets[0])
optimizer.zero_grad()
images = images.to(device) # (batch_size (N), 3, H, W)
outputs = model(images,targets)
#losses0 = yolo_losses[0](outputs[0],targets)
#losses1 = yolo_losses[1](outputs[1],targets)
t_loss = list()
for i,l in enumerate(outputs):
#print(l[0])
t_loss.append(l[0])
recall[i].update(l[1])
iou[i].update(l[2])
obj[i].update(l[3])
no_obj[i].update(l[4])
cls_score[i].update(l[5])
count[i].update(l[6])
#conf_loss.update(l[5])
#cls_loss.update(l[6])
loss = sum(t_loss)
losses.update(loss.item(),bs)
loss.backward()
optimizer.step()
# measure elapsed time
#batch_time.update(time.time() - end)
#end = time.time()
bar.suffix = \
'%(percent)3d%% | {total:} | {loss:.4f} | {cnt1:2.1f} | {iou1:.3f} | {obj1:.3f} | {no_obj1:.4f} | {cls1:.3f} | {rec1:.3f} | {cnt2:2.1f} | {iou2:.3f} | {obj2:.3f} | {no_obj2:.4f} | {cls2:.3f} | {rec2:.3f} |'\
.format(
#batch=batch_idx + 1,
#size=len(train_loader),
#data=data_time.avg,
#bt=batch_time.avg,
total=bar.elapsed_td,
loss=losses.avg,
#loss1=losses[0].avg,
#loss2=losses[1].avg,
cnt1=(count[0].avg),
cnt2=(count[1].avg),
#recall=recall.avg,
iou1=iou[0].avg,
iou2=iou[1].avg,
obj1=obj[0].avg,
no_obj1=no_obj[0].avg,
cls1=cls_score[0].avg,
obj2=obj[1].avg,
no_obj2=no_obj[1].avg,
cls2=cls_score[1].avg,
rec1=recall[0].avg,
rec2=recall[1].avg,
#cls=cls_loss.avg,
)
bar.next(total_num)
bar.finish()
return losses.avg,(iou[0].avg+iou[1].avg)/2
def test(test_loader, model, optimizer,epoch , config):
# switch to evaluate mode
model.eval()
n_classes = config['yolo']['classes'];
end = time.time()
#bar = Bar('Validating', max=len(test_loader))
bar = IncrementalBar('Validating', max=len(test_loader),width=32)
#for batch_idx, (inputs, targets) in enumerate(testloader):
n_gt = [0]*n_classes
correct = [0]*n_classes
n_pred = [0]*n_classes
n_iou = [0]*n_classes
n_images = 0
det_boxes = list()
det_labels = list()
det_scores = list()
true_boxes = list()
true_labels = list()
true_difficulties = list()
gt_box = 0
pred_box = 0
for batch_idx, (images,targets) in enumerate(test_loader):
images = images.to(device) # (batch_size (N), 3, H, W)
labels = [torch.Tensor(l).to(device) for l in targets]
bs = len(labels)
# compute output
with torch.no_grad():
detections = model(images) # (N, num_defaultBoxes, 4), (N, num_defaultBoxes, n_classes)
for sample_i in range(bs):
# Get labels for sample where width is not zero (dummies)
# print(len(labels[0]),labels[sample_i])
target_sample = labels[sample_i]
gt_box = gt_box + len(target_sample)
tx1, tx2 = torch.unsqueeze((target_sample[...,1] - target_sample[...,3] / 2),1), torch.unsqueeze((target_sample[...,1] + target_sample[...,3] / 2),1)
ty1, ty2 = torch.unsqueeze((target_sample[...,2] - target_sample[...,4] / 2),1), torch.unsqueeze((target_sample[...,2] + target_sample[...,4] / 2),1)
box = torch.cat((tx1,ty1,tx2,ty2),1)
size = target_sample.size(0)
true_boxes.append(box)
true_labels.append(target_sample[...,0])
true_difficulties.append(torch.zeros(size, requires_grad=False))
#print(detections[0][sample_i].shape,detections[1][sample_i].shape)
preds = detections[sample_i]
pred_box = pred_box + len(preds)
if preds is not None:
det_boxes.append(preds[...,:4])
det_labels.append((preds[...,6]+1).to(device))
conf = (preds[...,4] * preds[...,5]).to(device)
det_scores.append(conf)
else :
empty = torch.empty(0).to(device)
det_boxes.append(empty)
det_labels.append(empty)
det_scores.append(empty)
n_images = n_images + 1
# measure elapsed time
sum_gt = sum(n_gt)
sum_n_pred= sum(n_pred)
# plot progress
bar.suffix = '({batch}/{size}) | Total: {total:} | ETA: {eta:}| n_img: {n_img:} | gt_box: {gt_box:} | pred_box: {pred_box:}'.format(
batch=batch_idx + 1,
size=len(test_loader),
total=bar.elapsed_td,
eta=bar.eta_td,
n_img=n_images,
gt_box=gt_box,
pred_box=pred_box
)
bar.next()
bar.finish()
print("\nVal conf. is %f\n" % (model.yolo_losses[0].val_conf))
model.yolo_losses[0].val_conf = adjust_confidence(gt_box,pred_box,model.yolo_losses[0].val_conf)
model.yolo_losses[1].val_conf = adjust_confidence(gt_box,pred_box,model.yolo_losses[1].val_conf)
# Calculate mAP
APs, mAP, TP, FP = calculate_mAP(det_boxes, det_labels, det_scores, true_boxes, true_labels, true_difficulties, n_classes=21)
pp.pprint(APs)
print('\nMean Average Precision (mAP): %.3f' % mAP)
return mAP
def save_checkpoint(state, is_best,model,config, checkpoint='checkpoint', filename='checkpoint.pth.tar',export_path = 'checkpoint'):
filepath = os.path.join(checkpoint, filename)
torch.save(state, filepath)
#save_onnx(filepath,model)
if is_best:
torch.save(model, os.path.join(checkpoint, 'model_best.pth.tar'))
#dummy_input = torch.randn(1, 3, config["img_w"], config["img_h"]) #
#torch.onnx.export(model, dummy_input,os.path.join(export_path, 'model_best.onnx'))
def adjust_confidence(gt_box_num, pred_box_num, conf):
    # Nudge the validation confidence threshold so the detector keeps emitting
    # roughly two to three predicted boxes per ground-truth box.
    if pred_box_num > gt_box_num * 3:
        conf = conf + 0.01
    elif pred_box_num < gt_box_num * 2 and conf > 0.01:
        conf = conf - 0.01
    return conf
def adjust_learning_rate(optimizer, scale):
"""
Scale learning rate by a specified factor.
:param optimizer: optimizer whose learning rate must be shrunk.
:param scale: factor to multiply learning rate with.
"""
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr'] * scale
print("Change learning rate.\n The new LR is %f\n" % (optimizer.param_groups[0]['lr']))
def get_params():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Training')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=0.0004, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--learning_rate', default=0.0007, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--warm-up', '--warmup', default=[], type=float,
metavar='warmup', help='warm up learning rate')
parser.add_argument('--epochs', default=300, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--schedule', type=int, nargs='+', default=[100,170,240],
help='Decrease learning rate at these epochs.')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-c', '--checkpoint', default='checkpoint', type=str, metavar='PATH',
help='path to save checkpoint (default: checkpoint)')
#parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
# help='evaluate model on validation set')
parser.add_argument('-o', '--export', dest='export', default='checkpoint', type=str, metavar='PATH',
help='path to export checkpoint (default: checkpoint)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='Evaluate mAP? default=False')
parser.add_argument('--mosaic_num', default=None, type=int, help='mosaic number in image augmentation')
parser.add_argument('--ignore_thresh_1', default=None, type=float, help='ignore layer 1')
parser.add_argument('--ignore_thresh_2', default=None, type=float, help='ignore layer 2')
parser.add_argument('--iou_thresh', default=None, type=float, help='ignore iou thresh')
parser.add_argument('--expand_scale', default=None, type=float, help='image augmentation expand scale')
parser.add_argument('--iou_weighting', default=None, type=float, help='iou loss weighting')
args = parser.parse_args()
return args
if __name__ == '__main__':
try:
# get parameters form tuner
tuner_params = nni.get_next_parameter()
#logger.debug(tuner_params)
print(tuner_params)
params = merge_parameter(get_params(), tuner_params)
id = get_sequence_id()
params.checkpoint = 'checkpoints/%d' % id
#print(params)
main(params)
except Exception as exception:
#logger.exception(exception)
raise
|
StarcoderdataPython
|
1619937
|
from autodesk.scheduler import Scheduler
from autodesk.states import UP, DOWN
from pandas import Timedelta
def test_active_for_30minutes_with_60minute_limit_and_desk_down():
active_time = Timedelta(minutes=30)
limits = (Timedelta(minutes=60), Timedelta(minutes=30))
scheduler = Scheduler(limits)
delay = scheduler.compute_delay(active_time, DOWN)
assert delay == Timedelta(minutes=30)
def test_active_for_30minutes_with_30minute_limit_and_desk_up():
active_time = Timedelta(minutes=30)
limits = (Timedelta(minutes=60), Timedelta(minutes=30))
scheduler = Scheduler(limits)
delay = scheduler.compute_delay(active_time, UP)
assert delay == Timedelta(0)
|
StarcoderdataPython
|
3339105
|
<reponame>parikshitgupta1/leetcode<gh_stars>0
class Solution:
def invertTree(self, root: TreeNode) -> TreeNode:
def invert(root):
if root == None:
return
else:
# temp = root
root.left, root.right = root.right, root.left
invert(root.right)
invert(root.left)
# temp = root.left
# root.left = root.right
# root.right = temp
return root
return invert(root)
|
StarcoderdataPython
|
1795400
|
class Classification:
def __init__(self, decision=0, indexList=0):
self.cObject = ""
self.listOfClassifiedCorrectly = 0
self.listOfClassified = 0
def setCObject(self, a):
self.cObject = a
def setListOfClassifiedCorrectly(self, a):
self.listOfClassifiedCorrectly = a
def setListOfClassified(self, a):
self.listOfClassified = a
def getCObject(self):
return self.cObject
def getListOfClassifiedCorrectly(self):
return self.listOfClassifiedCorrectly
def getListOfClassified(self):
return self.listOfClassified
|
StarcoderdataPython
|
3363824
|
import os, platform, sys
import IDLC.idldocument as IDLDocument
import IDLC.idlproperty as IDLProperty
import IDLC.idlprotocol as IDLProtocol
import sjson
import IDLC.filewriter as filewriter
import genutil as util
import ntpath
class IDLCodeGenerator:
def __init__(self):
self.document = None
self.documentPath = ""
self.version = 0
#------------------------------------------------------------------------------
##
#
def SetVersion(self, v):
self.version = v
#------------------------------------------------------------------------------
##
#
def SetDocument(self, input) :
self.documentPath = input
self.documentBaseName = os.path.splitext(input)[0]
self.documentDirName = os.path.dirname(self.documentBaseName)
head, tail = ntpath.split(self.documentBaseName)
self.documentFileName = tail or ntpath.basename(head)
fstream = open(self.documentPath, 'r')
self.document = sjson.loads(fstream.read())
fstream.close()
#------------------------------------------------------------------------------
##
#
def GenerateHeader(self, hdrPath) :
f = filewriter.FileWriter()
f.Open(hdrPath)
f.WriteLine("// NIDL #version:{}#".format(self.version))
propertyLibraries = []
# Add additional dependencies to document.
if "dependencies" in self.document:
for dependency in self.document["dependencies"]:
fileName = '{}.h'.format(os.path.splitext(dependency)[0]).lower()
propertyLibraries.append(fileName)
if "messages" in self.document:
IDLDocument.AddInclude(f, "game/messaging/message.h")
IDLProperty.ParseProperties(self.document)
if (IDLProperty.ContainsResourceTypes()):
IDLDocument.AddInclude(f, "resources/resource.h")
if (IDLProperty.ContainsEntityTypes()):
IDLDocument.AddInclude(f, "game/entity.h")
IDLDocument.WriteIncludeHeader(f)
IDLDocument.WriteIncludes(f, self.document)
IDLDocument.WriteIncludes(f, propertyLibraries)
hasMessages = "messages" in self.document
hasProperties = "properties" in self.document
hasEnums = "enums" in self.document
if hasProperties or hasMessages or hasEnums:
IDLDocument.BeginNamespace(f, self.document)
if hasEnums:
IDLProperty.WriteEnumeratedTypes(f, self.document)
if hasMessages:
IDLProtocol.WriteMessageDeclarations(f, self.document)
if hasProperties:
IDLProperty.WritePropertyHeaderDeclarations(f, self.document)
IDLDocument.BeginNamespaceOverride(f, self.document, "Details")
IDLProperty.WritePropertyHeaderDetails(f, self.document)
IDLDocument.EndNamespaceOverride(f, self.document, "Details")
f.WriteLine("")
# Add additional dependencies to document.
if "dependencies" in self.document:
for dependency in self.document["dependencies"]:
fstream = open(dependency, 'r')
depDocument = sjson.loads(fstream.read())
deps = depDocument["properties"]
# Add all properties to this document
self.document["properties"].update(deps)
fstream.close()
IDLDocument.EndNamespace(f, self.document)
f.Close()
return
#------------------------------------------------------------------------------
##
#
def GenerateSource(self, srcPath, hdrPath) :
f = filewriter.FileWriter()
f.Open(srcPath)
f.WriteLine("// NIDL #version:{}#".format(self.version))
head, tail = ntpath.split(hdrPath)
hdrInclude = tail or ntpath.basename(head)
head, tail = ntpath.split(srcPath)
srcFileName = tail or ntpath.basename(head)
IDLDocument.WriteSourceHeader(f, srcFileName)
IDLDocument.AddInclude(f, hdrInclude)
IDLDocument.AddInclude(f, "core/sysfunc.h")
IDLDocument.AddInclude(f, "util/stringatom.h")
IDLDocument.AddInclude(f, "memdb/typeregistry.h")
IDLDocument.AddInclude(f, "game/propertyserialization.h")
IDLDocument.AddInclude(f, "game/propertyinspection.h")
hasMessages = "messages" in self.document
if hasMessages:
IDLDocument.AddInclude(f, "scripting/python/conversion.h")
# Add additional dependencies to document.
if "dependencies" in self.document:
for dependency in self.document["dependencies"]:
fstream = open(dependency, 'r')
depDocument = sjson.loads(fstream.read())
deps = depDocument["properties"]
# Add all properties to this document
self.document["properties"].update(deps)
fstream.close()
hasStructs = IDLProperty.HasStructProperties()
hasEnums = "enums" in self.document
        if hasEnums or hasStructs:
            IDLDocument.AddInclude(f, "pjson/pjson.h")
            IDLDocument.BeginNamespaceOverride(f, self.document, "IO")
            if hasEnums:
                IDLProperty.WriteEnumJsonSerializers(f, self.document)
            if hasStructs:
                IDLProperty.WriteStructJsonSerializers(f, self.document)
            IDLDocument.EndNamespaceOverride(f, self.document, "IO")
hasProperties = "properties" in self.document
if hasProperties or hasMessages:
IDLDocument.BeginNamespace(f, self.document)
if hasMessages:
IDLProtocol.WriteMessageImplementation(f, self.document)
if "properties" in self.document:
IDLDocument.BeginNamespaceOverride(f, self.document, "Details")
IDLProperty.WritePropertySourceDefinitions(f, self.document)
IDLDocument.EndNamespaceOverride(f, self.document, "Details")
f.WriteLine("")
IDLDocument.EndNamespace(f, self.document)
f.Close()
|
StarcoderdataPython
|
1692686
|
<reponame>Xiaoming94/TIFX05-MScThesis-HenryYang
import utils
import ANN as ann
import os
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
import utils.digitutils as dutils
import cv2
import keras.callbacks as clb
import keras.optimizers as opt
network_model1 = '''
{
"input_shape" : [784],
"layers" : [
{
"type" : "Dense",
"units" : 200,
"activation" : "relu",
"kernel_regularizer" : {
"type" : "l2",
"lambda" : 0.001
},
"activity_regularizer" : {
"type" : "l2",
"lambda" : 0.0001
}
},
{
"type" : "Dense",
"units" : 200,
"activation" : "relu",
"kernel_regularizer" : {
"type" : "l2",
"lambda" : 0.001
},
"activity_regularizer" : {
"type" : "l2",
"lambda" : 0.0001
}
},
{
"type" : "Dense",
"units" : 200,
"activation" : "relu",
"kernel_regularizer" : {
"type" : "l2",
"lambda" : 0.001
},
"activity_regularizer" : {
"type" : "l2",
"lambda" : 0.0001
}
},
{
"type" : "Dense",
"units" : 10,
"activation" : "softmax",
"kernel_regularizer" : {
"type" : "l2",
"lambda" : 0.001
},
"activity_regularizer" : {
"type" : "l2",
"lambda" : 0.0001
}
}
]
}
'''
network_model2 = '''
{
"input_shape" : [28,28,1],
"layers" : [
{
"type" : "Conv2D",
"units" : 48,
"kernel_size" : [3,3],
"activation" : "relu",
"kernel_regularizer" : {
"type" : "l2",
"lambda" : 0.0001
}
},
{
"type" : "BatchNormalization",
"axis" : -1
},
{
"type" : "Conv2D",
"units" : 96,
"kernel_size" : [3,3],
"activation" : "relu",
"kernel_regularizer" : {
"type" : "l2",
"lambda" : 0.0001
            }
},
{
"type" : "BatchNormalization",
"axis" : -1
},
{
"type" : "Conv2D",
"units" : 64,
"kernel_size" : [3,3],
"activation" : "relu",
"kernel_regularizer" : {
"type" : "l2",
"lambda" : 0.0001
            }
},
{
"type" : "BatchNormalization",
"axis" : -1
},
{
"type" : "MaxPooling2D",
"pool_size" : [2,2],
"strides" : [2,2]
},
{
"type" : "Flatten"
},
{
"type" : "Dense",
"units" : 100,
"activation" : "relu",
"kernel_regularizer" : {
"type" : "l2",
"lambda" : 0.001
}
},
{
"type" : "Dense",
"units" : 10,
"activation" : "softmax"
"kernel_regularizer" : {
"type" : "l2",
"lambda" : 0.0001
},
"activity_regularizer" : {
"type" : "l2",
"lambda" : 0.0001
}
}
]
}
'''
def calc_pred_vars(mempred):
    # Mean, over the M ensemble members, of the variance of each member's
    # K-dimensional prediction vector (E[x^2] - E[x]^2 per member).
    M, K = mempred.shape
    cumsum = 0
    for k in mempred:
        cumsum += (np.sum(k * k) / K - ((np.sum(k) / K) ** 2))
    return cumsum / M
def experiment(network_model, reshape_mode = 'mlp'):
reshape_funs = {
"conv" : lambda d : d.reshape(-1,28,28,1),
"mlp" : lambda d : d.reshape(-1,784)
}
xtrain,ytrain,xtest,ytest = utils.load_mnist()
reshape_fun = reshape_funs[reshape_mode]
xtrain,xtest = reshape_fun(xtrain),reshape_fun(xtest)
test_data = utils.load_processed_data('notmnist')
letters = list(test_data.keys())
ensemble_size = 20
epochs = 50
trials = 10
results = {
'A': [],
'B': [],
'C': [],
'D': [],
'E': [],
'F': [],
'G': [],
'H': [],
'I': [],
'J': []
}
for t in range(trials):
l_xtrain = []
l_xval = []
l_ytrain = []
l_yval = []
for _ in range(ensemble_size):
t_xtrain,t_ytrain,t_xval,t_yval = utils.create_validation(xtrain,ytrain,(1/6))
l_xtrain.append(t_xtrain)
l_xval.append(t_xval)
l_ytrain.append(t_ytrain)
l_yval.append(t_yval)
es = clb.EarlyStopping(monitor='val_loss',patience=2,restore_best_weights = True)
inputs, outputs, train_model, model_list, merge_model = ann.build_ensemble([network_model], pop_per_type=ensemble_size, merge_type="Average")
#print(np.array(train_model.predict([xtest]*ensemble_size)).transpose(1,0,2).shape)
train_model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['acc'])
train_model.fit(l_xtrain,l_ytrain,epochs = epochs, batch_size=100 ,validation_data = (l_xval,l_yval),callbacks=[es])
for letter in letters:
inputs = test_data[letter]
inputs = utils.normalize_data(inputs)
inputs = reshape_fun(inputs)
preds = merge_model.predict([inputs]*ensemble_size)
mem_preds = np.array(train_model.predict([inputs]*ensemble_size)).transpose(1,2,0)
bits = list(map(stats.entropy,preds))
s_q = list(map(calc_pred_vars,mem_preds))
results[letter].extend(list(zip(bits,s_q)))
return results
utils.setup_gpu_session()
ensemble = experiment(network_model1, 'mlp')
utils.save_processed_data(ensemble , "distribution_not_mnist")
#plt.figure()
#plt.subplot(221)
#plt.hist(ensemble['mnist_correct'],color = 'blue')
#plt.xlabel('entropy')
#plt.ylabel('ncorrect')
#plt.subplot(222)
#plt.hist(ensemble['mnist_wrong'],color = 'red')
#plt.xlabel('entropy')
#plt.ylabel('nwrong')
#plt.subplot(223)
#plt.hist(ensemble['digits_correct'],color = 'blue')
#plt.xlabel('entropy')
#plt.ylabel('ncorrect')
#plt.subplot(224)
#plt.hist(ensemble['digits_wrong'],color = 'red')
#plt.xlabel('entropy')
#plt.ylabel('nwrong')
#plt.show()
|
StarcoderdataPython
|
3318756
|
# Copyright (c) 2019 Works Applications Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from string import ascii_lowercase
from itertools import product
from sudachipy import dictionary
class TestLargeUserDict(unittest.TestCase):
def setUp(self):
resource_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'resources')
self.dict_ = dictionary.Dictionary(os.path.join(resource_dir, 'sudachi_large_user.json'), resource_dir)
self.tokenizer_obj = self.dict_.create()
def test_part_of_speech(self):
ms = self.tokenizer_obj.tokenize('やまもも')
self.assertEqual(1, len(ms))
m = ms[0]
pid = m.part_of_speech_id()
self.assertTrue(self.dict_.grammar.get_part_of_speech_size() > pid)
# Exploit the cache space
num = 0
for combo in product(ascii_lowercase, repeat=3):
if num > 1024:
break
lex = ''.join(combo)
self.tokenizer_obj.tokenize(lex)
num += 1
ms = self.tokenizer_obj.tokenize('やまもも')
self.assertEqual(pid, ms[0].part_of_speech_id())
|
StarcoderdataPython
|
1712168
|
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey has `on_delete` set to the desired behavior.
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
# It was just to try out the parsing do not try to change this with main models file
from django.db import models
class AuthGroup(models.Model):
name = models.CharField(unique=True, max_length=80)
class Meta:
managed = False
db_table = 'auth_group'
class AuthGroupPermissions(models.Model):
group = models.ForeignKey(AuthGroup, models.DO_NOTHING)
permission = models.ForeignKey('AuthPermission', models.DO_NOTHING)
class Meta:
managed = False
db_table = 'auth_group_permissions'
unique_together = (('group', 'permission'),)
class AuthPermission(models.Model):
name = models.CharField(max_length=255)
content_type = models.ForeignKey('DjangoContentType', models.DO_NOTHING)
codename = models.CharField(max_length=100)
class Meta:
managed = False
db_table = 'auth_permission'
unique_together = (('content_type', 'codename'),)
class AuthUser(models.Model):
password = models.CharField(max_length=128)
last_login = models.DateTimeField(blank=True, null=True)
is_superuser = models.BooleanField()
username = models.CharField(unique=True, max_length=150)
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=150)
email = models.CharField(max_length=254)
is_staff = models.BooleanField()
is_active = models.BooleanField()
date_joined = models.DateTimeField()
class Meta:
managed = False
db_table = 'auth_user'
class AuthUserGroups(models.Model):
user = models.ForeignKey(AuthUser, models.DO_NOTHING)
group = models.ForeignKey(AuthGroup, models.DO_NOTHING)
class Meta:
managed = False
db_table = 'auth_user_groups'
unique_together = (('user', 'group'),)
class AuthUserUserPermissions(models.Model):
user = models.ForeignKey(AuthUser, models.DO_NOTHING)
permission = models.ForeignKey(AuthPermission, models.DO_NOTHING)
class Meta:
managed = False
db_table = 'auth_user_user_permissions'
unique_together = (('user', 'permission'),)
class DjangoAdminLog(models.Model):
action_time = models.DateTimeField()
object_id = models.TextField(blank=True, null=True)
object_repr = models.CharField(max_length=200)
action_flag = models.SmallIntegerField()
change_message = models.TextField()
content_type = models.ForeignKey('DjangoContentType', models.DO_NOTHING, blank=True, null=True)
user = models.ForeignKey(AuthUser, models.DO_NOTHING)
class Meta:
managed = False
db_table = 'django_admin_log'
class DjangoContentType(models.Model):
app_label = models.CharField(max_length=100)
model = models.CharField(max_length=100)
class Meta:
managed = False
db_table = 'django_content_type'
unique_together = (('app_label', 'model'),)
class DjangoMigrations(models.Model):
app = models.CharField(max_length=255)
name = models.CharField(max_length=255)
applied = models.DateTimeField()
class Meta:
managed = False
db_table = 'django_migrations'
class DjangoSession(models.Model):
session_key = models.CharField(primary_key=True, max_length=40)
session_data = models.TextField()
expire_date = models.DateTimeField()
class Meta:
managed = False
db_table = 'django_session'
class AdditionalFields(models.Model):
additional_fields_uuid = models.CharField(unique=True, max_length=255)
additional_fields = models.TextField() # This field type is a guess.
class Meta:
managed = False
db_table = 'quickstart_additionalfields'
class Frequency(models.Model):
frequency_uuid = models.CharField(unique=True, max_length=255)
frequency = models.CharField(max_length=255)
class Meta:
managed = False
db_table = 'quickstart_frequency'
class Indicator(models.Model):
indicator_uuid = models.CharField(unique=True, max_length=255)
level = models.CharField(max_length=255)
objectives = models.CharField(max_length=255)
name = models.CharField(max_length=255)
sector = models.TextField() # This field type is a guess.
subsector = models.TextField() # This field type is a guess.
tags = models.TextField(blank=True, null=True) # This field type is a guess.
number = models.CharField(max_length=255, blank=True, null=True)
definition = models.TextField(blank=True, null=True)
justification = models.TextField(blank=True, null=True)
unit_of_measure = models.CharField(max_length=135, blank=True, null=True)
disaggregation = models.CharField(max_length=255)
baseline = models.CharField(max_length=255, blank=True, null=True)
lop_target = models.IntegerField()
rationale_for_target = models.TextField(blank=True, null=True)
means_of_verification = models.CharField(max_length=255, blank=True, null=True)
data_collection_method = models.CharField(max_length=255, blank=True, null=True)
denominator = models.CharField(max_length=255, blank=True, null=True)
numerator = models.CharField(max_length=255, blank=True, null=True)
data_points = models.TextField(blank=True, null=True)
responsible_person = models.CharField(max_length=255, blank=True, null=True)
method_of_analysis = models.CharField(max_length=255, blank=True, null=True)
information_use = models.CharField(max_length=255, blank=True, null=True)
quality_assurance = models.TextField(blank=True, null=True)
data_issues = models.TextField(blank=True, null=True)
indicator_changes = models.TextField(blank=True, null=True)
comments = models.TextField(blank=True, null=True)
key_performance_indicator = models.BooleanField()
create_date = models.DateTimeField(blank=True, null=True)
edit_date = models.DateTimeField(blank=True, null=True)
notes = models.TextField(blank=True, null=True)
additional_fields = models.ForeignKey(AdditionalFields, models.DO_NOTHING, blank=True, null=True)
data_collection_frequency = models.ForeignKey(Frequency, models.DO_NOTHING, blank=True, null=True)
data_source = models.ForeignKey('Source', models.DO_NOTHING, blank=True, null=True)
reporting_frequency = models.ForeignKey(Frequency, models.DO_NOTHING, blank=True, null=True)
class Meta:
managed = False
db_table = 'quickstart_indicator'
class Source(models.Model):
source_uuid = models.CharField(unique=True, max_length=255)
name = models.CharField(max_length=255)
class Meta:
managed = False
db_table = 'quickstart_source'
|
StarcoderdataPython
|
1628615
|
"""
A Python dictionary containing information to be associated with the twelve keys on a
MacroPad.
"""
from adafruit_macropad import MacroPad
macropad = MacroPad()
"""
** Understanding the Dictionary **
The following explains how to configure each entry below.
Sound:
Can be an integer for a tone in Hz, e.g.196, OR, a string for a wav file name, e.g. "cat.wav".
Label:
The label you would like to appear on the display. Should be limited to 6 characters to fit.
Keycode type:
You must update this to match the type of key sequence you're sending.
KC = Keycode
CC = ConsumerControlCode
Key sequence:
The Keycode, sequence of Keycodes, or ConsumerControlCode to send.
"""
shortcut_keys = {
'macros': [
# (Sound, Label, Keycode type, Key sequence)
# 1st row ----------
(196, 'Esc', 'KC', [macropad.Keycode.ESCAPE]),
(220, 'Tab', 'KC', [macropad.Keycode.TAB]),
(246, 'Vol+', 'CC', [macropad.ConsumerControlCode.VOLUME_INCREMENT]),
# 2nd row ----------
(262, 'Play', 'CC', [macropad.ConsumerControlCode.PLAY_PAUSE]),
(294, 'Home', 'KC', [macropad.Keycode.HOME]),
(330, 'Vol-', 'CC', [macropad.ConsumerControlCode.VOLUME_DECREMENT]),
# 3rd row ----------
(349, 'End', 'KC', [macropad.Keycode.END]),
(392, 'Copy', 'KC', [macropad.Keycode.COMMAND, macropad.Keycode.C]),
(440, 'Pg Up', 'KC', [macropad.Keycode.PAGE_UP]),
# 4th row ----------
(494, 'Quit', 'KC', [macropad.Keycode.COMMAND, macropad.Keycode.Q]),
(523, 'Paste', 'KC', [macropad.Keycode.COMMAND, macropad.Keycode.V]),
(587, 'Pg Dn', 'KC', [macropad.Keycode.PAGE_DOWN]),
]
}
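# --- Illustrative consumption loop (not part of the original file) -----------
# A minimal sketch of how a main loop might act on the entries above. The tone
# duration and overall loop structure are assumptions; play_tone/play_file,
# keyboard.send and consumer_control.send are the standard MacroPad helpers.
while True:
    key_event = macropad.keys.events.get()
    if key_event and key_event.pressed:
        sound, label, kc_type, sequence = shortcut_keys['macros'][key_event.key_number]
        if isinstance(sound, int):
            macropad.play_tone(sound, 0.2)   # tone frequency in Hz, 0.2 s
        else:
            macropad.play_file(sound)        # wav file name
        if kc_type == 'KC':
            macropad.keyboard.send(*sequence)
        else:                                # 'CC'
            macropad.consumer_control.send(sequence[0])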
|
StarcoderdataPython
|
2102
|
from django.conf import settings
def less_settings(request):
return {
'use_dynamic_less_in_debug': getattr(settings, 'LESS_USE_DYNAMIC_IN_DEBUG', True)
}
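# --- Illustrative wiring (not part of the original file) ---------------------
# The processor is enabled from settings.py by adding its dotted path to the
# template OPTIONS; "myproject.context_processors.less_settings" is an assumed
# location for the function above.
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.request",
                "myproject.context_processors.less_settings",  # assumed path
            ],
        },
    },
]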
|
StarcoderdataPython
|
193142
|
from django.contrib import admin
from .models import (
EveCategory,
EveConstellation,
EveGroup,
EveMoon,
EvePlanet,
EveRegion,
EveSolarSystem,
EveType,
)
class EveUniverseEntityModelAdmin(admin.ModelAdmin):
def has_module_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
def has_change_permission(self, request, obj=None):
return False
def has_add_permission(self, request):
return False
ordering = ["name"]
search_fields = ["name"]
@admin.register(EveCategory)
class EveCategoryAdmin(EveUniverseEntityModelAdmin):
pass
@admin.register(EveConstellation)
class EveConstellationAdmin(EveUniverseEntityModelAdmin):
pass
@admin.register(EveGroup)
class EveGroupAdmin(EveUniverseEntityModelAdmin):
pass
@admin.register(EveMoon)
class EveMoonAdmin(EveUniverseEntityModelAdmin):
pass
@admin.register(EveRegion)
class EveRegionAdmin(EveUniverseEntityModelAdmin):
pass
@admin.register(EvePlanet)
class EvePlanetAdmin(EveUniverseEntityModelAdmin):
pass
@admin.register(EveSolarSystem)
class EveSolarSystemAdmin(EveUniverseEntityModelAdmin):
pass
@admin.register(EveType)
class EveTypeAdmin(EveUniverseEntityModelAdmin):
pass
|
StarcoderdataPython
|
3326179
|
class Solution:
def heightChecker(self, heights: List[int]) -> int:
sortedH = sorted(heights)
count = 0
        for i in range(len(heights)):
            if sortedH[i] != heights[i]:
                count += 1
return count
|
StarcoderdataPython
|
1729230
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('README.rst') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(
name='api_ai_graph',
version='0.1.0',
description='Render graphs based on API.AI intents.',
long_description=readme,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/franciscoafonsoo/api_ai_graph',
license=license,
packages=find_packages(exclude=('tests', 'docs')),
install_requires=["nose", "sphinx", "graphviz", "numpy", "pyqt5"],
dependency_links=[
"https://bitbucket.org/fchampalimaud/logging-bootstrap/get/master.zip",
"https://github.com/UmSenhorQualquer/pysettings/archive/master.zip",
"https://github.com/UmSenhorQualquer/pyforms/archive/master.zip"
],
)
|
StarcoderdataPython
|
42570
|
import matplotlib.pyplot as plt
from string import ascii_uppercase
def countSpecific(_path, _letter):
    _letter = _letter.strip().upper()
    # Read as text so the bytes-literal prefix and escape sequences of str(bytes)
    # are not counted as letters.
    with open(_path, 'r', errors='ignore') as file:
        text = file.read()
    return text.count(_letter) + text.count(_letter.lower())
def countAll(_path):
    with open(_path, 'r', errors='ignore') as file:
        text = file.read()
    letters = dict.fromkeys(ascii_uppercase, 0)
    for char in text:
        # Guard against non-ASCII alphabetic characters missing from the dict.
        if char.isalpha() and char.upper() in letters:
            letters[char.upper()] += 1
    return letters
path = input("What file would you like to use? (text.txt) ")
D = countAll("src\\Other\\" + path)
# D = D | countAll("src\\Other\\" + path)
# S = {k: v for k, v in sorted(D.items(), key=lambda item: item[1])}
print(D)
plt.bar(range(len(D)), list(D.values()), align='center')
plt.xticks(range(len(D)), list(D.keys()))
plt.show()
|
StarcoderdataPython
|
1716720
|
<reponame>Keeper-Security/secrets-manager
# _ __
# | |/ /___ ___ _ __ ___ _ _ ®
# | ' </ -_) -_) '_ \/ -_) '_|
# |_|\_\___\___| .__/\___|_|
# |_|
#
# Keeper Secrets Manager
# Copyright 2021 Keeper Security Inc.
# Contact: <EMAIL>
class Context:
def __init__(self, transmission_key, client_id, client_key):
self.transmissionKey = transmission_key
self.clientId = client_id
self.clientKey = client_key
class TransmissionKey:
def __init__(self, publicKeyId, key, encryptedKey):
self.publicKeyId = publicKeyId
self.key = key
self.encryptedKey = encryptedKey
class GetPayload:
def __init__(self):
self.clientVersion = None
self.clientId = None
self.publicKey = None
self.requestedRecords = None
class CreatePayload:
def __init__(self):
self.clientVersion = None
self.clientId = None
self.recordUid = None
self.recordKey = None
self.folderUid = None
self.folderKey = None
self.data = None
class FileUploadPayload:
def __init__(self):
self.clientVersion = None
self.clientId = None
self.fileRecordUid = None
self.fileRecordKey = None
self.fileRecordData = None
self.ownerRecordUid = None
self.ownerRecordData = None
self.linkKey = None
self.fileSize = None
class UpdatePayload:
def __init__(self):
self.clientVersion = None
self.clientId = None
self.recordUid = None
self.data = None
self.revision = None
class EncryptedPayload:
def __init__(self, encrypted_payload, signature):
self.encrypted_payload = encrypted_payload
self.signature = signature
class KSMHttpResponse:
def __init__(self, status_code, data, http_response=None):
self.status_code = status_code
self.data = data
self.http_response = http_response
|
StarcoderdataPython
|
106638
|
import numpy as np
import matplotlib.pyplot as plt
import ReinforcedPy as rp
import matplotlib.patches as mpatches
concreto28 = rp.Concreto()
acero420 = rp.AceroRefuerzo()
viga=rp.Elemento(0.3,0.6,[concreto28,acero420],6)
viga.generarDesdeCarga(50)
viga._test_secciones()
print(viga.secciones[0].momentoNominal())
print(viga.secciones[3].momentoNominal())
print(viga.secciones[-1].momentoNominal())
viga.diagramaMomentoNominal()
viga.secciones[3].dibujar()
|
StarcoderdataPython
|
92145
|
try:
from json import load
from math import floor
from os import path
from random import choice, sample, randrange
except ImportError:
    # Re-raise the original exception so the missing module is still named.
    raise
class Fighter:
def __init__(self, as_str, as_con, as_dex,
as_int, as_wis, as_cha, char_race, char_background, level):
self.classname = "Fighter"
self.level = level
self.subclass = ""
self.as_scores = []
for x, y in zip(char_race.bonus_asi, [as_str, as_dex, as_con, as_int, as_wis, as_cha]):
self.as_scores.append(x + y)
# Handle ASIs after level ups.
asi_points = 0
for x in [4, 6, 8, 12, 14, 16, 19]:
if self.level >= x:
asi_points += 2
for x in range(0, 6):
while self.as_scores[x] < 20 and asi_points > 0:
self.as_scores[x] += 1
asi_points -= 1
self.as_mods = []
for x in self.as_scores:
self.as_mods.append(floor((x - 10) / 2))
self.hp = (10 + self.as_mods[2]) + sum([randrange(1, 11) for x in range(0,
self.level - 1)])
self.armor_profs = [
"Heavy armor",
"Medium armor",
"Light armor",
"Shields",
]
self.weapon_profs = [
"Simple weapons",
"Martial weapons"
]
self.tool_profs = []
for x in char_background.tool_profs:
self.tool_profs.append(x)
self.saving_throws = ["Strength", "Constitution"]
self.equipment = []
resource_path = path.join(path.dirname(__file__))
with open(resource_path + '/../resources/items.json') as items:
martial_weapon = load(items)['weapons']['martial']
equipment_choices = [
# Equipment A
[["Chain mail"],
["Leather armor", "Longbow", "20 arrows"]],
# Equipment B
[[choice(martial_weapon), "Shield"],
[choice(martial_weapon), choice(martial_weapon)]],
# Equipment C
[["Light crossbow", "20 bolts"],
["Handaxe", "Handaxe"]],
# Equipment D
[["Dungeoneer's pack"],
["Explorer's pack"]]
]
for x in equipment_choices:
for y in choice(x):
self.equipment.append(y)
self.skills = []
skill_choices = ["Acrobatics", "Animal Handling", "Athletics",
"History", "Insight", "Intimidation", "Perception", "Survival"]
for x in char_background.skill_profs:
if x in skill_choices:
skill_choices.remove(x)
self.skills = char_background.skill_profs + sample(skill_choices, 2)
self.features = []
self.fighting_styles = [
["Archery",
"You gain a +2 bonus to attack rolls you make with ranged weapons."],
["Defense",
"While you are wearing armor, you gain a +1 bonus to AC."],
["Dueling",
"When you are wielding a melee weapon in one hand and no other weapons, "
"you gain a +2 bonus to damage rolls with that weapon."],
["Great Weapon Fighting",
"When you roll a 1 or 2 on a damage die for an attack you make with a melee "
"weapon that you are wielding with two hands, you can reroll the die and must "
"use the new roll, even if the new roll is a 1 or a 2. The weapon must have "
"the two-handed or versatile property for you to gain this benefit."],
["Protection",
"When a creature you can see attacks a target other than you that is within "
"5 feet of you, you can use your reaction to impose disadvantage on the attack "
"roll. You must be wielding a shield."],
["Two-Weapon Fighting",
"When you engage in two-weapon fighting, you can add your ability modifier to "
"the damage of the second attack."]
]
y = choice(self.fighting_styles)
self.features.append(y)
# Can't pick the same fighting style more than once.
self.fighting_styles.remove(y)
if self.level >= 3:
choice([self.subclass_champion()])
fighter_feats = {
2: ["Action Surge",
"Starting at 2nd level, you can push yourself beyond your normal limits for a "
"moment. On your turn, you can take one additional action. Once you use this "
"feature, you must finish a short or long rest before you can use it again. "
"Starting at 17th level, you can use it twice before a rest, but only once "
"on the same turn."],
5: ["Extra Attack",
"Beginning at 5th level, you can attack twice, instead of once, whenever "
"you take the Attack action on your turn. The number of attacks increases to "
"three when you reach 11th level in this class and to four when you reach 20th "
"level in this class."],
9: ["Indomitable",
"Beginning at 9th level, you can reroll a saving throw that you fail. If you "
"do so, you must use the new roll, and you can’t use this feature again until "
"you finish a long rest. You can use this feature twice between long rests "
"starting at 13th level and three times between long rests starting at 17th "
"level."],
}
for x in fighter_feats:
if self.level >= x:
self.features.append(fighter_feats[x])
def subclass_champion(self):
self.subclass = "Champion"
subclass_feats = {
3: ["Improved Critical",
"Beginning when you choose this archetype at 3rd level, "
"your " "weapon attacks score a critical hit on a roll "
"of 19 or 20."],
7: ["Remarkable Athlete",
"Starting at 7th level, you can add half your proficiency "
"bonus (round up) to any Strength, Dexterity, or Constitution "
"check you make that doesn’t already use your proficiency "
"bonus. In addition, when you make a running long jump, the "
"distance you can cover increases by a number of feet equal "
"to your Strength modifier."],
# TODO: Implement this functionality
10: choice(self.fighting_styles),
15: ["Superior Critical",
"Starting at 15th level, your weapon attacks score a critical hit "
"on a roll of 18–20."],
18: ["Survivor",
"At 18th level, you attain the pinnacle of resilience in battle. "
"At the start of each of your turns, you regain hit points equal "
"to 5 + your Constitution modifier if you have no more than half of "
"your hit points left. You don’t gain this benefit if you have 0 hit points."]
}
for x in subclass_feats:
if self.level >= x:
self.features.append(subclass_feats[x])
|
StarcoderdataPython
|
105505
|
<reponame>gcewing/PyGUI
#
# Python GUI - Menus - Gtk version
#
from gi.repository import Gtk
from gi.repository import Gdk
from GUI.Globals import application
from GUI.GMenus import Menu as GMenu, MenuItem
def _report_accel_changed_(*args):
    print("Menus: accel_changed:", args)
class Menu(GMenu):
def __init__(self, title, items, **kwds):
GMenu.__init__(self, title, items, **kwds)
self._gtk_menu = Gtk.Menu()
self._gtk_accel_group = Gtk.AccelGroup()
#self._gtk_accel_group.connect('accel_changed', _report_accel_changed_) ###
def _clear_platform_menu(self):
gtk_menu = self._gtk_menu
for gtk_item in gtk_menu.get_children():
gtk_item.destroy()
def _add_separator_to_platform_menu(self):
gtk_item = Gtk.MenuItem()
gtk_item.set_sensitive(0)
gtk_separator = Gtk.HSeparator()
gtk_item.add(gtk_separator)
self._gtk_add_item(gtk_item)
def _gtk_add_item(self, gtk_item):
gtk_item.show_all()
self._gtk_menu.append(gtk_item)
def _add_item_to_platform_menu(self, item, name, command = None, index = None):
checked = item.checked
if checked is None:
gtk_item = Gtk.MenuItem.new_with_label(name)
else:
gtk_item = Gtk.CheckMenuItem.new_with_label(name)
self._gtk_add_item(gtk_item)
if not item.enabled:
gtk_item.set_sensitive(0)
if checked:
gtk_item.set_active(1)
if command:
app = application()
if index is not None:
action = lambda widget: app.dispatch(command, index)
else:
action = lambda widget: app.dispatch(command)
gtk_item.connect('activate', action)
key = item._key
if key:
gtk_modifiers = Gdk.ModifierType.CONTROL_MASK
if item._shift:
gtk_modifiers |= Gdk.ModifierType.SHIFT_MASK
if item._option:
gtk_modifiers |= Gdk.ModifierType.MOD1_MASK
gtk_item.add_accelerator('activate', self._gtk_accel_group,
ord(key), gtk_modifiers, Gtk.AccelFlags.VISIBLE)
|
StarcoderdataPython
|
182573
|
<reponame>hirusha-adi/GifGang<gh_stars>0
import random
from datetime import datetime
import discord
from discord.ext import commands
from module import nsfw
class Nsfw(commands.Cog):
def __init__(self, client: commands.Bot):
self.client = client
@commands.command()
async def eporner(self, ctx, *args):
"""
Usage:
            .eporner [query]
Arguments:
[query]
what to search for
defaults to a randomly selected word
Example:
.eporner | works
.eporner cumshot | works
.eporner lesbians | works
"""
obj = nsfw.Eporner()
try:
            if len(args) != 0:
args_str = ' '.join(args)
images_list_first = obj.search(query=args_str, limit=50)
else:
images_list_first = obj.random(limit=50)
except:
images_list_first = obj.random(limit=50)
video = random.choice(images_list_first)
embed = discord.Embed(
title=video["title"],
url=video["src_url"],
color=0x2699ED,
timestamp=datetime.utcnow()
)
embed.set_author(
name=str(self.client.user.name),
icon_url=str(self.client.user.avatar_url)
)
embed.add_field(
name="Keywords",
value=f'`{video["keywords"]}`',
inline=False
)
embed.add_field(
name="Views",
value=f'`{video["views"]}`',
inline=False
)
embed.add_field(
name="Rating",
value=f'`{video["rate"]}`',
inline=False
)
embed.add_field(
name="Uploaded on",
value=f'`{video["uploaded_on"]}`',
inline=False
)
embed.add_field(
name="Length",
value=f'`{video["length"]}`',
inline=False
)
embed.set_image(url=str(video["url"]))
        embed.set_footer(text=f"Requested by {ctx.author.name}")
await ctx.send(embed=embed)
@commands.command()
async def redtube(self, ctx, *args):
"""
Usage:
.redtube [mode-or-query]
Arguments:
[mode-or-query]
"stars", "star", "pornstar"
if first word is in above words,
will send a random pornstar
if not,
will search for result in redtube
this can be the mode or what to search for
Examples:
.redtube | works
.redtube star | works - send random pornstar
.redtube cumshot | works
"""
obj = nsfw.RedTube()
try:
            if len(args) != 0:
if args[0] in ("stars", "star", "pornstar"):
video = False
video_list = obj.stars(
page=str(
random.randint(
1,
1500
)
)
)
else:
video = True
args_str = ' '.join(args)
video_list = obj.search(query=args_str)
else:
video = True
video_list = obj.random()
except:
video = True
video_list = obj.random()
if video == True:
video = random.choice(video_list)
embed = discord.Embed(
title=video["title"],
url=video["src_url"],
color=0x2699ED,
timestamp=datetime.utcnow()
)
embed.set_author(
name=str(self.client.user.name),
icon_url=str(self.client.user.avatar_url)
)
embed.add_field(
name="Views",
value=f'`{video["views"]}`',
inline=False
)
embed.add_field(
name="Rating",
value=f'`{video["rating"]}` out of `{video["ratings"]}` ratings.',
inline=False
)
embed.add_field(
name="Uploaded on",
value=f'`{video["publish_date"]}`',
inline=False
)
embed.add_field(
name="Length",
value=f'`{video["duration"]}`',
inline=False
)
embed.set_image(url=str(video["url"]))
            embed.set_footer(text=f"Requested by {ctx.author.name}")
await ctx.send(embed=embed)
else:
image = random.choice(video_list)
embed = discord.Embed(
title=image["title"],
url=image["src_url"],
color=0x2699ED,
timestamp=datetime.utcnow()
)
embed.set_author(
name=str(self.client.user.name),
icon_url=str(self.client.user.avatar_url)
)
embed.set_image(url=image["url"])
            embed.set_footer(text=f"Requested by {ctx.author.name}")
await ctx.send(embed=embed)
def setup(client: commands.Bot):
client.add_cog(Nsfw(client))
|
StarcoderdataPython
|
142524
|
<filename>python_code_examples/scraping/xkcd_url_scrape.py<gh_stars>10-100
from bs4 import BeautifulSoup
import requests
start = "https://xkcd.com/2260/"
page = requests.get(start)
soup = BeautifulSoup(page.text, 'html.parser')
prevLink = soup.select('a[rel="prev"]')[0]
print(prevLink)
print( prevLink.get('href') )
url = 'https://xkcd.com' + prevLink.get('href')
print(url)
|
StarcoderdataPython
|
3282458
|
<reponame>zimagi/zima<filename>app/data/log/models.py
from django.utils.timezone import now
from systems.models.index import Model, ModelFacade
class LogFacade(ModelFacade('log')):
def get_field_message_render_display(self, instance, value, short):
from systems.commands import messages
display = []
for record in instance.messages.all().order_by('created'):
msg = messages.AppMessage.get(record.data, decrypt = False)
display.append(msg.format(True))
return "\n".join(display) + "\n"
class Log(Model('log')):
STATUS_SUCCESS = 'success'
STATUS_FAILED = 'failed'
def save(self, *args, **kwargs):
if not self.name:
self.name = "{}{}".format(
now().strftime("%Y%m%d%H%M%S"),
self.facade.generate_token(5)
)
super().save(*args, **kwargs)
def success(self):
return self.status == self.STATUS_SUCCESS
def running(self):
return not self.status or self.status not in (self.STATUS_SUCCESS, self.STATUS_FAILED)
def set_status(self, success):
self.status = self.STATUS_SUCCESS if success else self.STATUS_FAILED
class LogMessage(Model('log_message')):
def __str__(self):
return "{} ({})".format(self.log.command, self.data)
|
StarcoderdataPython
|
4838472
|
<reponame>coderMaruf/leetcode-1
'''
You are a product manager and currently leading a team to develop a new product. Unfortunately, the latest version of your product fails the quality check. Since each version is developed based on the previous version, all the versions after a bad version are also bad.
Suppose you have n versions [1, 2, ..., n] and you want to find out the first bad one, which causes all the following ones to be bad.
You are given an API bool isBadVersion(version) which will return whether version is bad. Implement a function to find the first bad version. You should minimize the number of calls to the API.
Example:
Given n = 5, and version = 4 is the first bad version.
call isBadVersion(3) -> false
call isBadVersion(5) -> true
call isBadVersion(4) -> true
Then 4 is the first bad version.
'''
# The isBadVersion API is already defined for you.
# @param version, an integer
# @return a bool
# def isBadVersion(version):
product_status = None
def isBadVersion(version):
global product_status
return product_status[version-1] == 'x'
class Solution:
def firstBadVersion(self, n):
"""
:type n: int
:rtype: int
"""
left, right = 1, n
while left <= right:
mid = left + (right-left)//2
if isBadVersion(mid):
right = mid - 1
else:
left = mid + 1
return (right+1)
# n : the length of input parameter
## Time Complexity: O( log n )
#
# The overhead in time is the cost of binary search from 1 to n, which is of O( log n )
## Space Complexity: O( 1 )
#
# The overhead in space is the storage for index of binary search, which is of O( 1 )
from collections import namedtuple
TestEntry = namedtuple('TestEntry', 'product_status')
def test_bench():
test_data = [
TestEntry( product_status = ['o','o','o','x','x'] ),
TestEntry( product_status = ['o','o','o','o','x'] ),
TestEntry( product_status = ['o','o','x','x','x'] ),
TestEntry( product_status = ['o','x','x','x','x'] ),
TestEntry( product_status = ['x','x','x','x','x'] ),
]
# expected output:
'''
4
5
3
2
1
'''
global product_status
for t in test_data:
product_status = t.product_status
print( Solution().firstBadVersion( n = len(product_status) ) )
return
if __name__ == '__main__':
test_bench()
|
StarcoderdataPython
|
1783386
|
<filename>lambkin/zip.py
from __future__ import absolute_import
import os
import zipfile
import lambkin.metadata as metadata
# REF: http://docs.aws.amazon.com/lambda/latest/dg/lambda-python-how-to-create-deployment-package.html
def create_zip(zip_file_path):
if not zip_file_path:
function = metadata.get('function')
zip_file_path = '/tmp/lambkin-publish-%s.zip' % function
zip_file = zipfile.ZipFile(zip_file_path, 'w', zipfile.ZIP_DEFLATED)
for root, dirs, files in os.walk('.'):
site_dir = os.path.join('.', 'venv', 'lib', 'python2.7', 'site-packages')
dist_dir = os.path.join('.', 'venv', 'lib', 'python2.7', 'dist-packages')
for f in files:
path = os.path.join(root, f)
if path.endswith('.pyc'):
pass
elif root.startswith(site_dir):
# Then strip the library dir, and put the file in the zip.
trimmed_path = path[len(site_dir):]
zip_file.write(path, trimmed_path)
elif root.startswith(dist_dir):
# Then strip the library dir, and put the file in the zip.
trimmed_path = path[len(dist_dir):]
zip_file.write(path, trimmed_path)
elif root.startswith('./venv') or root.startswith('./.git'):
# Then it's other junk that we don't want.
pass
else:
# Not sure what this is. The function author probably put
# it here for a reason, so make sure it goes in the zip.
zip_file.write(path, path)
zip_file.close()
return zip_file_path
|
StarcoderdataPython
|
57413
|
import logging
from dataclasses import dataclass
from unittest.mock import patch
import pytest
from tests.utils.mock_backend import (
ApiKey,
BackendContext,
Run,
Project,
Team,
User,
)
from tests.utils.mock_base_client import MockBaseClient
########################################
########### BackendContext #############
########################################
@dataclass
class DefaultData:
user: User
api_key: ApiKey
team: Team
project: Project
run: Run
@pytest.fixture(scope="session")
def default_data() -> DefaultData:
user = User()
api_key = ApiKey(user.Id)
team = Team(user.Id, isPersonal=True)
project = Project(team.Id)
run = Run(userId=user.Id, teamId=team.Id, projectId=project.Id)
return DefaultData(user=user, api_key=api_key, team=team, project=project, run=run)
@pytest.fixture(scope="function", autouse=True)
def patch_ctx(default_data: DefaultData):
logging.info("Patching tests.utils.mock_backend.ctx to have default values")
ctx = BackendContext()
for (k, v) in default_data.__dict__.items():
ctx.set(k, v)
with patch("tests.utils.mock_backend.ctx", ctx):
logging.info("Successfully patched tests.utils.mock_backend.ctx")
yield
logging.info("unpatching tests.utils.mock_backend.ctx back to fresh state")
@pytest.fixture(scope="session", autouse=True)
def patch_base_client():
with patch("manta_lab.api.client._BaseClient", MockBaseClient):
logging.info("Successfully patched manta_lab.api.client_BaseClient with MockBaseClient")
yield
logging.info("unpatching manta_lab.api.client_BaseClient")
# @pytest.fixture()
# def run(request):
# marker = request.node.get_closest_marker("manta_args")
# kwargs = marker.kwargs if marker else dict(env={})
# for k, v in kwargs["env"].items():
# os.environ[k] = v
# # TODO: should be create run by manta.init
# s = Settings()
# s.update_envs(kwargs["env"])
# return Run(settings=s)
|
StarcoderdataPython
|
15709
|
<filename>oriskami/test/resources/test_router_data.py
import os
import oriskami
import warnings
from oriskami.test.helper import (OriskamiTestCase)
class OriskamiAPIResourcesTests(OriskamiTestCase):
def test_router_data_update(self):
response = oriskami.RouterData.update("0", is_active="true")
self.assertTrue(hasattr(response.data, "__iter__"))
self.assertEqual(response.data[0].is_active, "true")
response = oriskami.RouterData.update("0", is_active="false")
self.assertEqual(response.data[0].is_active, "false")
def test_router_data_list(self):
response = oriskami.RouterData.list()
self.assertTrue(hasattr(response.data, "__iter__"))
        self.assertEqual(len(response.data), 1)
self.assertTrue(hasattr(response.data[0], "is_active"))
|
StarcoderdataPython
|
3334480
|
<filename>app/api/models/route.py
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import Dict, List # noqa: F401
from app import util
from app.api.models.base_model_ import Model
class Route(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(
self, vehicle_id: int = None, stop_number: int = None,
): # noqa: E501
"""Route - a model defined in Swagger
:param vehicle_id: The vehicle_id of this Route. # noqa: E501
:type vehicle_id: int
:param stop_number: The stop_number of this Route. # noqa: E501
:type stop_number: int
"""
self.swagger_types = {
"vehicle_id": int,
"stop_number": int,
}
self.attribute_map = {
"vehicle_id": "vehicle_id",
"stop_number": "stop_number",
}
self._vehicle_id = vehicle_id
self._stop_number = stop_number
@classmethod
def from_dict(cls, dikt) -> "Route":
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The Route of this Route. # noqa: E501
:rtype: Route
"""
return util.deserialize_model(dikt, cls)
@property
def vehicle_id(self) -> int:
"""Gets the vehicle_id of this Route.
:return: The vehicle_id of this Route.
:rtype: int
"""
return self._vehicle_id
@vehicle_id.setter
def vehicle_id(self, vehicle_id: int):
"""Sets the vehicle_id of this Route.
:param vehicle_id: The vehicle_id of this Route.
:type vehicle_id: int
"""
if vehicle_id is None:
raise ValueError(
"Invalid value for `vehicle_id`, must not be `None`"
) # noqa: E501
self._vehicle_id = vehicle_id
@property
def stop_number(self) -> int:
"""Gets the stop_number of this Route.
:return: The stop_number of this Route.
:rtype: int
"""
return self._stop_number
@stop_number.setter
def stop_number(self, stop_number: int):
"""Sets the stop_number of this Route.
:param stop_number: The stop_number of this Route.
:type stop_number: int
"""
if stop_number is None:
raise ValueError(
"Invalid value for `stop_number`, must not be `None`"
) # noqa: E501
self._stop_number = stop_number
|
StarcoderdataPython
|
3355016
|
<filename>beam_search.py
"""Beam search implementation in PyTorch."""
#
#
# hyp1#-hyp1---hyp1 -hyp1
# \ /
# hyp2 \-hyp2 /-hyp2#hyp2
# / \
# hyp3#-hyp3---hyp3 -hyp3
# ========================
#
# Takes care of beams, back pointers, and scores.
# Code borrowed from PyTorch OpenNMT example
# https://github.com/pytorch/examples/blob/master/OpenNMT/onmt/Beam.py
import torch
class Beam(object):
"""Ordered beam of candidate outputs."""
def __init__(self, size, vocab, cuda=False):
"""Initialize params."""
self.size = size
self.done = False
self.pad = vocab['<pad>']
self.bos = vocab['<s>']
self.eos = vocab['</s>']
self.tt = torch.cuda if cuda else torch
# The score for each translation on the beam.
self.scores = self.tt.FloatTensor(size).zero_()
# The backpointers at each time-step.
self.prevKs = []
# The outputs at each time-step.
self.nextYs = [self.tt.LongTensor(size).fill_(self.pad)]
self.nextYs[0][0] = self.bos
# The attentions (matrix) for each time.
self.attn = []
# Get the outputs for the current timestep.
def get_current_state(self):
"""Get state of beam."""
return self.nextYs[-1]
# Get the backpointers for the current timestep.
def get_current_origin(self):
"""Get the backpointer to the beam at this step."""
return self.prevKs[-1]
# Given prob over words for every last beam `wordLk` and attention
# `attnOut`: Compute and update the beam search.
#
# Parameters:
#
# * `wordLk`- probs of advancing from the last step (K x words)
# * `attnOut`- attention at the last step
#
# Returns: True if beam search is complete.
def advance(self, workd_lk):
"""Advance the beam."""
num_words = workd_lk.size(1)
# Sum the previous scores.
if len(self.prevKs) > 0:
beam_lk = workd_lk + self.scores.unsqueeze(1).expand_as(workd_lk)
else:
beam_lk = workd_lk[0]
flat_beam_lk = beam_lk.view(-1)
bestScores, bestScoresId = flat_beam_lk.topk(self.size, 0, True, True)
self.scores = bestScores
# bestScoresId is flattened beam x word array, so calculate which
# word and beam each score came from
        # Integer division: recover which beam each flattened index came from.
        prev_k = bestScoresId // num_words
self.prevKs.append(prev_k)
self.nextYs.append(bestScoresId - prev_k * num_words)
# End condition is when top-of-beam is EOS.
if self.nextYs[-1][0] == self.eos:
self.done = True
return self.done
def sort_best(self):
"""Sort the beam."""
return torch.sort(self.scores, 0, True)
# Get the score of the best in the beam.
def get_best(self):
"""Get the most likely candidate."""
scores, ids = self.sort_best()
return scores[1], ids[1]
# Walk back to construct the full hypothesis.
#
# Parameters.
#
# * `k` - the position in the beam to construct.
#
# Returns.
#
# 1. The hypothesis
# 2. The attention at each time step.
def get_hyp(self, k):
"""Get hypotheses."""
hyp = []
# print(len(self.prevKs), len(self.nextYs), len(self.attn))
for j in range(len(self.prevKs) - 1, -1, -1):
hyp.append(self.nextYs[j + 1][k])
k = self.prevKs[j][k]
return hyp[::-1]
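
# --- Usage sketch (added for illustration; not part of the original file) ---
# A toy decode loop with random log-probabilities standing in for a model,
# assuming only the Beam class above and a made-up 5-token vocabulary.
if __name__ == '__main__':
    vocab = {'<pad>': 0, '<s>': 1, '</s>': 2, 'a': 3, 'b': 4}
    beam = Beam(size=3, vocab=vocab, cuda=False)
    for _ in range(5):
        # A real decoder would condition on beam.get_current_state() here.
        word_lk = torch.log_softmax(torch.rand(beam.size, len(vocab)), dim=-1)
        if beam.advance(word_lk):
            break
    scores, ids = beam.sort_best()
    print('best hypothesis token ids:', beam.get_hyp(ids[0]))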
|
StarcoderdataPython
|
118216
|
<gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 22 14:15:18 2017
@author: <NAME>
"""
import numpy as np
import pickle
matrixShape = ##Shape##
matrixDType = ##Dtype##
workingDir = "##workingDir##"
fileNameString = "##fileNameString##"
datafile = open(workingDir+fileNameString+"_Txt.txt", 'w')
dumpsfile = open(workingDir+fileNameString+"_Dump.txt", 'w')
matrix = np.indices(matrixShape,dtype = matrixDType)
datafile.write(str(matrix))
dumpsfile.write(np.ndarray.dumps(matrix))
datafile.close()
dumpsfile.close()
|
StarcoderdataPython
|
1618683
|
# -*- coding:utf8 -*-
###############################################################################
# #
# SYMBOLS, TABLES #
# #
###############################################################################
from collections import OrderedDict
class Symbol(object):
def __init__(self, name, type=None):
self.name = name
self.type = type
class VarSymbol(Symbol):
def __init__(self, name, type):
super(VarSymbol, self).__init__(name, type)
def __str__(self):
return "<{class_name}(name='{name}', type='{type}')>".format(
class_name=self.__class__.__name__,
name=self.name,
type=self.type,
)
__repr__ = __str__
class StructSymbol(Symbol):
def __init__(self, name, type, attributes):
super(StructSymbol, self).__init__(name, type)
self.attributes = attributes
def __str__(self):
return "<{class_name}(name='{name}',({attr}) type='{type}')>".format(
class_name=self.__class__.__name__,
            attr=",".join(str(attribute) for attribute in self.attributes.keys()),
name=self.name,
type=self.type,
)
__repr__ = __str__
class BuiltinTypeSymbol(Symbol):
def __init__(self, name):
super(BuiltinTypeSymbol, self).__init__(name)
def __str__(self):
return self.name
def __repr__(self):
return "<{class_name}(name='{name}')>".format(
class_name=self.__class__.__name__,
name=self.name,
)
class FunctionSymbol(Symbol):
def __init__(self, name, type, params=None):
super(FunctionSymbol, self).__init__(name, type=type)
# a list of formal parameters
self.params = params if params is not None else []
def __str__(self):
return '<{class_name}(type={type}, name={name}, parameters={params})>'.format(
class_name=self.__class__.__name__,
name=self.name,
params=self.params,
type=self.type
)
__repr__ = __str__
class ScopedSymbolTable(object):
def __init__(self, scope_name, scope_level, enclosing_scope=None):
self._symbols = OrderedDict()
self.scope_name = scope_name
self.scope_level = scope_level
self.enclosing_scope = enclosing_scope
def _init_builtins(self):
self.insert(BuiltinTypeSymbol('char'))
self.insert(BuiltinTypeSymbol('int'))
self.insert(BuiltinTypeSymbol('float'))
self.insert(BuiltinTypeSymbol('double'))
self.insert(BuiltinTypeSymbol('void'))
def __str__(self):
h1 = 'SCOPE (SCOPED SYMBOL TABLE)'
lines = ['\n', h1, '=' * len(h1)]
for header_name, header_value in (
('Scope name', self.scope_name),
('Scope level', self.scope_level),
('Enclosing scope',
self.enclosing_scope.scope_name if self.enclosing_scope else None
)
):
lines.append('%-15s: %s' % (header_name, header_value))
h2 = 'Scope (Scoped symbol table) contents'
lines.extend([h2, '-' * len(h2)])
lines.extend(
('%7s: %r' % (key, value))
for key, value in self._symbols.items()
)
lines.append('\n')
s = '\n'.join(lines)
return s
__repr__ = __str__
def insert(self, symbol):
# print('Insert: %s' % symbol.name)
self._symbols[symbol.name] = symbol
def lookup(self, name, current_scope_only=False, struct=False):
#print('Lookup: %s. (Scope name: %s)' % (name, self.scope_name))
# 'symbol' is either an instance of the Symbol class or None
symbol = self._symbols.get(name)
if symbol is not None:
return symbol
if current_scope_only:
return None
# recursively go up the chain and lookup the name
if self.enclosing_scope is not None:
return self.enclosing_scope.lookup(name)
|
StarcoderdataPython
|
3398874
|
<filename>tests/test_hse.py
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2021 Micron Technology, Inc. All rights reserved.
import unittest
from hse2 import hse
from common import UNKNOWN
class HseTests(unittest.TestCase):
def test_param(self):
for args in (("socket.enabled", "false"), ("this-does-not-exist", None)):
with self.subTest(param=args[0], value=args[1]):
if args[1]:
self.assertEqual(hse.param(args[0]), args[1])
else:
with self.assertRaises(hse.HseException):
hse.param(args[0])
if __name__ == "__main__":
unittest.main(argv=UNKNOWN)
|
StarcoderdataPython
|
132616
|
<gh_stars>0
import speech_recognition as SpeechRecog
import pyaudio
from random_word import RandomWords
import random
import time
import threading
init_rec = SpeechRecog.Recognizer()
score = 0
num_ques = 0
lang = {
1: 'en-US',
2: 'hi-IN',
3: 'ta-IN',
4: 'te-IN',
5: 'kn-IN',
6: 'zh-CN',
7: 'ja-JP',
8: 'it-IT',
9: 'fr-FR',
10: 'de-DE'
}
def quiz(string):
global score
global num_ques
ran_word = random_words()
# taking a random word from the correct answer
random_correct = []
minimize = [words for words in string if len(words) >= 5]
len_str = len(minimize)
x = random.randint(0, len_str)
for i in range(3):
random_correct.append(minimize[(x+i)%len_str])
random_correct = ' '.join(random_correct)
# print(random_correct)
# appending random word from the wrong answer and correct answer
options = []
for i in ran_word:
options.append(i)
options.append(random_correct)
# shuffling all the options
random.shuffle(options)
# print(options)
op1 = options[0]
op2 = options[1]
op3 = options[2]
op4 = options[3]
print("Choose the word which was mentioned by the professor:")
print("1.", options[0], "2.", options[1],
"3.", options[2], "4.", options[3])
user_choice = input("Enter option:")
if user_choice == "1":
ans = op1
elif user_choice == "2":
ans = op2
elif user_choice == "3":
ans = op3
elif user_choice == "4":
ans = op4
    else:
        print("Invalid option")
        num_ques += 1
        return
if ans == random_correct:
print("Correct option")
score += 1
num_ques += 1
else:
print("Wrong option")
num_ques += 1
def random_words():
r = RandomWords()
words = r.get_random_words()
word_final = []
words1 = words[:3]
words1 = ' '.join(words1)
word_final.append(words1)
words2 = words[3:6]
words2 = ' '.join(words2)
word_final.append(words2)
words3 = words[6:9]
words3 = ' '.join(words3)
word_final.append(words3)
return word_final
def main(num):
while True:
words = []
for i in range(1):
with SpeechRecog.Microphone() as source:
audio_data = init_rec.record(source, duration=10)
try:
text = init_rec.recognize_google(audio_data, language = lang[num])
except:
text = ''
#text = 'Note that we may get different output because this program generates random number in range 0 and 9.'
text_filtered = text.split(' ')
for j in text_filtered:
words.append(j)
quiz(words)
'''
thread_quiz = threading.Thread(target=quiz, args=(words, ))
thread_quiz.start()
thread_quiz.join()
'''
print(score)
if __name__ == '__main__':
main(3)
|
StarcoderdataPython
|
3269928
|
import sys
"""Functions to support backwards compatibility.
Basically where we have functions which differ between python 2 and 3, we provide implementations here
and then Python-specific versions in backward2 and backward3.
"""
if sys.hexversion >= 0x03000000: # Python 3+
from stomp.backward3 import *
else: # Python 2
from stomp.backward2 import *
def get_errno(e):
"""
Return the errno of an exception, or the first argument if errno is not available.
:param e: the exception object
"""
try:
return e.errno
except AttributeError:
return e.args[0]
try:
from fractions import gcd
except ImportError:
def gcd(a, b):
"""Calculate the Greatest Common Divisor of a and b.
Unless b==0, the result will have the same sign as b (so that when
b is divided by it, the result comes out positive).
(Copied from the Python2.6 source Copyright (c) 2001-2011 Python Software Foundation; All Rights Reserved)
"""
while b:
a, b = b, a % b
return a
try:
from time import monotonic
except ImportError: # Python < 3.3/3.5
from time import time as monotonic
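
# --- Usage sketch (added for illustration; not part of the original module) ---
# Small checks of the helpers above; the OSError arguments are made-up examples.
if __name__ == '__main__':
    # gcd keeps the sign of b when b != 0, as described in the docstring above.
    print(gcd(12, 8), gcd(12, -8))    # 4 -4
    # get_errno falls back to the first argument when errno is unavailable.
    print(get_errno(OSError(2, 'No such file or directory')))  # 2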
|
StarcoderdataPython
|
4829700
|
<reponame>FrNecas/ogr
from requre.online_replacing import record_requests_for_all_methods
from tests.integration.pagure.base import PagureTests
from ogr.abstract import IssueStatus
@record_requests_for_all_methods()
class Issues(PagureTests):
def setUp(self):
super().setUp()
self._long_issues_project = None
@property
def long_issues_project(self):
if not self._long_issues_project:
self._long_issues_project = self.service.get_project(
repo="pagure", namespace=None
)
return self._long_issues_project
def test_issue_list(self):
issue_list = self.ogr_project.get_issue_list()
assert isinstance(issue_list, list)
issue_list = self.ogr_project.get_issue_list(status=IssueStatus.all)
assert issue_list
assert len(issue_list) >= 2
def test_issue_list_paginated(self):
issue_list = self.long_issues_project.get_issue_list()
assert issue_list
assert len(issue_list) >= 400
def test_issue_list_author(self):
issue_list = self.ogr_project.get_issue_list(
status=IssueStatus.all, author="mfocko"
)
assert issue_list
assert len(issue_list) >= 3
def test_issue_list_nonexisting_author(self):
issue_list = self.ogr_project.get_issue_list(
status=IssueStatus.all, author="xyzidontexist"
)
assert len(issue_list) == 0
def test_issue_list_assignee(self):
issue_list = self.ogr_project.get_issue_list(
status=IssueStatus.all, assignee="mfocko"
)
assert issue_list
assert len(issue_list) == 1
def test_issue_list_labels(self):
issue_list = self.ogr_project.get_issue_list(
status=IssueStatus.all, labels=["test_label"]
)
assert issue_list
assert len(issue_list) == 1
def test_create_issue(self):
title = "This is an issue"
description = "Example of Issue description"
labels = ["label1", "label2"]
project = self.service.get_project(repo="hello-112111", namespace="testing")
issue = project.create_issue(
title=title, body=description, private=True, labels=labels
)
assert issue.title == title
assert issue.description == description
assert issue.private
for issue_label, label in zip(issue.labels, labels):
assert issue_label == label
def test_create_issue_with_assignees(self):
random_str = "something"
project = self.service.get_project(repo="hello-112111", namespace="testing")
assignee = ["mfocko"]
issue = project.create_issue(
title=random_str, body=random_str, assignees=assignee
)
assert issue.title == random_str
assert issue.description == random_str
assert issue.assignee == assignee[0]
def test_issue_assignees(self):
"""
Remove the assignees from this issue before regenerating the response files:
https://pagure.io/testing/hello-112111/issue/4
"""
project = self.service.get_project(
repo="hello-112111", namespace="testing", is_fork=True
)
issue = project.get_issue(4)
assert not project.get_issue(4).assignee
issue.add_assignee("kpostlet")
assignee = project.get_issue(4).assignee
assert assignee == "kpostlet"
def test_issue_without_label(self):
title = "This is an issue"
description = "Example of Issue description"
project = self.service.get_project(repo="hello-112111", namespace="testing")
issue = project.create_issue(title=title, body=description)
assert issue.title == title
assert issue.description == description
def test_get_comment(self):
project = self.service.get_project(
repo="my-playground", namespace=None, username="nikromen"
)
comment = project.get_issue(1).get_comment(753462)
assert comment.body == "example issue comment"
|
StarcoderdataPython
|
3319423
|
# Subcommand completion with the readline module.
#
# Tested with Python 3.4
#
# <NAME> [http://eli.thegreenplace.net]
# This code is in the public domain.
import glob
import readline
def make_subcommand_completer(commands):
def custom_complete(text, state):
# Simplistic parsing of the command-line so far. We want to know if the
# user is still entering the command, or if the command is already there
# and now we have to complete the subcommand.
linebuf = readline.get_line_buffer()
parts = linebuf.split()
if len(parts) >= 1 and linebuf.endswith(' '):
# If we're past the first part and there is whitespace at the end of
# the buffer, it means we're already completing the next part.
parts.append('')
if len(parts) <= 1:
matches = [w + ' ' for w in commands.keys()
if w.startswith(text)] + [None]
return matches[state]
elif len(parts) >= 2:
command = parts[0]
if command == 'file':
# Treat 'file' specially, by looking for matching files in the
# current directory.
matches = [w + ' ' for w in glob.glob(text + '*')] + [None]
else:
matches = [w + ' ' for w in commands[command]
if w.startswith(parts[1])] + [None]
return matches[state]
return custom_complete
def main():
commands = {
'file': {},
'eat': {'breakfast', 'dinner', 'lunch', 'snack'},
'play': {'cards', 'chess', 'go'},
'walk': {'left', 'right', 'straight'},
}
readline.parse_and_bind('tab: complete')
readline.set_completer(make_subcommand_completer(commands))
# Use the default readline completer delims; Python's readline adds '-'
# which makes filename completion funky (for files that contain '-').
readline.set_completer_delims(" \t\n\"\\'`@$><=;|&{(")
try:
while True:
s = input('>> ').strip()
print('[{0}]'.format(s))
except (EOFError, KeyboardInterrupt) as e:
print('\nShutting down...')
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3293670
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
If your log file is in the standard "%(asctime)s; %(levelname)-8s; %(message)s"
format, then this module can help you filter out the log information that you
care about.
"""
from __future__ import print_function
class Result(object):
def __init__(self, path,
level, message, time_lower, time_upper, case_sensitive):
self.path = path
self.level = level
self.message = message
self.time_lower = time_lower
self.time_upper = time_upper
self.case_sensitive = case_sensitive
self.lines = list()
def __str__(self):
return self.header + "\n" + "".join(self.lines)
@property
def header(self):
template = ("--- Result of: filepath=%r, level=%r, pattern=%r,"
"time_lower=%r, time_upper=%r, case_sensitive=%r ---")
return template % (self.path,
self.level, self.message,
self.time_lower, self.time_upper, self.case_sensitive,
)
def dump(self, path):
with open(path, "wb") as f:
f.write(str(self).encode("utf-8"))
def find(path, level=None, message=None,
time_lower=None, time_upper=None, case_sensitive=False):
"""Filter log message.
    Filters by level name, by keywords in the message, and by a time interval
    on the log timestamp.
    """
if level:
level = level.upper() # level name has to be capitalized.
    if message and not case_sensitive:
        message = message.lower()
with open(path, "r") as f:
result = Result(path=path,
level=level, message=message,
time_lower=time_lower, time_upper=time_upper,
case_sensitive=case_sensitive,
)
for line in f:
try:
_time, _level, _message = [i.strip() for i in line.split(";")]
if level:
if _level != level:
continue
if time_lower:
if _time < time_lower:
continue
if time_upper:
if _time > time_upper:
continue
if message:
if not case_sensitive:
_message = _message.lower()
if message not in _message:
continue
result.lines.append(line)
except Exception as e:
print(e)
return result
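
# --- Usage sketch (added for illustration; not part of the original module) ---
# The file name, keyword and timestamps below are made-up examples; they assume
# a log written with the "%(asctime)s; %(levelname)-8s; %(message)s" format.
if __name__ == "__main__":
    result = find(
        "app.log",
        level="error",
        message="timeout",
        time_lower="2014-01-01 00:00:00",
        time_upper="2014-12-31 23:59:59",
    )
    print(result.header)
    print("%d matching line(s)" % len(result.lines))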
|
StarcoderdataPython
|
1775689
|
<gh_stars>1-10
import unittest
from .realParser import eval
from .realParser import parse
class functionXTest(unittest.TestCase):
def test_one(self):
self.assertEqual(7, parse("((2)+(5))"))
self.assertEqual(-3, parse(" (2)-(5)"))
self.assertEqual(28, parse("+3 +5*5*(+1)"))
self.assertEqual(25, eval("5*(x +3)",2))
self.assertEqual(25, eval("5*(x +3)",2))
self.assertEqual(25, eval("5*(x +3)","1+1"))
self.assertEqual(0.2339992213289606, eval("(2.35*e^(-3)*x)",2))
self.assertEqual(0.9092974268256817, eval("sin(x)",2))
var = {"x":2, "Z":1}
self.assertEqual(-18.0, eval(" 2*(-(((z*3)*sqrt(x^(2)))+3))",var))
var = {"x":"1+1", "Z":1}
self.assertEqual(-18.0, eval(" 2*(-(((z*3)*sqrt(x^(2)))+3))",var))
var2 = {"x":"1+1", "Z":"cos(1)"}
self.assertEqual(eval(" 2*(-(((cos(z)*3)*sqrt(x^(2)))+3))",var), eval(" 2*(-(((z*3)*sqrt(x^(2)))+3))",var2))
self.assertEqual(eval(" 2*(-(((cos(z)*3)*sqrt(x^(2)))+3))",var),-12.483627670417677)
self.assertEqual(-12.483627670417677, eval(" 2*(-(((z*3)*sqrt(x^(2)))+3))",var2))
def test_two(self):
f_x = "5*(2*(sqrt((x+2)^2)) +3)"
x0 = 2
self.assertEqual(55, eval(f_x, x0))
f_x = "5*(2*(sqrt((x+2)^2)/2) +3)";
self.assertEqual(35, eval(f_x, x0))
f_x = "cosh(6+(2/0))"
#self.assertEqual(-1, eval(f_x, x0))
f_x = "cos(x)"
x2 = 0
self.assertEqual(1, eval(f_x, x2))
def test_three(self):
f_x = "+3 +5*5*(+1)";
self.assertEqual(28, parse(f_x))
f_xs = "x+5*y+(3 -y)"
var = {"x":"1+1*1", "y":1}
self.assertEqual(9, eval(f_xs,var))
def test_four(self):
f_x = "log(e)";
self.assertEqual(1, parse(f_x))
f_x = "log10(x)"
var = {"x":"5*2 +10 -10"}
self.assertEqual(1, eval(f_x,var))
f_x=" 1 + acos(0.1)"
self.assertEqual(2.470628905633337, parse(f_x))
var = {"x":2, "y":3.1}
f_x = " ((2+x)^2) + cos((3/2+2*y)^(0.5*x))"
self.assertEqual(16.153373862037864, eval(f_x,var))
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
167130
|
from meteor_reasoner.utils.parser import *
from collections import defaultdict
def load_dataset(lines):
"""
Read string-like facts into a dictionary object.
Args:
lines (list of strings): a list of facts in the form of A(x,y,z)@[1,2] or A@[1,2)
Returns:
A defaultdict object, in which the key is the predicate and the value is a dictionary (key is
the entity and the value is a list of Interval instances) or a list of Interval instance when
there is no entity.
"""
D = defaultdict(lambda: defaultdict(list))
for line in lines:
line = line.strip().replace(" ","")
if line == "":
continue
try:
predicate, entity, interval = parse_str_fact(line)
except:
continue
if predicate not in D:
if entity:
D[predicate][entity] = [interval]
else:
D[predicate] = [interval]
else:
if isinstance(D[predicate], list) and entity is not None:
raise ValueError("One predicate can not have both entity and Null cases!")
if not isinstance(D[predicate], list) and entity is None:
raise ValueError("One predicate can not have both entity and Null cases!")
if entity:
if entity in D[predicate]:
D[predicate][entity].append(interval)
else:
D[predicate][entity] = [interval]
else:
D[predicate].append(interval)
return D
def load_program(rules):
"""
Format each string-like rule into a rule instance.
Args:
rules (list of strings): each string represents a rule, e.g. A(X):- Boxminus[1,2]B(X)
Returns:
list of rule instances
"""
program = []
for line in rules:
rule = parse_rule(line)
program.append(rule)
return program
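
# --- Usage sketch (added for illustration; not part of the original module) ---
# The fact and rule strings follow the formats described in the docstrings
# above; whether these exact strings parse depends on meteor_reasoner's parser.
if __name__ == "__main__":
    facts = ["A(mike)@[1,5]", "A(mike)@[6,8]", "B@[2,3)"]
    D = load_dataset(facts)
    for predicate, value in D.items():
        print(predicate, value)
    program = load_program(["A(X):-Boxminus[1,2]B(X)"])
    print(program)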
|
StarcoderdataPython
|
1635375
|
from lichtenberg.util import draw_blur
from PIL import Image
from random import randint
from pathlib import Path
def main():
width, height = 600, 600
img = Image.new("RGB", (width, height))
blur_params = [(0, 1.0), (1, 4.0), (2, 8.0)]
color = (1.2, 1.0, 1.0)
num_line = 50
for i in range(num_line):
print(f"{i+1}/{num_line}")
sx, sy = randint(100, 500), 50
ex, ey = sx + randint(-50, 50), 550
draw_blur(img, [(sx, sy), (ex, ey)], blur_params, 0.3, color)
img.save(Path(__file__).stem + ".png")
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
85115
|
import json
import os
import sys
import albumentations as A
import numpy as np
import pandas as pd
import timm
import torch
import ttach as tta
from albumentations.augmentations.geometric.resize import Resize
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
from tqdm import tqdm
import missed_planes.engine as engine
import missed_planes.metrics as metrics
from missed_planes.dataset import PlanesDataset
with open(sys.argv[1], "r") as f:
config = json.load(f)
transforms = A.Compose(
[
A.Resize(height=config["image_size"], width=config["image_size"], p=1),
],
p=1,
)
test_data = pd.read_csv(config["test_csv"])
test_dataset = PlanesDataset(
test_data, path=config["test_path"], is_test=True, augmentation=transforms
)
test_loader = DataLoader(
test_dataset,
batch_size=config["batch_size"],
shuffle=False,
num_workers=config["num_workers"],
drop_last=False,
)
with torch.no_grad():
final = []
for ind in range(config["folds"]):
model = torch.load(f"{config['checkpoint']}/fold{ind}_{config['model']}.pt")
model.eval()
tta_model = tta.ClassificationTTAWrapper(model, tta.aliases.d4_transform(), merge_mode='mean')
result = []
for i in tqdm(test_loader, total=len(test_loader)):
i = i.to(config["device"])
output = tta_model(i)
output = output.view(-1).detach().cpu().numpy()
result.extend(output)
final.append(result)
result = np.array(final).mean(axis=0)
submission = pd.read_csv("data/sample_submission_extended.csv")
submission["sign"] = result
# import IPython; IPython.embed(); exit(1)
# submission["sign"] = (result > 0.5).astype(int)
# print((result > 0.5).sum())
submission.to_csv(
os.path.join(config["submission"], config["model"]) + ".csv",
index=None,
)
submission.to_csv(
os.path.join(config["submission"], config["model"]) + ".csv.gz",
compression="gzip",
index=None,
)
|
StarcoderdataPython
|
70046
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DQN Builder."""
from typing import Callable, Iterator, List, Optional, Sequence
from acme import adders
from acme import core
from acme import specs
from acme.adders import reverb as adders_reverb
from acme.agents.jax import actors
from acme.agents.jax import builders
from acme.agents.jax.dqn import actor as dqn_actor
from acme.agents.jax.dqn import config as dqn_config
from acme.agents.jax.dqn import learning_lib
from acme.datasets import reverb as datasets
from acme.jax import networks as networks_lib
from acme.jax import variable_utils
from acme.utils import counting
from acme.utils import loggers
import optax
import reverb
from reverb import rate_limiters
class DQNBuilder(builders.ActorLearnerBuilder):
"""DQN Builder."""
def __init__(
self,
config: dqn_config.DQNConfig,
loss_fn: learning_lib.LossFn,
logger_fn: Callable[[], loggers.Logger] = lambda: None,
actor_backend: Optional[str] = 'cpu'
):
"""Creates DQN learner and the behavior policies.
Args:
config: DQN config.
loss_fn: A loss function.
logger_fn: a logger factory for the learner.
actor_backend: Which backend to use when jitting the policy.
"""
self._config = config
self._loss_fn = loss_fn
self._logger_fn = logger_fn
self._actor_backend = actor_backend
def make_learner(
self,
random_key: networks_lib.PRNGKey,
networks: networks_lib.FeedForwardNetwork,
dataset: Iterator[reverb.ReplaySample],
replay_client: Optional[reverb.Client] = None,
counter: Optional[counting.Counter] = None,
) -> core.Learner:
return learning_lib.SGDLearner(
network=networks,
random_key=random_key,
optimizer=optax.adam(self._config.learning_rate,
eps=self._config.adam_eps),
target_update_period=self._config.target_update_period,
data_iterator=dataset,
loss_fn=self._loss_fn,
replay_client=replay_client,
replay_table_name=self._config.replay_table_name,
counter=counter,
num_sgd_steps_per_step=self._config.num_sgd_steps_per_step,
logger=self._logger_fn())
def make_actor(
self,
random_key: networks_lib.PRNGKey,
policy_network: dqn_actor.EpsilonPolicy,
adder: Optional[adders.Adder] = None,
variable_source: Optional[core.VariableSource] = None,
) -> core.Actor:
assert variable_source is not None
# Inference happens on CPU, so it's better to move variables there too.
variable_client = variable_utils.VariableClient(variable_source, '',
device='cpu')
epsilon = self._config.epsilon
    # Wrap a single epsilon in a tuple; use isinstance for the sequence check.
    epsilons = epsilon if isinstance(epsilon, Sequence) else (epsilon,)
actor_core = dqn_actor.alternating_epsilons_actor_core(
policy_network, epsilons=epsilons)
return actors.GenericActor(actor=actor_core,
random_key=random_key,
variable_client=variable_client,
adder=adder,
backend=self._actor_backend)
def make_replay_tables(
self, environment_spec: specs.EnvironmentSpec) -> List[reverb.Table]:
"""Creates reverb tables for the algorithm."""
samples_per_insert_tolerance = (
self._config.samples_per_insert_tolerance_rate *
self._config.samples_per_insert)
error_buffer = self._config.min_replay_size * samples_per_insert_tolerance
limiter = rate_limiters.SampleToInsertRatio(
min_size_to_sample=self._config.min_replay_size,
samples_per_insert=self._config.samples_per_insert,
error_buffer=error_buffer)
return [reverb.Table(
name=self._config.replay_table_name,
sampler=reverb.selectors.Prioritized(self._config.priority_exponent),
remover=reverb.selectors.Fifo(),
max_size=self._config.max_replay_size,
rate_limiter=limiter,
signature=adders_reverb.NStepTransitionAdder.signature(
environment_spec))]
def make_dataset_iterator(
self, replay_client: reverb.Client) -> Iterator[reverb.ReplaySample]:
"""Creates a dataset iterator to use for learning."""
dataset = datasets.make_reverb_dataset(
table=self._config.replay_table_name,
server_address=replay_client.server_address,
batch_size=(
self._config.batch_size * self._config.num_sgd_steps_per_step),
prefetch_size=self._config.prefetch_size)
return dataset.as_numpy_iterator()
def make_adder(self, replay_client: reverb.Client) -> adders.Adder:
"""Creates an adder which handles observations."""
return adders_reverb.NStepTransitionAdder(
priority_fns={self._config.replay_table_name: None},
client=replay_client,
n_step=self._config.n_step,
discount=self._config.discount)
|
StarcoderdataPython
|
3289464
|
'''KELFI code is adapated from https://github.com/Kelvin-Hsu/kelfi '''
import numpy as np
import tensorflow as tf
import pandas as pd
import pickle, time, os, sys
from os import path
import elfi
from elfi.examples import dgp_funcs, bdm_dgp, navworld
from kelfi.utils import halton_sequence
from kelfi.kernel_means_inference import kernel_means_weights, approximate_marginal_kernel_means_likelihood
from kelfi.kernel_means_inference import kernel_means_posterior, approximate_kernel_means_posterior_embedding, kernel_herding
from kelfi.kernel_means_learning import kernel_means_hyperparameter_learning
from kelfi.kernels import gaussian_kernel_gramix
def hyperparameter_learning_objective(y, x_sim, t_sim, t_samples, beta, eps, reg=None):
"""Computes the approximate MKML for different hyperparameters."""
weights = kernel_means_weights(y, x_sim, t_sim, eps, beta, reg=reg)
return approximate_marginal_kernel_means_likelihood(t_samples, t_sim, weights, beta)
def kelfi(y, x_sim, t_sim, t_samples, beta, eps, reg=None, n_samples=1000, beta_query=None):
"""Full KELFI Solution."""
weights = kernel_means_weights(y, x_sim, t_sim, eps, beta, reg=reg)
mkml = approximate_marginal_kernel_means_likelihood(t_samples, t_sim, weights, beta)
if beta_query is None:
beta_query = beta
kernel_function = lambda t1, t2: gaussian_kernel_gramix(t1, t2, beta_query)
kmpe_ = approximate_kernel_means_posterior_embedding(t_samples, t_sim, weights, beta, t_samples, marginal_likelihood=mkml, beta_query=beta_query)
t_kmpe = kernel_herding(kmpe_, kernel_function, t_samples, n_samples)
return t_kmpe
seed = 0
np.random.seed(seed)
exp_names = ['TE1', 'TE2', 'TE3', 'BDM', 'NW']
models = [dgp_funcs.multigaussian(), dgp_funcs.multimodal_logistic(), dgp_funcs.beta_x(),
bdm_dgp.bdm_simulator(), navworld.navworld_simulator()] # side = 6, test=True ep = 1
noise_var = [[5], [5], [5], [0.5, 0.03, 1, 0.7], 0]
bounds = [{'t1':(0, 100)}, {'t1':(0, 100)}, {'t1':(0, 100)},
{'R1':(1.01, 12), 'R2': (0.01, 0.4),'burden': (120, 220), 't1':(0.01, 30)},
{'white':(-20.0, 0.0), 'yellow':(-20.0, 0.0),'red':(-20.0, 0.0),
'green':(-20.0, 0.0), 'purple':(-20.0, 0.0)}]
par_names = [['t1'], ['t1'], ['t1'], ['R1', 'R2', 'burden', 't1'],
['green', 'purple', 'red', 'white', 'yellow']]
true_pars = [ {'t1': 50}, {'t1': 20}, {'t1': 20},
{'R1': 5.88, 'R2': 0.09, 'burden': 192, 't1': 6.74},
{"white": 0.0, "yellow": -1.0, "red": -1.0, "green": -5.0, "purple": -10.0}]
exp = sys.argv[1]
it = int(sys.argv[2])
auto_dif = sys.argv[3].lower() in ("true", "1")
init_ev = 500
surrogate = 'kelfi'  # assumed label for the output directory; not defined in the original script
output_folder = 'posteriors/' + surrogate + '/'
try:
os.mkdir(output_folder[:-1])
print("Directory " , output_folder, " created")
except FileExistsError:
print("Directory " , output_folder, " already exists")
np.random.seed(it)
import time
for ind in range(0, len(models)):
if not exp in exp_names[ind]:
continue
seed = it
np.random.seed(seed)
tf.set_random_seed(seed)
true_theta = np.array([true_pars[ind][par] for par in par_names[ind]])
if 'TE' in exp_names[ind]:
y_data = models[ind].func(true_theta)
elif exp_names[ind] == 'BDM':
models[ind].get_model()
y_data = [0] # models[ind].y0_sum
elif exp_names[ind] == 'NW':
y_data = models[ind].observed_data
save_dir = output_folder + exp_names[ind] + '/' + surrogate + '-' + exp_names[ind]
try:
os.mkdir(output_folder + exp_names[ind])
print("Directory " , output_folder + exp_names[ind], " created")
except FileExistsError:
print("Directory " , output_folder + exp_names[ind], " already exists")
    t0 = time.perf_counter()  # time.clock() was removed in Python 3.8
    if not path.exists(save_dir + 'z' + str(it) + '.npz'):
n_sim = init_ev * 2
n_prior = 10000
prior_samples = models[ind]
m = models[ind].get_model(seed_obs=seed)
outputs = m.parameter_names + ['sim']
data = m.generate(batch_size=n_sim, outputs=outputs, seed=it)
prior = m.generate(batch_size=n_prior, outputs=m.parameter_names, seed=it)
param_sample = np.reshape(data[par_names[ind][0]], (-1, 1))
t_sample = np.reshape(prior[par_names[ind][0]], (-1, 1))
for par in par_names[ind][1:]:
temp_sample = np.reshape(data[par], (-1, 1))
param_sample = np.concatenate((param_sample, temp_sample), axis=1)
temp_sample = np.reshape(prior[par], (-1, 1))
t_sample = np.concatenate((t_sample, temp_sample), axis=1)
y_data = np.array(y_data)
x_data = np.reshape(data['sim'], (-1, 1, len(y_data)))
y_data = np.reshape(y_data, (-1, 1, len(y_data)))
np.savez(save_dir + 'z' + str(it) + '.npz', name1 = x_data, name2 = y_data, name3 = param_sample, name4 = t_sample)
else:
loaded_data = np.load(save_dir + 'z' + str(it) + '.npz')
print('Load!')
x_data = loaded_data['name1']
y_data = loaded_data['name2']
param_sample = loaded_data['name3']
t_sample = loaded_data['name4']
# load
param_sample_mean = np.mean(param_sample, 0)
param_sample_std = np.std(param_sample, 0)
param_sample = (param_sample - param_sample_mean) / param_sample_std
t_sample = (t_sample - param_sample_mean) / param_sample_std
# Full hyperparameter learning with automatic differentiation
    if auto_dif:
eps_tuple = (0.06, 'learn')
beta_tuple = (0.6, 'learn')
reg_tuple = (1e-6, 'learn')
eps_opt, beta_opt, reg_opt = kernel_means_hyperparameter_learning(
y_data, x_data, param_sample, eps_tuple, beta_tuple, reg_tuple,
eps_ratios=1., beta_ratios=1., offset=0.,
prior_samples=t_sample, prior_mean=None, prior_std=None,
learning_rate=0.01, n_iter=5000, display_steps=100)
else:
beta_array = np.linspace(0.5, 1.5, 100)
eps_array = np.linspace(0.05, 0.15, 100)
eps_grid, beta_grid = np.meshgrid(eps_array, beta_array)
mkml_grid = np.zeros((beta_array.shape[0], eps_array.shape[0]))
mkml_global = -np.inf
for i, beta in enumerate(beta_array):
for j, eps in enumerate(eps_array):
mkml_grid[i, j] = hyperparameter_learning_objective(y_data, x_data, param_sample, t_sample, beta, eps)
if mkml_grid[i, j] > mkml_global:
mkml_global = mkml_grid[i, j]
beta_global = beta
eps_global = eps
# calculate the posterior
n_samples = 100000
beta_query = 0.1
t_kmpe_opt = kelfi(y_data, x_data, param_sample, t_sample, beta_global, eps_global, reg=None, n_samples=n_samples, beta_query=beta_query)
t_kmpe_opt = t_kmpe_opt * param_sample_std + param_sample_mean
theta_df = pd.DataFrame.from_records(t_kmpe_opt, columns = par_names[ind])
theta_df.to_pickle(save_dir + '-thetas-' + str(it) + '.pkl')
|
StarcoderdataPython
|
3214952
|
import psycopg2
import gmplot
db_conn = psycopg2.connect("dbname='yelp' host='' user='' password=''")
cur = db_conn.cursor()
cur.execute("select latitude, longitude from business where postal_code='89109';")
lat_long = cur.fetchall()
latitude = []
longitude = []
for i in range(len(lat_long)):
latitude.append(lat_long[i][0])
longitude.append(lat_long[i][1])
gmap = gmplot.GoogleMapPlotter(36.1215,-115.1696, 13)
gmap.scatter(latitude, longitude, '#FF6666', edge_width=10)
gmap.draw('89109_business_map.html')
cur.close()
db_conn.close()
|
StarcoderdataPython
|
3300267
|
<reponame>Couso99/EEG-Environment
# Author: <NAME> (<EMAIL>)
from PyQt5 import QtWidgets, QtCore, QtGui
from GUI.select_subject import SubjectSelection
from GUI.ui_subject_details_no_details import Ui_NoDetails
from GUI.ui_subject_details_show import Ui_Details
class NoDetails(QtWidgets.QWidget):
def __init__(self, parent):
super(NoDetails, self).__init__(parent)
self.ui = Ui_NoDetails()
self.ui.setupUi(self)
self.ui.definePersonButton.clicked.connect(parent.select_subject)
self.ui.okButton.clicked.connect(parent.close)
class Details(QtWidgets.QWidget):
def __init__(self, parent):
super(Details, self).__init__(parent)
self.ui = Ui_Details()
self.ui.setupUi(self)
self.ui.definePersonButton.clicked.connect(parent.select_subject)
self.ui.okButton.clicked.connect(parent.close)
class SubjectDetails(QtWidgets.QMainWindow):
def __init__(self, parent):
super().__init__()
self.parent = parent
self.details = []
self.resize(300,150)
        self.stackedWidget = QtWidgets.QStackedWidget()
self.setCentralWidget(self.stackedWidget)
self.no_details = NoDetails(self)
self.stackedWidget.addWidget(self.no_details)
self.yes_details = Details(self)
self.stackedWidget.addWidget(self.yes_details)
self.update_details()
def update_details(self):
if self.parent.personID:
self.stackedWidget.setCurrentIndex(1)
#self.ui = Ui_Details()
self.details = self.parent.db.get_person_details(self.parent.personID)
if self.details:
self.yes_details.ui.subjectIdLabel.setText(self.details[0])
self.yes_details.ui.ageLabel.setText(str(self.details[1]))
self.yes_details.ui.sexLabel.setText(self.details[2])
else:
self.stackedWidget.setCurrentIndex(0)
self.details = []
def select_subject(self):
self.selectSubjectDialog = SubjectSelection(self.parent.db)
if self.selectSubjectDialog.exec_():
personID = self.selectSubjectDialog.personID
self.parent.personID = personID
self.parent.update_subject_label()
self.update_details()
|
StarcoderdataPython
|
3294351
|
# Generated by Django 3.2.3 on 2021-05-19 08:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0002_auto_20210519_0849'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='year',
field=models.CharField(blank=True, choices=[('FR', 'Freshman'), ('SO', 'Sophomore'), ('JR', 'Junior'), ('SR', 'Senior')], default='FR', max_length=2, verbose_name='year'),
),
]
|
StarcoderdataPython
|
3211452
|
<filename>tirelire-account/tests/unit/tests_account.py
from unittest import TestCase
from datetime import date
from app.domain.model import (
Currency,
Category,
Account,
Operation
)
class TestAccount(TestCase):
def test_hashes_must_be_identical(self):
account = Account("abc", Currency.EUR, [])
self.assertEqual(hash(account), hash("abc"))
def test_add_operation_must_append_to_account(self):
my_account = Account("uuid", Currency.EUR, [])
t1 = Operation("my operation", date(2022,1,26), -12345.90, Currency.EUR, Category.HOUSING)
my_account.add_operation(t1)
self.assertEqual(my_account.operations, [t1])
def test_add_operation_must_raise_exception(self):
my_account = Account("uuid", Currency.USD, [])
with self.assertRaises(ValueError):
new_op = Operation("my operation", date(2022,1,26), -12345.90, 'EUR', 'HOUSING')
my_account.add_operation(new_op)
def test_compute_balance_must_return_value(self):
t1 = Operation(
"My operation one",
date.today(),
12.36,
Currency.EUR
)
t2 = Operation(
"My operation two",
date.today(),
29.78,
Currency.EUR
)
my_account = Account("uuid", Currency.EUR, [t1, t2])
self.assertEqual(my_account.compute_balance(), t1.value + t2.value)
def test_compute_balance_category_must_return_value(self):
t1 = Operation(
"My operation one",
date.today(),
1290.36,
Currency.EUR,
Category.SALARY
)
t2 = Operation(
"My operation two",
date.today(),
29.78,
Currency.EUR,
Category.HOBBIES_SPORT
)
t3 = Operation(
"My operation three",
date.today(),
4.99,
Currency.EUR,
Category.HOBBIES_SPORT
)
my_account = Account("uuid", Currency.EUR, [t1, t2, t3])
self.assertEqual(my_account.compute_category_balance(Category.SALARY), t1.value)
self.assertEqual(my_account.compute_category_balance(Category.HOBBIES_SPORT), t2.value + t3.value)
self.assertEqual(my_account.compute_category_balance(Category.HOUSING), 0.0, Currency.EUR)
|
StarcoderdataPython
|
1706663
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Laboratoire de Recherche et
# Développement de l'Epita (LRDE).
#
# This file is part of Spot, a model checking library.
#
# Spot is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Spot is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Run all binaries, and collect the long option associated to each
# short option for easy comparison.
# This script should work with both Python 2 and 3.
from sys import stdout as out
import re
import subprocess
with open('Makefile.am', 'r') as mf:
lines = mf.read()
lines = re.sub('\s*\\\\\s*', ' ', lines)
bin_programs = re.search('bin_PROGRAMS\s*=([\w \t]*)', lines).group(1).split()
optre = re.compile('(-\w), (--[\w=-]+)')
d = {}
for tool in bin_programs:
args = ('./' + tool, '--help')
try:
popen = subprocess.Popen(args, stdout=subprocess.PIPE)
except OSError:
print("Cannot execute " + tool + ", is it compiled?")
exit(1)
popen.wait()
output = popen.communicate()[0].decode('utf-8')
for match in optre.finditer(output):
shortname, longname = match.group(1), match.group(2)
if not shortname in d:
d[shortname] = { longname: tool }
elif not longname in d[shortname]:
d[shortname][longname] = tool
else:
w = ('%29s' % '') + d[shortname][longname]
w = w[w.rfind('\n') + 1 : -1]
if len(w + ' ' + tool) < 80:
d[shortname][longname] += ' ' + tool
else:
d[shortname][longname] += '\n%29s%s' % ('', tool)
# The lambda function works around the fact that x might be an str or
# a unicode object depending on the Python implementation.
for shortname in sorted(d, key=lambda x: x.lower()):
out.write(shortname)
first=''
for longname in sorted(d[shortname]):
out.write('%s %-24s %s\n' % (first, longname, d[shortname][longname]))
first=' '
|
StarcoderdataPython
|
3302037
|
<reponame>lapras-inc/disk-embedding
import luigi
import named_tasks as named
import pandas as pd
import target
class _GatherResults(luigi.Task):
def run(self):
results = []
for req in self.requires():
if not req.complete():
continue
inp = req.output()
dct = inp["results"].load()
dct.update(dct["model_parameters"])
del dct["model_parameters"]
results.append(dct)
results = pd.DataFrame(results).fillna("-")
piv = results.pivot_table(index=("model_class", "metric", "loss"), columns=("dim", "task_type"), values="best_test_f1").sort_index()
print(piv)
self.output().dump(piv)
def output(self):
return target.CsvTarget(self.filename())
def filename(self):
raise NotImplementedError()
class WordNetNounTask(_GatherResults):
wn = luigi.Parameter("noun")
def filename(self):
return "./data/results_wn_noun.csv"
def requires(self):
classes = [
named.HypConesTask,
named.PoincareNIPSTask,
named.OrderEmbTask,
named.EucDiskEmbTask,
named.SphericalDiskEmbTask,
named.HypDiskEmbTask
]
for cls in classes:
for dim in [5,10]:
for task_type in ["0percent", "10percent", "25percent", "50percent"]:
for seed in range(1):
yield self.clone(cls, dim=dim, task_type=task_type, seed=seed)
class WordNetNounRevTask(_GatherResults):
wn = luigi.Parameter("r_noun")
def filename(self):
return "./data/results_wn_noun_rev.csv"
def requires(self):
classes = [
named.HypConesTask,
named.PoincareNIPSTask,
named.OrderEmbTask,
named.EucDiskEmbTask,
named.SphericalDiskEmbTask,
named.HypDiskEmbTask
]
for cls in classes:
for dim in [5,10]:
for task_type in ["0percent", "10percent", "25percent", "50percent"]:
for seed in range(1):
yield self.clone(cls, dim=dim, task_type=task_type, seed=seed)
class RunAll(luigi.WrapperTask):
def requires(self):
yield WordNetNounTask()
yield WordNetNounRevTask()
if __name__ == '__main__':
luigi.run()
|
StarcoderdataPython
|
3282788
|
<filename>eynnyd/internal/wsgi/empty_response_body.py
from eynnyd.internal.wsgi.abstract_response_body import AbstractResponseBody
class EmptyResponseBody(AbstractResponseBody):
def get_body(self):
return []
|
StarcoderdataPython
|
1690308
|
<gh_stars>1-10
"""Helper functions to load knowledge graphs."""
from .datasets import load_from_csv, load_from_rdf, load_fb15k, load_wn18, load_fb15k_237, load_from_ntriples, \
load_yago3_10, load_wn18rr
__all__ = ['load_from_csv', 'load_from_rdf', 'load_from_ntriples', 'load_wn18', 'load_fb15k',
'load_fb15k_237', 'load_yago3_10', 'load_wn18rr']
|
StarcoderdataPython
|
34693
|
from ctypes.util import find_library as _find_library
print(_find_library('sndfile'))
print('test fine')
|
StarcoderdataPython
|
3240044
|
def rgb_to_xy(red, green, blue):
""" conversion of RGB colors to CIE1931 XY colors
Formulas implemented from: https://gist.github.com/popcorn245/30afa0f98eea1c2fd34d
Parameters:
red (float): a number between 0.0 and 1.0 representing red in the RGB space
green (float): a number between 0.0 and 1.0 representing green in the RGB space
blue (float): a number between 0.0 and 1.0 representing blue in the RGB space
Returns:
xy (list): x and y
"""
# gamma correction
red = pow((red + 0.055) / (1.0 + 0.055),
2.4) if red > 0.04045 else (red / 12.92)
green = pow((green + 0.055) / (1.0 + 0.055),
2.4) if green > 0.04045 else (green / 12.92)
blue = pow((blue + 0.055) / (1.0 + 0.055),
2.4) if blue > 0.04045 else (blue / 12.92)
# convert rgb to xyz
x = red * 0.649926 + green * 0.103455 + blue * 0.197109
y = red * 0.234327 + green * 0.743075 + blue * 0.022598
z = green * 0.053077 + blue * 1.035763
    # convert xyz to chromaticity xy: normalize by the XYZ sum once, so the
    # second division is not affected by the already-overwritten x
    total = x + y + z
    x = x / total
    y = y / total
    return [x, y]
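# A minimal usage sketch (not part of the original module): pure red should land
# in the red corner of the CIE 1931 diagram, roughly x=0.735, y=0.265 with the
# conversion matrix above.
if __name__ == "__main__":
    x, y = rgb_to_xy(1.0, 0.0, 0.0)
    print("pure red -> x=%.3f, y=%.3f" % (x, y))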
|
StarcoderdataPython
|
3285300
|
'''functions to work with contrasts for multiple tests
contrast matrices for comparing all pairs, all levels to reference level, ...
extension to 2-way groups in progress
TwoWay: class for bringing two-way analysis together and try out
various helper functions
Idea for second part
- get all transformation matrices to move in between different full rank
parameterizations
- standardize to one parameterization to get all interesting effects.
- multivariate normal distribution
- exploit or expand what we have in LikelihoodResults, cov_params, f_test,
t_test, example: resols_dropf_full.cov_params(C2)
- connect to new multiple comparison for contrast matrices, based on
multivariate normal or t distribution (Hothorn, Bretz, Westfall)
'''
from numpy.testing import assert_equal
import numpy as np
#next 3 functions copied from multicomp.py
def contrast_allpairs(nm):
'''contrast or restriction matrix for all pairs of nm variables
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm*(nm-1)/2, nm)
contrast matrix for all pairwise comparisons
'''
contr = []
for i in range(nm):
for j in range(i+1, nm):
contr_row = np.zeros(nm)
contr_row[i] = 1
contr_row[j] = -1
contr.append(contr_row)
return np.array(contr)
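# Example (added for illustration, not in the original docstring):
# contrast_allpairs(3) returns
#   [[ 1., -1.,  0.],
#    [ 1.,  0., -1.],
#    [ 0.,  1., -1.]]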
def contrast_all_one(nm):
'''contrast or restriction matrix for all against first comparison
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm-1, nm)
contrast matrix for all against first comparisons
'''
contr = np.column_stack((np.ones(nm-1), -np.eye(nm-1)))
return contr
def contrast_diff_mean(nm):
'''contrast or restriction matrix for all against mean comparison
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm-1, nm)
contrast matrix for all against mean comparisons
'''
return np.eye(nm) - np.ones((nm,nm))/nm
def signstr(x, noplus=False):
if x in [-1,0,1]:
if not noplus:
return '+' if np.sign(x)>=0 else '-'
else:
return '' if np.sign(x)>=0 else '-'
else:
return str(x)
def contrast_labels(contrasts, names, reverse=False):
if reverse:
sl = slice(None, None, -1)
else:
sl = slice(None)
    labels = [''.join(['%s%s' % (signstr(c, noplus=True), v)
                       for c, v in list(zip(row, names))[sl] if c != 0])
              for row in contrasts]
return labels
def contrast_product(names1, names2, intgroup1=None, intgroup2=None, pairs=False):
'''build contrast matrices for products of two categorical variables
this is an experimental script and should be converted to a class
Parameters
----------
names1, names2 : lists of strings
contains the list of level labels for each categorical variable
intgroup1, intgroup2 : ndarrays TODO: this part not tested, finished yet
categorical variable
Notes
-----
This creates a full rank matrix. It does not do all pairwise comparisons,
parameterization is using contrast_all_one to get differences with first
level.
? does contrast_all_pairs work as a plugin to get all pairs ?
'''
n1 = len(names1)
n2 = len(names2)
names_prod = ['%s_%s' % (i,j) for i in names1 for j in names2]
ee1 = np.zeros((1,n1))
ee1[0,0] = 1
if not pairs:
dd = np.r_[ee1, -contrast_all_one(n1)]
else:
dd = np.r_[ee1, -contrast_allpairs(n1)]
contrast_prod = np.kron(dd[1:], np.eye(n2))
names_contrast_prod0 = contrast_labels(contrast_prod, names_prod, reverse=True)
    names_contrast_prod = [''.join(['%s%s' % (signstr(c, noplus=True), v)
                                    for c, v in list(zip(row, names_prod))[::-1] if c != 0])
                           for row in contrast_prod]
ee2 = np.zeros((1,n2))
ee2[0,0] = 1
#dd2 = np.r_[ee2, -contrast_all_one(n2)]
if not pairs:
dd2 = np.r_[ee2, -contrast_all_one(n2)]
else:
dd2 = np.r_[ee2, -contrast_allpairs(n2)]
contrast_prod2 = np.kron(np.eye(n1), dd2[1:])
    names_contrast_prod2 = [''.join(['%s%s' % (signstr(c, noplus=True), v)
                                     for c, v in list(zip(row, names_prod))[::-1] if c != 0])
                            for row in contrast_prod2]
    if (intgroup1 is not None) and (intgroup2 is not None):
d1, _ = dummy_1d(intgroup1)
d2, _ = dummy_1d(intgroup2)
dummy = dummy_product(d1, d2)
else:
dummy = None
return (names_prod, contrast_prod, names_contrast_prod,
contrast_prod2, names_contrast_prod2, dummy)
def dummy_1d(x, varname=None):
'''dummy variable for id integer groups
Parameters
----------
x : ndarray, 1d
categorical variable, requires integers if varname is None
varname : string
name of the variable used in labels for category levels
Returns
-------
dummy : ndarray, 2d
array of dummy variables, one column for each level of the
category (full set)
labels : list of strings
labels for the columns, i.e. levels of each category
Notes
-----
    use tools.categorical instead for more options
See Also
--------
statsmodels.tools.categorical
Examples
--------
>>> x = np.array(['F', 'F', 'M', 'M', 'F', 'F', 'M', 'M', 'F', 'F', 'M', 'M'],
dtype='|S1')
>>> dummy_1d(x, varname='gender')
(array([[1, 0],
[1, 0],
[0, 1],
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[0, 1]]), ['gender_F', 'gender_M'])
'''
if varname is None: #assumes integer
labels = ['level_%d' % i for i in range(x.max() + 1)]
return (x[:,None]==np.arange(x.max()+1)).astype(int), labels
else:
grouplabels = np.unique(x)
labels = [varname + '_%s' % str(i) for i in grouplabels]
return (x[:,None]==grouplabels).astype(int), labels
def dummy_product(d1, d2, method='full'):
'''dummy variable from product of two dummy variables
Parameters
----------
d1, d2 : ndarray
two dummy variables, assumes full set for methods 'drop-last'
and 'drop-first'
method : {'full', 'drop-last', 'drop-first'}
'full' returns the full product, encoding of intersection of
categories.
The drop methods provide a difference dummy encoding:
(constant, main effects, interaction effects). The first or last columns
of the dummy variable (i.e. levels) are dropped to get full rank
dummy matrix.
Returns
-------
dummy : ndarray
dummy variable for product, see method
'''
if method == 'full':
dd = (d1[:,:,None]*d2[:,None,:]).reshape(d1.shape[0],-1)
elif method == 'drop-last': #same as SAS transreg
d12rl = dummy_product(d1[:,:-1], d2[:,:-1])
dd = np.column_stack((np.ones(d1.shape[0], int), d1[:,:-1], d2[:,:-1],d12rl))
#Note: dtype int should preserve dtype of d1 and d2
elif method == 'drop-first':
d12r = dummy_product(d1[:,1:], d2[:,1:])
dd = np.column_stack((np.ones(d1.shape[0], int), d1[:,1:], d2[:,1:],d12r))
else:
raise ValueError('method not recognized')
return dd
def dummy_limits(d):
'''start and endpoints of groups in a sorted dummy variable array
helper function for nested categories
Examples
--------
>>> d1 = np.array([[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1]])
>>> dummy_limits(d1)
(array([0, 4, 8]), array([ 4, 8, 12]))
get group slices from an array
>>> [np.arange(d1.shape[0])[b:e] for b,e in zip(*dummy_limits(d1))]
[array([0, 1, 2, 3]), array([4, 5, 6, 7]), array([ 8, 9, 10, 11])]
>>> [np.arange(d1.shape[0])[b:e] for b,e in zip(*dummy_limits(d1))]
[array([0, 1, 2, 3]), array([4, 5, 6, 7]), array([ 8, 9, 10, 11])]
'''
nobs, nvars = d.shape
start1, col1 = np.nonzero(np.diff(d,axis=0)==1)
end1, col1_ = np.nonzero(np.diff(d,axis=0)==-1)
cc = np.arange(nvars)
#print(cc, np.r_[[0], col1], np.r_[col1_, [nvars-1]]
if ((not (np.r_[[0], col1] == cc).all())
or (not (np.r_[col1_, [nvars-1]] == cc).all())):
raise ValueError('dummy variable is not sorted')
start = np.r_[[0], start1+1]
end = np.r_[end1+1, [nobs]]
return start, end
def dummy_nested(d1, d2, method='full'):
'''unfinished and incomplete mainly copy past dummy_product
dummy variable from product of two dummy variables
Parameters
----------
d1, d2 : ndarray
two dummy variables, d2 is assumed to be nested in d1
Assumes full set for methods 'drop-last' and 'drop-first'.
method : {'full', 'drop-last', 'drop-first'}
'full' returns the full product, which in this case is d2.
The drop methods provide an effects encoding:
(constant, main effects, subgroup effects). The first or last columns
of the dummy variable (i.e. levels) are dropped to get full rank
encoding.
Returns
-------
dummy : ndarray
dummy variable for product, see method
'''
if method == 'full':
return d2
start1, end1 = dummy_limits(d1)
start2, end2 = dummy_limits(d2)
first = np.in1d(start2, start1)
last = np.in1d(end2, end1)
equal = (first == last)
col_dropf = ~first*~equal
col_dropl = ~last*~equal
if method == 'drop-last':
d12rl = dummy_product(d1[:,:-1], d2[:,:-1])
dd = np.column_stack((np.ones(d1.shape[0], int), d1[:,:-1], d2[:,col_dropl]))
#Note: dtype int should preserve dtype of d1 and d2
elif method == 'drop-first':
d12r = dummy_product(d1[:,1:], d2[:,1:])
dd = np.column_stack((np.ones(d1.shape[0], int), d1[:,1:], d2[:,col_dropf]))
else:
raise ValueError('method not recognized')
return dd, col_dropf, col_dropl
class DummyTransform(object):
'''Conversion between full rank dummy encodings
y = X b + u
b = C a
a = C^{-1} b
y = X C a + u
define Z = X C, then
y = Z a + u
contrasts:
R_b b = r
R_a a = R_b C a = r
where R_a = R_b C
Here C is the transform matrix, with dot_left and dot_right as the main
methods, and the same for the inverse transform matrix, C^{-1}
Note:
- The class was mainly written to keep left and right straight.
- No checking is done.
- not sure yet if method names make sense
'''
def __init__(self, d1, d2):
'''C such that d1 C = d2, with d1 = X, d2 = Z
should be (x, z) in arguments ?
'''
self.transf_matrix = np.linalg.lstsq(d1, d2, rcond=-1)[0]
self.invtransf_matrix = np.linalg.lstsq(d2, d1, rcond=-1)[0]
def dot_left(self, a):
''' b = C a
'''
return np.dot(self.transf_matrix, a)
def dot_right(self, x):
''' z = x C
'''
return np.dot(x, self.transf_matrix)
def inv_dot_left(self, b):
''' a = C^{-1} b
'''
return np.dot(self.invtransf_matrix, b)
def inv_dot_right(self, z):
''' x = z C^{-1}
'''
return np.dot(z, self.invtransf_matrix)
def groupmean_d(x, d):
'''groupmeans using dummy variables
Parameters
----------
x : array_like, ndim
data array, tested for 1,2 and 3 dimensions
d : ndarray, 1d
dummy variable, needs to have the same length
as x in axis 0.
Returns
-------
groupmeans : ndarray, ndim-1
means for each group along axis 0, the levels
of the groups are the last axis
Notes
-----
This will be memory intensive if there are many levels
in the categorical variable, i.e. many columns in the
dummy variable. In this case it is recommended to use
a more efficient version.
'''
x = np.asarray(x)
## if x.ndim == 1:
## nvars = 1
## else:
nvars = x.ndim + 1
sli = [slice(None)] + [None]*(nvars-2) + [slice(None)]
    return (x[..., None] * d[tuple(sli)]).sum(0) * 1. / d.sum(0)
class TwoWay(object):
'''a wrapper class for two way anova type of analysis with OLS
currently mainly to bring things together
Notes
-----
unclear: adding multiple test might assume block design or orthogonality
This estimates the full dummy version with OLS.
The drop first dummy representation can be recovered through the
transform method.
TODO: add more methods, tests, pairwise, multiple, marginal effects
    try out what can be added for user-friendly access.
missing: ANOVA table
'''
def __init__(self, endog, factor1, factor2, varnames=None):
self.nobs = factor1.shape[0]
if varnames is None:
vname1 = 'a'
vname2 = 'b'
else:
            vname1, vname2 = varnames
self.d1, self.d1_labels = d1, d1_labels = dummy_1d(factor1, vname1)
self.d2, self.d2_labels = d2, d2_labels = dummy_1d(factor2, vname2)
self.nlevel1 = nlevel1 = d1.shape[1]
self.nlevel2 = nlevel2 = d2.shape[1]
#get product dummies
res = contrast_product(d1_labels, d2_labels)
prodlab, C1, C1lab, C2, C2lab, _ = res
self.prod_label, self.C1, self.C1_label, self.C2, self.C2_label, _ = res
dp_full = dummy_product(d1, d2, method='full')
dp_dropf = dummy_product(d1, d2, method='drop-first')
self.transform = DummyTransform(dp_full, dp_dropf)
#estimate the model
self.nvars = dp_full.shape[1]
self.exog = dp_full
self.resols = sm.OLS(endog, dp_full).fit()
self.params = self.resols.params
#get transformed parameters, (constant, main, interaction effect)
self.params_dropf = self.transform.inv_dot_left(self.params)
self.start_interaction = 1 + (nlevel1 - 1) + (nlevel2 - 1)
self.n_interaction = self.nvars - self.start_interaction
#convert to cached property
def r_nointer(self):
'''contrast/restriction matrix for no interaction
'''
nia = self.n_interaction
R_nointer = np.hstack((np.zeros((nia, self.nvars-nia)), np.eye(nia)))
#inter_direct = resols_full_dropf.tval[-nia:]
R_nointer_transf = self.transform.inv_dot_right(R_nointer)
self.R_nointer_transf = R_nointer_transf
return R_nointer_transf
def ttest_interaction(self):
        '''t-tests that the interaction terms are zero
'''
#use self.r_nointer instead
nia = self.n_interaction
R_nointer = np.hstack((np.zeros((nia, self.nvars-nia)), np.eye(nia)))
#inter_direct = resols_full_dropf.tval[-nia:]
R_nointer_transf = self.transform.inv_dot_right(R_nointer)
self.R_nointer_transf = R_nointer_transf
t_res = self.resols.t_test(R_nointer_transf)
return t_res
def ftest_interaction(self):
        '''F-test that the interaction terms are jointly zero
'''
R_nointer_transf = self.r_nointer()
return self.resols.f_test(R_nointer_transf)
def ttest_conditional_effect(self, factorind):
if factorind == 1:
return self.resols.t_test(self.C1), self.C1_label
else:
return self.resols.t_test(self.C2), self.C2_label
def summary_coeff(self):
from statsmodels.iolib import SimpleTable
params_arr = self.params.reshape(self.nlevel1, self.nlevel2)
stubs = self.d1_labels
headers = self.d2_labels
title = 'Estimated Coefficients by factors'
table_fmt = dict(
data_fmts = ["%#10.4g"]*self.nlevel2)
return SimpleTable(params_arr, headers, stubs, title=title,
txt_fmt=table_fmt)
# --------------- tests
# TODO: several tests still missing, several are in the example with print
class TestContrastTools(object):
def __init__(self):
self.v1name = ['a0', 'a1', 'a2']
self.v2name = ['b0', 'b1']
self.d1 = np.array([[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1]])
def test_dummy_1d(self):
x = np.array(['F', 'F', 'M', 'M', 'F', 'F', 'M', 'M', 'F', 'F', 'M', 'M'],
dtype='|S1')
d, labels = (np.array([[1, 0],
[1, 0],
[0, 1],
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[0, 1]]), ['gender_F', 'gender_M'])
res_d, res_labels = dummy_1d(x, varname='gender')
assert_equal(res_d, d)
assert_equal(res_labels, labels)
def test_contrast_product(self):
res_cp = contrast_product(self.v1name, self.v2name)
res_t = [0]*6
res_t[0] = ['a0_b0', 'a0_b1', 'a1_b0', 'a1_b1', 'a2_b0', 'a2_b1']
res_t[1] = np.array([[-1., 0., 1., 0., 0., 0.],
[ 0., -1., 0., 1., 0., 0.],
[-1., 0., 0., 0., 1., 0.],
[ 0., -1., 0., 0., 0., 1.]])
res_t[2] = ['a1_b0-a0_b0', 'a1_b1-a0_b1', 'a2_b0-a0_b0', 'a2_b1-a0_b1']
res_t[3] = np.array([[-1., 1., 0., 0., 0., 0.],
[ 0., 0., -1., 1., 0., 0.],
[ 0., 0., 0., 0., -1., 1.]])
res_t[4] = ['a0_b1-a0_b0', 'a1_b1-a1_b0', 'a2_b1-a2_b0']
for ii in range(5):
np.testing.assert_equal(res_cp[ii], res_t[ii], err_msg=str(ii))
def test_dummy_limits(self):
b,e = dummy_limits(self.d1)
assert_equal(b, np.array([0, 4, 8]))
assert_equal(e, np.array([ 4, 8, 12]))
if __name__ == '__main__':
tt = TestContrastTools()
tt.test_contrast_product()
tt.test_dummy_1d()
tt.test_dummy_limits()
import statsmodels.api as sm
examples = ['small', 'large', None][1]
v1name = ['a0', 'a1', 'a2']
v2name = ['b0', 'b1']
res_cp = contrast_product(v1name, v2name)
print(res_cp)
y = np.arange(12)
x1 = np.arange(12)//4
x2 = np.arange(12)//2 % 2
if 'small' in examples:
d1, d1_labels = dummy_1d(x1)
d2, d2_labels = dummy_1d(x2)
if 'large' in examples:
x1 = np.repeat(x1, 5, axis=0)
x2 = np.repeat(x2, 5, axis=0)
nobs = x1.shape[0]
d1, d1_labels = dummy_1d(x1)
d2, d2_labels = dummy_1d(x2)
dd_full = dummy_product(d1, d2, method='full')
dd_dropl = dummy_product(d1, d2, method='drop-last')
dd_dropf = dummy_product(d1, d2, method='drop-first')
#Note: full parameterization of dummies is orthogonal
#np.eye(6)*10 in "large" example
print((np.dot(dd_full.T, dd_full) == np.diag(dd_full.sum(0))).all())
#check that transforms work
#generate 3 data sets with the 3 different parameterizations
effect_size = [1., 0.01][1]
noise_scale = [0.001, 0.1][0]
noise = noise_scale * np.random.randn(nobs)
beta = effect_size * np.arange(1,7)
ydata_full = (dd_full * beta).sum(1) + noise
ydata_dropl = (dd_dropl * beta).sum(1) + noise
ydata_dropf = (dd_dropf * beta).sum(1) + noise
resols_full_full = sm.OLS(ydata_full, dd_full).fit()
resols_full_dropf = sm.OLS(ydata_full, dd_dropf).fit()
params_f_f = resols_full_full.params
params_f_df = resols_full_dropf.params
resols_dropf_full = sm.OLS(ydata_dropf, dd_full).fit()
resols_dropf_dropf = sm.OLS(ydata_dropf, dd_dropf).fit()
params_df_f = resols_dropf_full.params
params_df_df = resols_dropf_dropf.params
tr_of = np.linalg.lstsq(dd_dropf, dd_full, rcond=-1)[0]
tr_fo = np.linalg.lstsq(dd_full, dd_dropf, rcond=-1)[0]
print(np.dot(tr_fo, params_df_df) - params_df_f)
print(np.dot(tr_of, params_f_f) - params_f_df)
transf_f_df = DummyTransform(dd_full, dd_dropf)
print(np.max(np.abs((dd_full - transf_f_df.inv_dot_right(dd_dropf)))))
print(np.max(np.abs((dd_dropf - transf_f_df.dot_right(dd_full)))))
print(np.max(np.abs((params_df_df
- transf_f_df.inv_dot_left(params_df_f)))))
np.max(np.abs((params_f_df
- transf_f_df.inv_dot_left(params_f_f))))
prodlab, C1, C1lab, C2, C2lab,_ = contrast_product(v1name, v2name)
print('\ntvalues for no effect of factor 1')
print('each test is conditional on a level of factor 2')
print(C1lab)
print(resols_dropf_full.t_test(C1).tvalue)
print('\ntvalues for no effect of factor 2')
print('each test is conditional on a level of factor 1')
print(C2lab)
print(resols_dropf_full.t_test(C2).tvalue)
#covariance matrix of restrictions C2, note: orthogonal
resols_dropf_full.cov_params(C2)
#testing for no interaction effect
R_noint = np.hstack((np.zeros((2,4)), np.eye(2)))
inter_direct = resols_full_dropf.tvalues[-2:]
inter_transf = resols_full_full.t_test(transf_f_df.inv_dot_right(R_noint)).tvalue
print(np.max(np.abs((inter_direct - inter_transf))))
#now with class version
tw = TwoWay(ydata_dropf, x1, x2)
print(tw.ttest_interaction().tvalue)
print(tw.ttest_interaction().pvalue)
print(tw.ftest_interaction().fvalue)
print(tw.ftest_interaction().pvalue)
print(tw.ttest_conditional_effect(1)[0].tvalue)
print(tw.ttest_conditional_effect(2)[0].tvalue)
print(tw.summary_coeff())
''' documentation for early examples while developing - some have changed already
>>> y = np.arange(12)
>>> y
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
>>> x1 = np.arange(12)//4
>>> x1
array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
>>> x2 = np.arange(12)//2%2
>>> x2
array([0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1])
>>> d1 = dummy_1d(x1)
>>> d1
array([[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1]])
>>> d2 = dummy_1d(x2)
>>> d2
array([[1, 0],
[1, 0],
[0, 1],
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[0, 1]])
>>> d12 = dummy_product(d1, d2)
>>> d12
array([[1, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1]])
>>> d12rl = dummy_product(d1[:,:-1], d2[:,:-1])
>>> np.column_stack((np.ones(d1.shape[0]), d1[:,:-1], d2[:,:-1],d12rl))
array([[ 1., 1., 0., 1., 1., 0.],
[ 1., 1., 0., 1., 1., 0.],
[ 1., 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0., 0.],
[ 1., 0., 1., 1., 0., 1.],
[ 1., 0., 1., 1., 0., 1.],
[ 1., 0., 1., 0., 0., 0.],
[ 1., 0., 1., 0., 0., 0.],
[ 1., 0., 0., 1., 0., 0.],
[ 1., 0., 0., 1., 0., 0.],
[ 1., 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0., 0.]])
'''
#nprod = ['%s_%s' % (i,j) for i in ['a0', 'a1', 'a2'] for j in ['b0', 'b1']]
#>>> [''.join(['%s%s' % (signstr(c),v) for c,v in zip(row, nprod) if c != 0])
# for row in np.kron(dd[1:], np.eye(2))]
'''
>>> nprod = ['%s_%s' % (i,j) for i in ['a0', 'a1', 'a2'] for j in ['b0', 'b1']]
>>> nprod
['a0_b0', 'a0_b1', 'a1_b0', 'a1_b1', 'a2_b0', 'a2_b1']
>>> [''.join(['%s%s' % (signstr(c),v) for c,v in zip(row, nprod) if c != 0]) for row in np.kron(dd[1:], np.eye(2))]
['-a0b0+a1b0', '-a0b1+a1b1', '-a0b0+a2b0', '-a0b1+a2b1']
>>> [''.join(['%s%s' % (signstr(c),v) for c,v in zip(row, nprod)[::-1] if c != 0]) for row in np.kron(dd[1:], np.eye(2))]
['+a1_b0-a0_b0', '+a1_b1-a0_b1', '+a2_b0-a0_b0', '+a2_b1-a0_b1']
>>> np.r_[[[1,0,0,0,0]],contrast_all_one(5)]
array([[ 1., 0., 0., 0., 0.],
[ 1., -1., 0., 0., 0.],
[ 1., 0., -1., 0., 0.],
[ 1., 0., 0., -1., 0.],
[ 1., 0., 0., 0., -1.]])
>>> idxprod = [(i,j) for i in range(3) for j in range(2)]
>>> idxprod
[(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1)]
>>> np.array(idxprod).reshape(2,3,2,order='F')[:,:,0]
array([[0, 1, 2],
[0, 1, 2]])
>>> np.array(idxprod).reshape(2,3,2,order='F')[:,:,1]
array([[0, 0, 0],
[1, 1, 1]])
>>> dd3_ = np.r_[[[0,0,0]],contrast_all_one(3)]
pairwise contrasts and reparameterization
dd = np.r_[[[1,0,0,0,0]],-contrast_all_one(5)]
>>> dd
array([[ 1., 0., 0., 0., 0.],
[-1., 1., 0., 0., 0.],
[-1., 0., 1., 0., 0.],
[-1., 0., 0., 1., 0.],
[-1., 0., 0., 0., 1.]])
>>> np.dot(dd.T, np.arange(5))
array([-10., 1., 2., 3., 4.])
>>> np.round(np.linalg.inv(dd.T)).astype(int)
array([[1, 1, 1, 1, 1],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1]])
>>> np.round(np.linalg.inv(dd)).astype(int)
array([[1, 0, 0, 0, 0],
[1, 1, 0, 0, 0],
[1, 0, 1, 0, 0],
[1, 0, 0, 1, 0],
[1, 0, 0, 0, 1]])
>>> dd
array([[ 1., 0., 0., 0., 0.],
[-1., 1., 0., 0., 0.],
[-1., 0., 1., 0., 0.],
[-1., 0., 0., 1., 0.],
[-1., 0., 0., 0., 1.]])
>>> ddinv=np.round(np.linalg.inv(dd.T)).astype(int)
>>> np.dot(ddinv, np.arange(5))
array([10, 1, 2, 3, 4])
>>> np.dot(dd, np.arange(5))
array([ 0., 1., 2., 3., 4.])
>>> np.dot(dd, 5+np.arange(5))
array([ 5., 1., 2., 3., 4.])
>>> ddinv2 = np.round(np.linalg.inv(dd)).astype(int)
>>> np.dot(ddinv2, np.arange(5))
array([0, 1, 2, 3, 4])
>>> np.dot(ddinv2, 5+np.arange(5))
array([ 5, 11, 12, 13, 14])
>>> np.dot(ddinv2, [5, 0, 0 , 1, 2])
array([5, 5, 5, 6, 7])
>>> np.dot(ddinv2, dd)
array([[ 1., 0., 0., 0., 0.],
[ 0., 1., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 1., 0.],
[ 0., 0., 0., 0., 1.]])
>>> dd3 = -np.r_[[[1,0,0]],contrast_all_one(3)]
>>> dd2 = -np.r_[[[1,0]],contrast_all_one(2)]
>>> np.kron(np.eye(3), dd2)
array([[-1., 0., 0., 0., 0., 0.],
[-1., 1., 0., 0., 0., 0.],
[ 0., 0., -1., 0., 0., 0.],
[ 0., 0., -1., 1., 0., 0.],
[ 0., 0., 0., 0., -1., 0.],
[ 0., 0., 0., 0., -1., 1.]])
>>> dd2
array([[-1., 0.],
[-1., 1.]])
>>> np.kron(np.eye(3), dd2[1:])
array([[-1., 1., 0., 0., 0., 0.],
[ 0., 0., -1., 1., 0., 0.],
[ 0., 0., 0., 0., -1., 1.]])
>>> np.kron(dd[1:], np.eye(2))
array([[-1., 0., 1., 0., 0., 0.],
[ 0., -1., 0., 1., 0., 0.],
[-1., 0., 0., 0., 1., 0.],
[ 0., -1., 0., 0., 0., 1.]])
d_ = np.r_[[[1,0,0,0,0]],contrast_all_one(5)]
>>> d_
array([[ 1., 0., 0., 0., 0.],
[ 1., -1., 0., 0., 0.],
[ 1., 0., -1., 0., 0.],
[ 1., 0., 0., -1., 0.],
[ 1., 0., 0., 0., -1.]])
>>> np.round(np.linalg.pinv(d_)).astype(int)
array([[ 1, 0, 0, 0, 0],
[ 1, -1, 0, 0, 0],
[ 1, 0, -1, 0, 0],
[ 1, 0, 0, -1, 0],
[ 1, 0, 0, 0, -1]])
>>> np.linalg.inv(d_).astype(int)
array([[ 1, 0, 0, 0, 0],
[ 1, -1, 0, 0, 0],
[ 1, 0, -1, 0, 0],
[ 1, 0, 0, -1, 0],
[ 1, 0, 0, 0, -1]])
group means
>>> sli = [slice(None)] + [None]*(3-2) + [slice(None)]
>>> (np.column_stack((y, x1, x2))[...,None] * d1[sli]).sum(0)*1./d1.sum(0)
array([[ 1.5, 5.5, 9.5],
[ 0. , 1. , 2. ],
[ 0.5, 0.5, 0.5]])
>>> [(z[:,None] * d1).sum(0)*1./d1.sum(0) for z in np.column_stack((y, x1, x2)).T]
[array([ 1.5, 5.5, 9.5]), array([ 0., 1., 2.]), array([ 0.5, 0.5, 0.5])]
>>>
'''
|
StarcoderdataPython
|
38626
|
music = {
'kb': '''
Instrument(Flute)
Piece(Undine, Reinecke)
Piece(Carmen, Bourne)
(Instrument(x) & Piece(w, c) & Era(c, r)) ==> Program(w)
Era(Reinecke, Romantic)
Era(Bourne, Romantic)
''',
'queries': '''
Program(x)
''',
}
life = {
'kb': '''
Musician(x) ==> Stressed(x)
(Student(x) & Text(y)) ==> Stressed(x)
Musician(Heather)
''',
'queries': '''
Stressed(x)
'''
}
Examples = {
'music': music,
'life': life
}
|
StarcoderdataPython
|
3357414
|
# Uses python3
import sys
def get_fibonacci_last_digit(n):
if n < 2:
return n
prev = 1
cur = 1
for i in range(2, n):
        prev, cur = cur, (prev + cur) % 10
return cur % 10
if __name__ == '__main__':
print(get_fibonacci_last_digit(int(input())))
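# Quick sanity check (with the F(1) = F(2) = 1 indexing used above):
# get_fibonacci_last_digit(10) == 5, since F(10) = 55.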
|
StarcoderdataPython
|
192423
|
import networkx as nx
import matplotlib.pyplot as plt
import random
def bipartite(numNodes):
odds=[]
evens=[]
colours=[]
for i in range(1,numNodes+1,2):
odds.append(i)
colours.append('red')
for i in range(2,numNodes+1,2):
evens.append(i)
colours.append('blue')
B = nx.Graph()
B.add_nodes_from(odds, bipartite=0)
B.add_nodes_from(evens, bipartite=1)
for i in range(1,numNodes):
B.add_edge(i,i+1)
#just adds a few more edges on
if numNodes>=3:
for j in range(1,numNodes):
x=random.uniform(0, 1)
if x>0.6:
y=random.randint(1, len(evens)-1)
z=random.randint(1, len(odds)-1)
if z!=y:
B.add_edge(odds[z],evens[y])
lhs = nx.bipartite.sets(B)[0]
positions = nx.bipartite_layout(B, lhs,scale=40)
nx.draw_networkx_labels(B, pos=positions)
nx.draw(B, pos=positions,node_color=colours)
plt.savefig((str(numNodes)+"bipartite.png"), dpi=300)
plt.show()
bipartite(10)
|
StarcoderdataPython
|
1683551
|
<gh_stars>0
from ortools.constraint_solver import routing_enums_pb2
from ortools.constraint_solver import pywrapcp
import pandas as pd
from helper_funcs import getTOlist
def print_solution(data, manager, routing, solution):
"""Prints solution on console."""
total_distance = 0
total_load = 0
for vehicle_id in range(data['num_vehicles']):
index = routing.Start(vehicle_id)
plan_output = 'Route for vehicle {}:\n'.format(vehicle_id)
route_distance = 0
route_load = 0
while not routing.IsEnd(index):
node_index = manager.IndexToNode(index)
route_load += data['demands'][node_index]
plan_output += ' {0} Load({1}) -> '.format(node_index, route_load)
previous_index = index
index = solution.Value(routing.NextVar(index))
route_distance += routing.GetArcCostForVehicle(
previous_index, index, vehicle_id)
plan_output += ' {0} Load({1})\n'.format(manager.IndexToNode(index),
route_load)
plan_output += 'Distance of the route: {}m\n'.format(route_distance)
plan_output += 'Load of the route: {}\n'.format(route_load)
print(plan_output)
total_distance += route_distance
total_load += route_load
print('Total distance of all routes: {}m'.format(total_distance))
print('Total load of all routes: {}'.format(total_load))
# Callback functions (translate internal to external indices)
def distance_callback(from_index, to_index):
"""Returns the manhattan distance between the two nodes."""
# Convert from routing variable Index to distance matrix NodeIndex.
from_node = manager.IndexToNode(from_index)
to_node = manager.IndexToNode(to_index)
return data['distance_matrix'][from_node][to_node]
def demand_callback(from_index):
"""Returns the demand of the node."""
# Convert from routing variable Index to demands NodeIndex.
from_node = manager.IndexToNode(from_index)
return data['demands'][from_node]
TO_list = getTOlist()
data = {}
#Define Distance Matrix
# Note that a 6th location was added here as the 'depot' from which the trucks leave and return
data['distance_matrix'] = [
[0,1700,2100,22190,14440,24280], #Nuremburg
[1700,0,2430,22530,13690,23540], #Munich
[2100,2430,0,20420,12670,22500], #Stuttgart
[22190,22530,20420,0,11270,5790], #Supplier: Porto
[14440,13690,12670,11270,0,9960], #Supplier: Barcelona
    [24280,23540,22500,5790,9960,0] #Depot: Seville
]
data['pickups_deliveries'] = []
for i in TO_list:
data['pickups_deliveries'].append([i.origin,i.destination])
# Seville (index 5) is the depot appended to the distance matrix above;
# only the three German cities carry demand here.
data['num_vehicles'] = 5
data['depot'] = 5  # set starting point for all vehicles
data['demands'] = [15000, 20000, 10000, 0, 0, 0]
data['vehicle_capacities']= [25000]*data['num_vehicles'] # length must match num_vehicles
manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),
data['num_vehicles'], data['depot'])
routing = pywrapcp.RoutingModel(manager)
transit_callback_index = routing.RegisterTransitCallback(distance_callback)
demand_callback_index = routing.RegisterUnaryTransitCallback(demand_callback)
# This sets the cost of travel between any two locations (this is where our tariff information will
# eventually go)
routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
# NOTE:
# You can also define multiple arc cost evaluators that depend on which
# vehicle is traveling between locations, using the method routing.SetArcCostEvaluatorOfVehicle().
# For example, if the vehicles have different speeds, you could define the cost of travel between
# locations to be the distance divided by the vehicle's speed—in other words, the travel time
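# A hedged sketch of that idea (hypothetical per-vehicle speeds; not part of
# this model, so it is left commented out):
#   speeds = [80, 80, 60, 60, 60]  # one entry per vehicle
#   def make_time_callback(speed):
#       def time_callback(from_index, to_index):
#           return int(distance_callback(from_index, to_index) / speed)
#       return time_callback
#   for vehicle_id, speed in enumerate(speeds):
#       cb = routing.RegisterTransitCallback(make_time_callback(speed))
#       routing.SetArcCostEvaluatorOfVehicle(cb, vehicle_id)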
routing.AddDimension(
transit_callback_index,
0, # no slack
10000, # vehicle maximum travel distance
True, # start cumul to zero
'Distance')
routing.AddDimensionWithVehicleCapacity(
    demand_callback_index,
    0,  # null capacity slack
    data['vehicle_capacities'],  # vehicle maximum capacities
    True,  # start cumul to zero
    'Capacity')
distance_dimension = routing.GetDimensionOrDie('Distance')
distance_dimension.SetGlobalSpanCostCoefficient(10000)
for request in data['pickups_deliveries']:
pickup_index = manager.NodeToIndex(request[0])
delivery_index = manager.NodeToIndex(request[1])
# Creates a pickup and delivery request
routing.AddPickupAndDelivery(pickup_index, delivery_index)
# Requirement that the item must be picked up and delivered by the same vehicle
routing.solver().Add(
routing.VehicleVar(pickup_index) == routing.VehicleVar(
delivery_index))
# Requirement that item must be picked up before being delivered
routing.solver().Add(
distance_dimension.CumulVar(pickup_index) <=
distance_dimension.CumulVar(delivery_index))
search_parameters = pywrapcp.DefaultRoutingSearchParameters()
search_parameters.first_solution_strategy = (
routing_enums_pb2.FirstSolutionStrategy.PARALLEL_CHEAPEST_INSERTION)
search_parameters.local_search_metaheuristic = (
routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)
search_parameters.time_limit.FromSeconds(10)
solution = routing.SolveWithParameters(search_parameters)
if solution:
print_solution(data, manager, routing, solution)
else:
print("No solution.")
|
StarcoderdataPython
|
1789318
|
<filename>calico/etcddriver/protocol.py<gh_stars>1-10
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
calico.etcddriver.protocol
~~~~~~~~~~~~~~~~~~~~~~~~~~
Protocol constants for Felix <-> Driver protocol.
"""
import logging
import socket
import errno
from io import BytesIO
import msgpack
import select
_log = logging.getLogger(__name__)
MSG_KEY_TYPE = "type"
# Init message Felix -> Driver.
MSG_TYPE_INIT = "init"
MSG_KEY_ETCD_URLS = "etcd_urls"
MSG_KEY_HOSTNAME = "hostname"
MSG_KEY_KEY_FILE = "etcd_key_file"
MSG_KEY_CERT_FILE = "etcd_cert_file"
MSG_KEY_CA_FILE = "etcd_ca_file"
MSG_KEY_PROM_PORT = "prom_port"
# Config loaded message Driver -> Felix.
MSG_TYPE_CONFIG_LOADED = "config_loaded"
MSG_KEY_GLOBAL_CONFIG = "global"
MSG_KEY_HOST_CONFIG = "host"
# Config message Felix -> Driver.
MSG_TYPE_CONFIG = "conf"
MSG_KEY_LOG_FILE = "log_file"
MSG_KEY_SEV_FILE = "sev_file"
MSG_KEY_SEV_SCREEN = "sev_screen"
MSG_KEY_SEV_SYSLOG = "sev_syslog"
# Status message Driver -> Felix.
MSG_TYPE_STATUS = "stat"
MSG_KEY_STATUS = "status"
STATUS_WAIT_FOR_READY = "wait-for-ready"
STATUS_RESYNC = "resync"
STATUS_IN_SYNC = "in-sync"
# Force resync message Felix->Driver.
MSG_TYPE_RESYNC = "resync"
# Update message Driver -> Felix.
MSG_TYPE_UPDATE = "u"
MSG_KEY_KEY = "k"
MSG_KEY_VALUE = "v"
FLUSH_THRESHOLD = 200
class SocketClosed(Exception):
"""The socket was unexpectedly closed by the other end."""
pass
class WriteFailed(Exception):
"""Write to the socket failed."""
pass
class MessageWriter(object):
"""
Wrapper around a socket used to write protocol messages.
Supports buffering a number of messages for subsequent flush().
"""
def __init__(self, sck):
self._sck = sck
self._buf = BytesIO()
self._updates_pending = 0
def send_message(self, msg_type, fields=None, flush=True):
"""
Send a message of the given type with the given fields.
Optionally, flush the data to the socket.
This method will flush the buffer if it grows too large in any
case.
:param msg_type: one of the MSG_TYPE_* constants.
:param dict fields: dict mapping MSG_KEY_* constants to values.
:param flush: True to force the data to be written immediately.
"""
msg = {MSG_KEY_TYPE: msg_type}
if fields:
msg.update(fields)
self._buf.write(msgpack.dumps(msg))
if flush:
self.flush()
else:
self._maybe_flush()
def _maybe_flush(self):
self._updates_pending += 1
if self._updates_pending > FLUSH_THRESHOLD:
self.flush()
def flush(self):
"""
Flushes the write buffer to the socket immediately.
"""
_log.debug("Flushing the buffer to the socket")
buf_contents = self._buf.getvalue()
if buf_contents:
try:
self._sck.sendall(buf_contents)
except socket.error as e:
_log.exception("Failed to write to socket")
raise WriteFailed(e)
self._buf = BytesIO()
self._updates_pending = 0
class MessageReader(object):
def __init__(self, sck):
self._sck = sck
self._unpacker = msgpack.Unpacker()
def new_messages(self, timeout=1):
"""
Generator: generates 0 or more tuples containing message type and
message body (as a dict).
May generate 0 events in certain conditions even if there are
events available. (If the socket returns EAGAIN, for example.)
:param timeout: Maximum time to block waiting on the socket before
giving up. No exception is raised upon timeout but 0 events
are generated.
:raises SocketClosed if the socket is closed.
:raises socket.error if an unexpected socket error occurs.
"""
if timeout is not None:
read_ready, _, _ = select.select([self._sck], [], [], timeout)
if not read_ready:
return
try:
data = self._sck.recv(16384)
except socket.error as e:
if e.errno in (errno.EAGAIN,
errno.EWOULDBLOCK,
errno.EINTR):
_log.debug("Retryable error on read.")
return
else:
_log.error("Failed to read from socket: %r", e)
raise
if not data:
# No data indicates an orderly shutdown of the socket,
# which shouldn't happen.
_log.error("Socket closed by other end.")
raise SocketClosed()
# Feed the data into the Unpacker, if it has enough data it will then
# generate some messages.
self._unpacker.feed(data)
for msg in self._unpacker:
_log.debug("Unpacked message: %s", msg)
# coverage.py doesn't fully support yield statements.
yield msg[MSG_KEY_TYPE], msg # pragma: nocover
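if __name__ == "__main__":  # pragma: no cover
    # Minimal local demo (not part of the Felix <-> driver wiring): exercise the
    # writer/reader pair over a local socketpair.
    parent_sck, child_sck = socket.socketpair()
    writer = MessageWriter(parent_sck)
    reader = MessageReader(child_sck)
    writer.send_message(MSG_TYPE_STATUS, {MSG_KEY_STATUS: STATUS_IN_SYNC})
    for msg_type, msg in reader.new_messages(timeout=1):
        print(msg_type, msg)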
|
StarcoderdataPython
|
1687530
|
from django.shortcuts import render
from django.http import HttpResponse
import datetime
# Create your views here.
def home_view(request, *args,**kwargs):
# TODO: write code...
print('request:', request)
print('request user:', request.user)
print(args, kwargs)
return render(request,"home.html",{})
def contact_view(request, *args,**kwargs):
return render(request,"contact.html",{})
def about_view(request, *args,**kwargs):
my_context = {
'author': 'the author is <NAME>',
'context_introduction': 'The website is about Django learning',
'context_number': 123456,
'context_list': [123,234,345,456,567,'abc','zxc'],
'html': '<h1>Hello World!</h1>',
'datetime': datetime.datetime.now(),
}
return render(request,"about.html",my_context)
def products_view(request, *args,**kwargs):
return render(request,"products.html",{})
|
StarcoderdataPython
|
3326067
|
<gh_stars>0
#
# @lc app=leetcode id=105 lang=python3
#
# [105] Construct Binary Tree from Preorder and Inorder Traversal
#
# https://leetcode.com/problems/construct-binary-tree-from-preorder-and-inorder-traversal/description/
#
# algorithms
# Medium (47.06%)
# Likes: 2942
# Dislikes: 85
# Total Accepted: 333.7K
# Total Submissions: 708.5K
# Testcase Example: '[3,9,20,15,7]\n[9,3,15,20,7]'
#
# Given preorder and inorder traversal of a tree, construct the binary tree.
#
# Note:
# You may assume that duplicates do not exist in the tree.
#
# For example, given
#
#
# preorder = [3,9,20,15,7]
# inorder = [9,3,15,20,7]
#
# Return the following binary tree:
#
#
# 3
# / \
# 9 20
# / \
# 15 7
#
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:
self.preorder = preorder
self.p_idx = 0
self.total_inorder_pos = {v: i for i, v in enumerate(inorder)}
return self._buildTreeRec(0, len(inorder))
def _buildTreeRec(self, left_in, right_in):
if left_in == right_in:
return None
root = TreeNode(self.preorder[self.p_idx])
i_idx = self.total_inorder_pos[root.val]
self.p_idx += 1
root.left = self._buildTreeRec(left_in, i_idx)
root.right = self._buildTreeRec(i_idx + 1, right_in)
return root
# @lc code=end
|
StarcoderdataPython
|
3308187
|
import base64
import datetime
import json
import os
import pickle
import struct
import sys
import unittest
import uuid
from tornado import testing
import umsgpack
from sprockets.mixins.mediatype import content, handlers, transcoders
import examples
class UTC(datetime.tzinfo):
ZERO = datetime.timedelta(0)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
def tzname(self, dt):
return 'UTC'
class Context(object):
"""Super simple class to call setattr on"""
def __init__(self):
self.settings = {}
def pack_string(obj):
"""Optimally pack a string according to msgpack format"""
payload = str(obj).encode('ASCII')
l = len(payload)
if l < (2 ** 5):
prefix = struct.pack('B', 0b10100000 | l)
elif l < (2 ** 8):
prefix = struct.pack('BB', 0xD9, l)
elif l < (2 ** 16):
prefix = struct.pack('>BH', 0xDA, l)
else:
prefix = struct.pack('>BI', 0xDB, l)
return prefix + payload
def pack_bytes(payload):
"""Optimally pack a byte string according to msgpack format"""
l = len(payload)
if l < (2 ** 8):
prefix = struct.pack('BB', 0xC4, l)
elif l < (2 ** 16):
prefix = struct.pack('>BH', 0xC5, l)
else:
prefix = struct.pack('>BI', 0xC6, l)
return prefix + payload
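# For reference (worked examples, not part of the original helpers):
#   pack_string('foo') == b'\xa3foo'      (fixstr: 0b101 prefix | length 3)
#   pack_bytes(b'ab')  == b'\xc4\x02ab'   (bin 8: 0xC4 prefix, 1-byte length)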
class SendResponseTests(testing.AsyncHTTPTestCase):
def get_app(self):
return examples.make_application(debug=True)
def test_that_content_type_default_works(self):
response = self.fetch('/', method='POST', body='{}',
headers={'Content-Type': 'application/json'})
self.assertEqual(response.code, 200)
self.assertEqual(response.headers['Content-Type'],
'application/json; charset="utf-8"')
def test_that_missing_content_type_uses_default(self):
response = self.fetch('/', method='POST', body='{}',
headers={'Accept': 'application/xml',
'Content-Type': 'application/json'})
self.assertEqual(response.code, 200)
self.assertEqual(response.headers['Content-Type'],
'application/json; charset="utf-8"')
def test_that_accept_header_is_obeyed(self):
response = self.fetch('/', method='POST', body='{}',
headers={'Accept': 'application/msgpack',
'Content-Type': 'application/json'})
self.assertEqual(response.code, 200)
self.assertEqual(response.headers['Content-Type'],
'application/msgpack')
def test_that_default_content_type_is_set_on_response(self):
response = self.fetch('/', method='POST', body=umsgpack.packb({}),
headers={'Content-Type': 'application/msgpack'})
self.assertEqual(response.code, 200)
self.assertEqual(response.headers['Content-Type'],
'application/json; charset="utf-8"')
def test_that_vary_header_is_set(self):
response = self.fetch('/', method='POST', body=umsgpack.packb({}),
headers={'Content-Type': 'application/msgpack'})
self.assertEqual(response.code, 200)
self.assertEqual(response.headers['Vary'], 'Accept')
class GetRequestBodyTests(testing.AsyncHTTPTestCase):
def get_app(self):
return examples.make_application(debug=True)
def test_that_request_with_unhandled_type_results_in_415(self):
response = self.fetch(
'/', method='POST', headers={'Content-Type': 'application/xml'},
body=(u'<request><name>value</name>'
u'<embedded><utf8>\u2731</utf8></embedded>'
u'</request>').encode('utf-8'))
self.assertEqual(response.code, 415)
def test_that_msgpack_request_returns_default_type(self):
body = {
'name': 'value',
'embedded': {
'utf8': u'\u2731'
}
}
response = self.fetch('/', method='POST', body=umsgpack.packb(body),
headers={'Content-Type': 'application/msgpack'})
self.assertEqual(response.code, 200)
self.assertEqual(json.loads(response.body.decode('utf-8')), body)
def test_that_invalid_data_returns_400(self):
response = self.fetch(
'/', method='POST', headers={'Content-Type': 'application/json'},
body=('<?xml version="1.0"?><methodCall><methodName>echo'
'</methodName><params><param><value><str>Hi</str></value>'
'</param></params></methodCall>').encode('utf-8'))
self.assertEqual(response.code, 400)
class JSONTranscoderTests(unittest.TestCase):
def setUp(self):
super(JSONTranscoderTests, self).setUp()
self.transcoder = transcoders.JSONTranscoder()
def test_that_uuids_are_dumped_as_strings(self):
obj = {'id': uuid.uuid4()}
dumped = self.transcoder.dumps(obj)
self.assertEqual(dumped.replace(' ', ''), '{"id":"%s"}' % obj['id'])
def test_that_datetimes_are_dumped_in_isoformat(self):
obj = {'now': datetime.datetime.now()}
dumped = self.transcoder.dumps(obj)
self.assertEqual(dumped.replace(' ', ''),
'{"now":"%s"}' % obj['now'].isoformat())
def test_that_tzaware_datetimes_include_tzoffset(self):
obj = {'now': datetime.datetime.now().replace(tzinfo=UTC())}
self.assertTrue(obj['now'].isoformat().endswith('+00:00'))
dumped = self.transcoder.dumps(obj)
self.assertEqual(dumped.replace(' ', ''),
'{"now":"%s"}' % obj['now'].isoformat())
@unittest.skipIf(sys.version_info[0] == 2, 'bytes unsupported on python 2')
def test_that_bytes_are_base64_encoded(self):
bin = bytes(os.urandom(127))
dumped = self.transcoder.dumps({'bin': bin})
self.assertEqual(
dumped, '{"bin":"%s"}' % base64.b64encode(bin).decode('ASCII'))
def test_that_bytearrays_are_base64_encoded(self):
bin = bytearray(os.urandom(127))
dumped = self.transcoder.dumps({'bin': bin})
self.assertEqual(
dumped, '{"bin":"%s"}' % base64.b64encode(bin).decode('ASCII'))
def test_that_memoryviews_are_base64_encoded(self):
bin = memoryview(os.urandom(127))
dumped = self.transcoder.dumps({'bin': bin})
self.assertEqual(
dumped, '{"bin":"%s"}' % base64.b64encode(bin).decode('ASCII'))
def test_that_unhandled_objects_raise_type_error(self):
with self.assertRaises(TypeError):
self.transcoder.dumps(object())
class ContentSettingsTests(unittest.TestCase):
def test_that_handler_listed_in_available_content_types(self):
settings = content.ContentSettings()
settings['application/json'] = object()
self.assertEqual(len(settings.available_content_types), 1)
self.assertEqual(settings.available_content_types[0].content_type,
'application')
self.assertEqual(settings.available_content_types[0].content_subtype,
'json')
def test_that_handler_is_not_overwritten(self):
settings = content.ContentSettings()
settings['application/json'] = handler = object()
settings['application/json'] = object()
self.assertIs(settings.get('application/json'), handler)
def test_that_registered_content_types_are_normalized(self):
settings = content.ContentSettings()
handler = object()
settings['application/json; VerSion=foo; type=WhatEver'] = handler
self.assertIs(settings['application/json; type=whatever; version=foo'],
handler)
self.assertIn('application/json; type=whatever; version=foo',
(str(c) for c in settings.available_content_types))
def test_that_normalized_content_types_do_not_overwrite(self):
settings = content.ContentSettings()
settings['application/json; charset=UTF-8'] = handler = object()
settings['application/json; charset=utf-8'] = object()
self.assertEqual(len(settings.available_content_types), 1)
self.assertEqual(settings.available_content_types[0].content_type,
'application')
self.assertEqual(settings.available_content_types[0].content_subtype,
'json')
self.assertEqual(settings['application/json; charset=utf-8'], handler)
class ContentFunctionTests(unittest.TestCase):
def setUp(self):
super(ContentFunctionTests, self).setUp()
self.context = Context()
def test_that_add_binary_content_type_creates_binary_handler(self):
settings = content.install(self.context,
'application/octet-stream')
content.add_binary_content_type(self.context,
'application/vnd.python.pickle',
pickle.dumps, pickle.loads)
transcoder = settings['application/vnd.python.pickle']
self.assertIsInstance(transcoder, handlers.BinaryContentHandler)
self.assertIs(transcoder._pack, pickle.dumps)
self.assertIs(transcoder._unpack, pickle.loads)
def test_that_add_text_content_type_creates_text_handler(self):
settings = content.install(self.context, 'application/json')
content.add_text_content_type(self.context, 'application/json', 'utf8',
json.dumps, json.loads)
transcoder = settings['application/json']
self.assertIsInstance(transcoder, handlers.TextContentHandler)
self.assertIs(transcoder._dumps, json.dumps)
self.assertIs(transcoder._loads, json.loads)
def test_that_add_text_content_type_discards_charset_parameter(self):
settings = content.install(self.context, 'application/json', 'utf-8')
content.add_text_content_type(self.context,
'application/json;charset=UTF-8', 'utf8',
json.dumps, json.loads)
transcoder = settings['application/json']
self.assertIsInstance(transcoder, handlers.TextContentHandler)
def test_that_install_creates_settings(self):
settings = content.install(self.context, 'application/json', 'utf8')
self.assertIsNotNone(settings)
self.assertEqual(settings.default_content_type, 'application/json')
self.assertEqual(settings.default_encoding, 'utf8')
def test_that_get_settings_returns_none_when_no_settings(self):
settings = content.get_settings(self.context)
self.assertIsNone(settings)
def test_that_get_settings_returns_installed_settings(self):
settings = content.install(self.context, 'application/xml', 'utf8')
other_settings = content.get_settings(self.context)
self.assertIs(settings, other_settings)
def test_that_get_settings_will_create_instance_if_requested(self):
settings = content.get_settings(self.context, force_instance=True)
self.assertIsNotNone(settings)
self.assertIs(content.get_settings(self.context), settings)
class MsgPackTranscoderTests(unittest.TestCase):
def setUp(self):
super(MsgPackTranscoderTests, self).setUp()
self.transcoder = transcoders.MsgPackTranscoder()
def test_that_strings_are_dumped_as_strings(self):
dumped = self.transcoder.packb(u'foo')
self.assertEqual(self.transcoder.unpackb(dumped), 'foo')
self.assertEqual(dumped, pack_string('foo'))
def test_that_none_is_packed_as_nil_byte(self):
self.assertEqual(self.transcoder.packb(None), b'\xC0')
def test_that_bools_are_dumped_appropriately(self):
self.assertEqual(self.transcoder.packb(False), b'\xC2')
self.assertEqual(self.transcoder.packb(True), b'\xC3')
def test_that_ints_are_packed_appropriately(self):
self.assertEqual(self.transcoder.packb((2 ** 7) - 1), b'\x7F')
self.assertEqual(self.transcoder.packb(2 ** 7), b'\xCC\x80')
self.assertEqual(self.transcoder.packb(2 ** 8), b'\xCD\x01\x00')
self.assertEqual(self.transcoder.packb(2 ** 16),
b'\xCE\x00\x01\x00\x00')
self.assertEqual(self.transcoder.packb(2 ** 32),
b'\xCF\x00\x00\x00\x01\x00\x00\x00\x00')
def test_that_negative_ints_are_packed_accordingly(self):
self.assertEqual(self.transcoder.packb(-(2 ** 0)), b'\xFF')
self.assertEqual(self.transcoder.packb(-(2 ** 5)), b'\xE0')
self.assertEqual(self.transcoder.packb(-(2 ** 7)), b'\xD0\x80')
self.assertEqual(self.transcoder.packb(-(2 ** 15)), b'\xD1\x80\x00')
self.assertEqual(self.transcoder.packb(-(2 ** 31)),
b'\xD2\x80\x00\x00\x00')
self.assertEqual(self.transcoder.packb(-(2 ** 63)),
b'\xD3\x80\x00\x00\x00\x00\x00\x00\x00')
def test_that_lists_are_treated_as_arrays(self):
dumped = self.transcoder.packb(list())
self.assertEqual(self.transcoder.unpackb(dumped), [])
self.assertEqual(dumped, b'\x90')
def test_that_tuples_are_treated_as_arrays(self):
dumped = self.transcoder.packb(tuple())
self.assertEqual(self.transcoder.unpackb(dumped), [])
self.assertEqual(dumped, b'\x90')
def test_that_sets_are_treated_as_arrays(self):
dumped = self.transcoder.packb(set())
self.assertEqual(self.transcoder.unpackb(dumped), [])
self.assertEqual(dumped, b'\x90')
def test_that_unhandled_objects_raise_type_error(self):
with self.assertRaises(TypeError):
self.transcoder.packb(object())
def test_that_uuids_are_dumped_as_strings(self):
uid = uuid.uuid4()
dumped = self.transcoder.packb(uid)
self.assertEqual(self.transcoder.unpackb(dumped), str(uid))
self.assertEqual(dumped, pack_string(uid))
def test_that_datetimes_are_dumped_in_isoformat(self):
now = datetime.datetime.now()
dumped = self.transcoder.packb(now)
self.assertEqual(self.transcoder.unpackb(dumped), now.isoformat())
self.assertEqual(dumped, pack_string(now.isoformat()))
def test_that_tzaware_datetimes_include_tzoffset(self):
now = datetime.datetime.now().replace(tzinfo=UTC())
self.assertTrue(now.isoformat().endswith('+00:00'))
dumped = self.transcoder.packb(now)
self.assertEqual(self.transcoder.unpackb(dumped), now.isoformat())
self.assertEqual(dumped, pack_string(now.isoformat()))
def test_that_bytes_are_sent_as_bytes(self):
data = bytes(os.urandom(127))
dumped = self.transcoder.packb(data)
self.assertEqual(self.transcoder.unpackb(dumped), data)
self.assertEqual(dumped, pack_bytes(data))
def test_that_bytearrays_are_sent_as_bytes(self):
data = bytearray(os.urandom(127))
dumped = self.transcoder.packb(data)
self.assertEqual(self.transcoder.unpackb(dumped), data)
self.assertEqual(dumped, pack_bytes(data))
def test_that_memoryviews_are_sent_as_bytes(self):
data = memoryview(os.urandom(127))
dumped = self.transcoder.packb(data)
self.assertEqual(self.transcoder.unpackb(dumped), data)
self.assertEqual(dumped, pack_bytes(data.tobytes()))
def test_that_utf8_values_can_be_forced_to_bytes(self):
data = b'a ascii value'
dumped = self.transcoder.packb(transcoders.BinaryWrapper(data))
self.assertEqual(self.transcoder.unpackb(dumped), data)
self.assertEqual(dumped, pack_bytes(data))
|
StarcoderdataPython
|
83222
|
# coding: utf-8
""" Some photometry tools for stellar spectroscopists """
from __future__ import (division, print_function, absolute_import,
unicode_literals)
import numpy as np
from scipy import interpolate
from astropy.io import ascii
from .robust_polyfit import polyfit
import logging
import os, sys, time
logger = logging.getLogger(__name__)
__all__ = []
from .read_data import datapath
from .read_data import load_parsec_isochrones, load_dartmouth_isochrones
def eval_BC(Teff,logg,FeH,filt="g",allBCs=None):
"""
Default is alpha/Fe = +0.4
"""
if allBCs is None: allBCs = read_bc_table()
BCs = allBCs[filt]
points = np.atleast_2d([np.ravel(Teff),np.ravel(logg),np.ravel(FeH)]).T
points[points[:,2] < -2.5,2] = -2.5
out = interpolate.griddata(BCs[:,0:3], BCs[:,3], points, method='linear')
return out
def read_bc_table(fname=datapath+"/bolometric_corrections/bc_p04_ugriz.data"):
"""
Load a Casagrande+Vandenberg 2014 BC table
"""
with open(fname,'r') as fp:
lines = fp.readlines()
s = lines[1].split()
NTeff, Nlogg, NMH, Nfilt = int(s[0]), int(s[2]), int(s[5]), int(s[7])
allBCs = {}
Teffs = list(map(float, "".join(lines[2:5]).replace("\n"," ").split()))
loggs = list(map(float, lines[5].split()))
Nlist = list(map(int, lines[6].split()))
iline = 7
allBCs = {}
for ifilt in range(Nfilt):
BCtable = np.zeros((np.sum(Nlist)*NMH,4))
itable = 0
for iMH in range(NMH):
s = lines[iline].split()
FeH = float(s[2]); aFe = float(s[5]); filter = s[9]
iline += 1
for ilogg,logg in enumerate(loggs):
BCrow = []
while len(BCrow) < Nlist[ilogg]:
line = lines[iline]
iline += 1
BCrow += list(map(float, line.split()))
for iTeff,Teff in enumerate(Teffs[0:Nlist[ilogg]]):
BCtable[itable,0] = Teff
BCtable[itable,1] = logg
BCtable[itable,2] = FeH
BCtable[itable,3] = BCrow[iTeff]
itable += 1
allBCs[filter] = BCtable
return allBCs
##################################################################
# From Drlica-Wagner et al. 2018 (https://arxiv.org/abs/1708.01531)
# g_{des} = g_{sdss} - 0.104 \times (g-r)_{sdss} + 0.01
# r_{des} = r_{sdss} - 0.102 \times (g-r)_{sdss} + 0.02
# i_{des} = i_{sdss} - 0.256 \times (i-z)_{sdss} + 0.02
# z_{des} = z_{sdss} - 0.086 \times (i-z)_{sdss} + 0.01
##################################################################
def gr_sdss2des(gsdss,rsdss):
gmrsdss = gsdss - rsdss
gdes = gsdss - 0.104 * gmrsdss + 0.01
rdes = rsdss - 0.102 * gmrsdss + 0.02
return gdes, rdes
def iz_sdss2des(isdss,zsdss):
imzsdss = isdss - zsdss
ides = isdss - 0.256 * imzsdss + 0.02
zdes = zsdss - 0.086 * imzsdss + 0.01
return ides, zdes
def gr_des2sdss(gdes,rdes):
gmrdes = gdes-rdes
gmrsdss = (gmrdes + 0.01)/0.998
gsdss = gdes + 0.104 * gmrsdss - 0.01
rsdss = rdes + 0.102 * gmrsdss - 0.02
return gsdss, rsdss
def iz_des2sdss(ides,zdes):
imzdes = ides-zdes
imzsdss = (imzdes - 0.01)/0.830
isdss = ides + 0.256 * imzsdss - 0.02
zsdss = zdes + 0.086 * imzsdss - 0.01
return isdss, zsdss
def griz_des2sdss(gdes,rdes,ides,zdes):
gsdss, rsdss = gr_des2sdss(gdes,rdes)
isdss, zsdss = iz_des2sdss(ides,zdes)
return gsdss, rsdss, isdss, zsdss
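# A minimal round-trip sketch (not part of the original module): the magnitudes below are
# made-up illustrative values. Because the DES->SDSS relations above are the algebraic
# inverses of the SDSS->DES relations, converting back and forth should recover the inputs
# to within floating-point precision.
def _example_des_sdss_roundtrip():
    gdes, rdes, ides, zdes = 18.30, 17.85, 17.70, 17.65  # hypothetical DES magnitudes
    gsdss, rsdss, isdss, zsdss = griz_des2sdss(gdes, rdes, ides, zdes)
    gdes2, rdes2 = gr_sdss2des(gsdss, rsdss)
    ides2, zdes2 = iz_sdss2des(isdss, zsdss)
    # each difference should be ~0 (machine precision)
    return gdes2 - gdes, rdes2 - rdes, ides2 - ides, zdes2 - zdes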
### Setup Jordi06
# http://www.sdss3.org/dr8/algorithms/sdssUBVRITransform.php#Jordi2006
def get_jordi06_coeffs(type):
if type==0: # Combined Pop I/Pop II
a_Bmg = 0.313; e_a_Bmg = 0.003
b_Bmg = 0.219; e_b_Bmg = 0.002
a_Vmg =-0.565; e_a_Vmg = 0.001
b_Vmg =-0.016; e_b_Vmg = 0.001
elif type==1: # Pop I
a_Bmg = 0.312; e_a_Bmg = 0.003
b_Bmg = 0.219; e_b_Bmg = 0.002
a_Vmg =-0.573; e_a_Vmg = 0.002
b_Vmg =-0.016; e_b_Vmg = 0.002
elif type==2: # Pop II
a_Bmg = 0.349; e_a_Bmg = 0.009
b_Bmg = 0.245; e_b_Bmg = 0.006
a_Vmg =-0.569; e_a_Vmg = 0.007
b_Vmg = 0.021; e_b_Vmg = 0.004
else:
raise ValueError("Type must be 0, 1, 2 (got {})".format(type))
return a_Bmg, b_Bmg, a_Vmg, b_Vmg, e_a_Bmg, e_b_Bmg, e_a_Vmg, e_b_Vmg
def jordi06_gmi_to_VmI(gmi,geterr=True):
assert np.all(np.logical_or(np.ravel(gmi) < 2.1, np.isnan(np.ravel(gmi))))
VmI = 0.674 * gmi + 0.406
if geterr:
VmImin = (0.674-0.005)*gmi + (0.406 - 0.004)
VmImax = (0.674+0.005)*gmi + (0.406 + 0.004)
return VmImin, VmI, VmImax
return VmI
def _gmr_to_BmV(gmr,geterr=True,type=0):
a_Bmg, b_Bmg, a_Vmg, b_Vmg, e_a_Bmg, e_b_Bmg, e_a_Vmg, e_b_Vmg = get_jordi06_coeffs(type)
# Calculate middle
Bmg = a_Bmg*gmr + b_Bmg
Vmg = a_Vmg*gmr + b_Vmg
BmV = Bmg - Vmg
if not geterr: return BmV
# Calculate 1 sigma error estimate
if gmr >= 0:
Bmg_max = (a_Bmg+e_a_Bmg)*gmr+(b_Bmg+e_b_Bmg)
Bmg_min = (a_Bmg-e_a_Bmg)*gmr+(b_Bmg-e_b_Bmg)
Vmg_max = (a_Vmg+e_a_Vmg)*gmr+(b_Vmg+e_b_Vmg)
Vmg_min = (a_Vmg-e_a_Vmg)*gmr+(b_Vmg-e_b_Vmg)
else:
Bmg_max = (a_Bmg-e_a_Bmg)*gmr+(b_Bmg+e_b_Bmg)
Bmg_min = (a_Bmg+e_a_Bmg)*gmr+(b_Bmg-e_b_Bmg)
Vmg_max = (a_Vmg-e_a_Vmg)*gmr+(b_Vmg+e_b_Vmg)
Vmg_min = (a_Vmg+e_a_Vmg)*gmr+(b_Vmg-e_b_Vmg)
BmV_max = Bmg_max-Vmg_min
BmV_min = Bmg_min-Vmg_max
return BmV_min,BmV,BmV_max
jordi06_gmr_to_BmV = np.vectorize(_gmr_to_BmV)
###################################################################
# From Casagrande et al. 2010, applicable to dwarfs and subgiants #
###################################################################
def C10_Teff_BmV(BmV, FeH):
""" 73K scatter """
a0, a1, a2, a3, a4, a5 = .5665, .4809, -.0060, -.0613, -.0042, -.0055
theta = a0 + a1*BmV + a2*BmV*BmV + a3*BmV*FeH + a4*FeH + a5*FeH*FeH
Teff = 5040./theta
return Teff
def C10_Teff_VmI(VmI, FeH):
""" 59K scatter """
a0, a1, a2, a3, a4, a5 = .4033, .8171, -.1987, -.0409, .0319, .0012
theta = a0 + a1*VmI + a2*VmI*VmI + a3*VmI*FeH + a4*FeH + a5*FeH*FeH
Teff = 5040./theta
return Teff
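# Worked example (illustrative inputs, not from the paper): for B-V = 0.6 and [Fe/H] = -2.0 the
# polynomial gives theta ≈ 0.5665 + 0.28854 - 0.00216 + 0.07356 + 0.0084 - 0.022 ≈ 0.913, so
#   C10_Teff_BmV(0.6, -2.0)   # -> roughly 5040/0.913 ≈ 5520 K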
##################################
# From Alonso et al. 1999: F0-K5 #
##################################
def A99_BC_V(Teff, FeH):
"""
Typical scatter is 0.025 for cool stars, 0.009 for warm stars (dividing at T=4500K)
Limits of applicability are 3.5 < logT < 3.96, though different for different [Fe/H] ranges
"""
X = np.ravel(np.log10(Teff) - 3.52); FeH = np.ravel(FeH)
# Equations 17 and 18
BC17 = -5.531e-2/X - 0.6177 + 4.420*X - 2.669*X**2. + 0.6943*X*FeH - 0.1071*FeH - 8.612e-3*FeH**2.
BC18 = -9.930e-2/X + 2.887e-2 + 2.275*X - 4.425*X**2. + 0.3505*X*FeH - 5.558e-2*FeH - 5.375e-3*FeH**2
BC = BC17.copy()
ii = np.log10(Teff) >= 3.65
BC[ii] = BC18[ii]
return BC
def B79_VmI_C2J(VmI):
""" Convert V-I in Cousins' mags to V-I in Johnson's mags from Bessell 1979 """
VmI = np.ravel(VmI)
out = VmI.copy()/0.778
out[VmI < 0] = VmI[VmI < 0]/0.713
ii = out > 2.0
out[ii] = (VmI[ii]+0.13)/0.835
return out
def A99_Teff_VmI(VmI):
"""
Johnson's V, Johnson's (NOT Cousins') I
125K scatter, no dependence on Fe/H.
    I have assumed that VmI is given in Johnson-Cousins (V in Johnson, I in Cousins), and
    so I first convert it to Johnson's V-I (via B79_VmI_C2J) before applying the calibration.
"""
VmI = B79_VmI_C2J(VmI)
theta = 0.5379 + 0.3981 * VmI + 4.432e-2 * VmI**2 - 2.693e-2 * VmI**3
Teff = 5040./theta
return Teff
def _A99_function(X, FeH, a0, a1, a2, a3, a4, a5):
return a0 + a1*X + a2*X**2. + a3*X*FeH + a4*FeH + a5*FeH**2.
def _A99_Teff_BmV_3(BmV, FeH):
""" 167K scatter, B-V < 0.7 """
a0, a1, a2, a3, a4, a5 = 0.5716, 0.5404, -6.126e-2, -4.862e-2, -1.777e-2, -7.969e-3
return _A99_function(BmV, FeH, a0, a1, a2, a3, a4, a5)
def _A99_Teff_BmV_4(BmV, FeH):
""" 96K scatter, B-V > 0.8 """
a0, a1, a2, a3, a4, a5 = 0.6177, 0.4354, -4.025e-3, 5.204e-2, -0.1127, -1.385e-2
return _A99_function(BmV, FeH, a0, a1, a2, a3, a4, a5)
def A99_Teff_BmV(BmV, FeH):
"""
Johnson's B and V
Using equations 3 and 4 of A99, scatter is 167K
Linearly interpolating in theta = 5040/Teff for 0.7 < B-V < 0.8
"""
BmV = np.ravel(BmV); FeH = np.ravel(FeH)
t3 = _A99_Teff_BmV_3(BmV, FeH)
t4 = _A99_Teff_BmV_4(BmV, FeH)
# Bluest stars, Eq 3
t = t3.copy()
# Reddest stars, Eq 4
t[BmV > 0.8] = t4[BmV > 0.8]
# In between: 0.7 < B-V < 0.8, linear interpolate
ii = np.logical_and(BmV > 0.7, BmV <= 0.8)
x1, x2 = 0.7, 0.8
y1 = _A99_Teff_BmV_3(x1, FeH)
y2 = _A99_Teff_BmV_4(x2, FeH)
m = (y2 - y1)/(x2 - x1)
y = m * (BmV - x1) + y1
t[ii] = y[ii]
return 5040./t
def phot_logg(Teff,mag0,BCmag,distmod,Mstar=0.75):
"""
Using solar values from Venn et al. 2017
"""
return 4.44 + np.log10(Mstar) + 4*np.log10(Teff/5780) + 0.4 * (mag0 - distmod + BCmag - 4.75)
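# Sanity check (illustrative): with solar-like inputs every term except the zero point cancels,
# so the function recovers the solar surface gravity adopted here:
#   phot_logg(Teff=5780., mag0=4.75, BCmag=0.0, distmod=0.0, Mstar=1.0)  # -> 4.44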
def iterate_find_logg(Teff,mag0,FeH,dmod,filt,maxiter=10,tol=.005):
""" Assumes [alpha/Fe] = +0.4, sdss mags for filt """
# Initialize BC and logg
BC = 0.0
logg = phot_logg(Teff,mag0,BC,dmod)
for iter in range(maxiter):
BC = eval_BC(Teff, logg, FeH, filt=filt)
new_logg = phot_logg(Teff,mag0,BC,dmod)
if np.all(np.abs(new_logg - logg) < tol):
break
logg = new_logg
else:
print("WARNING: Reached max iters")
return logg
def phot_logg_error(Tfracerr, dmoderr, masserr=0.05, magerr=0.0, BCerr=0.03):
"""
Estimate 1 sigma error in logg
Tfracerr: temperature error divided by temperature
dmoderr: distance modulus error in mag
masserr (0.05 mag): from assuming a mass, 0.05 is 0.7-0.8 Msun
magerr: assume this is negligible by default
BCerr: estimated about 0.03 mag from running CV14 several times
"""
Terr_mag = 4*Tfracerr # from a taylor expansion
magerr = 0.4*magerr
BCerr = 0.4*BCerr
dmoderr = 0.4*dmoderr
return np.sqrt(masserr**2 + Terr_mag**2 + magerr**2 + dmoderr**2 + BCerr**2)
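# Worked example (hypothetical inputs): a 2% temperature error and a 0.1 mag distance-modulus
# error, with the default mass and BC terms, propagate to roughly
#   sqrt(0.05**2 + 0.08**2 + 0.04**2 + 0.012**2) ≈ 0.10 dex:
#   phot_logg_error(Tfracerr=0.02, dmoderr=0.1)  # -> ~0.10, dominated by the temperature term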
###################
## Y2 isochrones ##
###################
def get_logT_to_logg(FeH=-3.0):
assert FeH in [-2.0, -2.5, -3.0]
if FeH == -2.0:
iso = ascii.read(datapath+'/stellar_param_data/afe040feh200set1_12gyr.txt')
elif FeH == -2.5:
iso = ascii.read(datapath+'/stellar_param_data/afe040feh250set1_12gyr.txt')
elif FeH == -3.0:
iso = ascii.read(datapath+'/stellar_param_data/afe040feh300set1_12gyr.txt')
ii_max_logT = np.argmax(iso['logT'])
max_logT = iso[ii_max_logT]['logT']
max_logg = iso[ii_max_logT]['logg']
#print max_logT, max_logg
ii = iso['logg'] < max_logg
logT = iso[ii]['logT']
logg = iso[ii]['logg']
logT_to_logg = interpolate.interp1d(logT,logg)
return logT_to_logg
_my_interps = [get_logT_to_logg(FeH) for FeH in [-2.0,-2.5,-3.0]]
def _logTFeH_to_logg(logT,FeH):
if FeH > -2.0: return _my_interps[0](logT)
elif FeH <= -3.0: return _my_interps[2](logT)
elif FeH <= -2.0 and FeH > -2.5:
x = (FeH+2.5)*2.0
assert x <= 1 and x >= 0
logg1 = _my_interps[0](logT)
logg2 = _my_interps[1](logT)
return logg1 * x + logg2 * (1-x)
elif FeH <= -2.5 and FeH > -3.5:
x = (FeH+3.0)*2.0
assert x <= 1 and x >= 0
logg1 = _my_interps[1](logT)
logg2 = _my_interps[2](logT)
return logg1 * x + logg2 * (1-x)
else:
raise ValueError("FeH = {}".format(FeH))
logTFeH_to_logg = np.vectorize(_logTFeH_to_logg)
###############################
## Microturbulence Relations ##
###############################
def get_logg_to_vt_B05():
b = ascii.read(datapath+'/stellar_param_data/barklem.txt')
## This fails in the newer version of scipy
#iisort = np.argsort(b['logg'])
#fit = interpolate.UnivariateSpline(b['logg'][iisort],b['Vt'][iisort],k=2)
coeff, sigma = polyfit(b['logg'],b['Vt'],2)
fit = lambda x: np.polyval(coeff, x)
return fit
def logg_to_vt_B05(logg):
fit = get_logg_to_vt_B05()
return fit(logg)
def logg_to_vt_K09(logg):
""" Kirby et al. 2009 ApJ 705, 328 (uncertainty is ~ 0.05 + 0.03*logg) """
return 2.13 - 0.23 * logg
def logg_to_vt_M08(logg):
""" Marino et al. 2008 A&A 490, 625 (from Gratton et al. 1996) """
return 2.22 - 0.322 * logg
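# Quick comparison (illustrative, not from the source): at logg = 2.0 the two linear relations give
#   logg_to_vt_K09(2.0)  # -> 2.13 - 0.23*2.0  = 1.67 km/s
#   logg_to_vt_M08(2.0)  # -> 2.22 - 0.322*2.0 = 1.576 km/s
# logg_to_vt_B05 instead evaluates a quadratic fit to the Barklem+2005 table, so it needs the
# stellar_param_data files on disk.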
#################
## Dereddening ##
#################
def deredden(EBV,filt):
""" Subtract this value from the observed magnitude to get the dereddened mags """
conversion_data = ascii.read(datapath+"/stellar_param_data/sf11.txt")
assert filt in conversion_data["filter"], (filt, conversion_data["filter"])
return EBV * float(conversion_data["AB_EBV"][np.where(conversion_data["filter"]==filt)[0]])
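# Usage sketch (requires the sf11.txt table on disk; numbers illustrative): to deredden an
# observed Landolt V magnitude for E(B-V) = 0.02 one would subtract the returned extinction,
#   V0 = V_obs - deredden(0.02, "LandoltV")
# where "LandoltV" is one of the filter names used elsewhere in this module.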
"""
Notes about filter conversions and definitions.
Johnson-Cousins system: UBV in Johnson, RI in Cousins. I think this is the same as the Landolt system.
Jordi+2006: converts from SDSS (as observed at APO, with primes???) to UBV(RI)c.
Alonso+1999: converts JOHNSON'S ONLY colors to Teff. So RI need to go to (RI)c if you use V-I.
Casagrande+2010: converts Johnson-Cousins to Teff
So the most consistent thing for DES mags is to go from griz_DES -> griz_SDSS -> UBV(RI)c -> Casagrande+2010
Note Casagrande+2010 is not calibrated to very red giants (<4500K).
For E(B-V)=0.02, I found the order in which you deredden makes <1 mmag difference in the final color.
"""
def determine_stellar_params(gmag,rmag,imag,zmag,
MH,dmod,
EBV=0,EBVerr=0.0,dmoderr=0.1,
gerr=0.02,rerr=0.02,ierr=0.02,zerr=0.02,
verbose=True,fp=sys.stdout,
Teff_color="VmI", Teff_calib="C10",
logg_mag="r", full_output=False):
"""
[g,r,i,z]mag: DES magnitudes
MH: input metallicity
dmod: distance modulus
[g,r,i,z]err: magnitude error
default 0.02 mag in each band, the absolute calibration uncertainty (ADW+2017 arxiv:1708.01531)
(The internal calibration uncertainty is <4mmag)
Effective temperature error includes:
[g/r/i]err, EBVerr, Jordi06 err
"""
assert Teff_color in ["BmV","VmI"], Teff_color
assert Teff_calib in ["C10","A99"], Teff_calib
assert logg_mag in ["g","r","i"], logg_mag
out = griz_des2sdss(gmag,rmag,imag,zmag)
g,r,i,z = out
if verbose:
fp.write("g-r={:.2f}->{:.2f}\n".format(gmag-rmag,g-r))
fp.write("g-i={:.2f}->{:.2f}\n".format(gmag-imag,g-i))
logg_mag_dict = {"g":g,"r":r,"i":i}
logg_magerr_dict = {"g":gerr,"r":rerr,"i":ierr}
## Determine Effective Temperature and Error
## Output: Teff, Teff_err, color, color_err
if Teff_color=="BmV":
BmV1, BmV, BmV2 = jordi06_gmr_to_BmV(g-r, geterr=True)
BmVerr = max(abs(BmV2-BmV), abs(BmV-BmV1))
BmVerr = np.sqrt(BmVerr**2. + gerr**2 + rerr**2 + EBVerr**2)
BmV = BmV + EBV
if Teff_calib=="C10":
Teff = C10_Teff_BmV(BmV, MH)
Teff1 = C10_Teff_BmV(BmV-BmVerr, MH)
Teff2 = C10_Teff_BmV(BmV+BmVerr, MH)
Teff_syserr = 73.
elif Teff_calib=="A99":
Teff = A99_Teff_BmV(BmV, MH)
Teff1 = A99_Teff_BmV(BmV-BmVerr, MH)
Teff2 = A99_Teff_BmV(BmV+BmVerr, MH)
Teff_syserr = 167.
color_err = BmVerr
color = BmV
elif Teff_color=="VmI":
EVI = deredden(EBV, "LandoltV") - deredden(EBV, "LandoltI")
VmI1, VmI, VmI2 = jordi06_gmi_to_VmI(g-i, geterr=True)
VmIerr = max(abs(VmI2 - VmI), abs(VmI - VmI1))
VmIerr = np.sqrt(VmIerr**2 + gerr**2 + ierr**2 + EBVerr**2)
VmI = VmI + EVI
if Teff_calib=="C10":
Teff = C10_Teff_VmI(VmI, MH)
Teff1 = C10_Teff_VmI(VmI-VmIerr, MH)
Teff2 = C10_Teff_VmI(VmI+VmIerr, MH)
Teff_syserr = 59.
elif Teff_calib=="A99":
Teff = A99_Teff_VmI(VmI)
Teff1 = A99_Teff_VmI(VmI-VmIerr)
Teff2 = A99_Teff_VmI(VmI+VmIerr)
Teff_syserr = 125.
color_err = VmIerr
color = VmI
if verbose: fp.write("{}={:.2f}±{:.2f}\n".format(Teff_color, color, color_err))
Teff_err = max(abs(Teff-Teff1), abs(Teff-Teff2))
if verbose: fp.write("Teff={:.0f} ± {:.0f} (stat) ± {:.0f} (sys)\n".format(Teff,Teff_err,Teff_syserr))
Teff_err = np.sqrt(Teff_err**2 + Teff_syserr**2)
logg = iterate_find_logg(Teff, logg_mag_dict[logg_mag], MH, dmod, logg_mag)
try:
logg = logg[0]
except:
pass
logg_err = phot_logg_error(Teff_err/Teff, dmoderr, magerr=logg_magerr_dict[logg_mag])
if verbose: fp.write("logg ({})={:.2f} ± {:.2f} (stat)\n".format(logg_mag, logg, logg_err))
vt_syserr = 0.13 # from scatter around B05 relation
vt = logg_to_vt_B05(logg)
vt1 = logg_to_vt_B05(logg-logg_err)
vt2 = logg_to_vt_B05(logg+logg_err)
vt_err = max(abs(vt-vt1),abs(vt-vt2))
if verbose: fp.write("vt={:.2f} ± {:.2f} (stat) ± {:.2f} (sys)\n".format(vt, vt_err, vt_syserr))
vt_err = np.sqrt(vt_syserr**2 + vt_err**2)
if full_output:
return Teff, Teff_err, logg, logg_err, vt, vt_err, color, color_err, g, r, i, z
return Teff, Teff_err, logg, logg_err, vt, vt_err
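# Hedged usage sketch: the DES magnitudes, metallicity, and distance modulus below are made-up
# numbers meant only to show the calling convention; running it also requires the bolometric
# correction tables read by eval_BC.
#   Teff, Teff_err, logg, logg_err, vt, vt_err = determine_stellar_params(
#       18.30, 17.85, 17.70, 17.65, MH=-2.5, dmod=19.1, EBV=0.02)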
def parsec_des_stellar_params(dmod=0):
""" Uses label=2 and 3 (subgiant/RGB) to create gmag, rmag->Teff,logg """
isos = load_parsec_isochrones("DECAM")
g_Teff_funcs = {}
g_logg_funcs = {}
r_Teff_funcs = {}
r_logg_funcs = {}
gmr_Teff_funcs = {}
gmr_logg_funcs = {}
interp_kwargs = {"bounds_error":False,"fill_value":np.nan}
for key in isos.keys():
tab = isos[key]
tab = tab[(tab["label"]==2) | (tab["label"]==3)]
gmag, rmag = tab["gmag"], tab["rmag"]
logT, logg = tab["logTe"], tab["logg"]
Teff = 10**logT
g_Teff_funcs[key] = interpolate.interp1d(gmag+dmod,Teff,**interp_kwargs)
g_logg_funcs[key] = interpolate.interp1d(gmag+dmod,logg,**interp_kwargs)
r_Teff_funcs[key] = interpolate.interp1d(rmag+dmod,Teff,**interp_kwargs)
r_logg_funcs[key] = interpolate.interp1d(rmag+dmod,logg,**interp_kwargs)
gmr_Teff_funcs[key] = interpolate.interp1d(gmag-rmag,Teff,**interp_kwargs)
gmr_logg_funcs[key] = interpolate.interp1d(gmag-rmag,logg,**interp_kwargs)
return g_Teff_funcs, g_logg_funcs, r_Teff_funcs, r_logg_funcs, gmr_Teff_funcs, gmr_logg_funcs
def dartmouth_des_stellar_params(dmod=0,ages=[10.0,11.0,12.0,13.0,14.0],logZs=[-2.5,-2.0,-1.5],alpha="ap4"):
""" Uses label=2 and 3 (subgiant/RGB) to create gmag, rmag->Teff,logg """
isos = {}
for MH in [-2.5,-2.0,-1.5]:
_isos = load_dartmouth_isochrones(MH,alpha,"DECAM")
for key in _isos.keys():
tab = _isos[key]
tab = tab[tab["EEP"] > 111]
isos[key] = tab
g_Teff_funcs = {}
g_logg_funcs = {}
r_Teff_funcs = {}
r_logg_funcs = {}
gmr_Teff_funcs = {}
gmr_logg_funcs = {}
interp_kwargs = {"bounds_error":False,"fill_value":np.nan}
for key in isos.keys():
tab = isos[key]
gmag, rmag = tab["gmag"], tab["rmag"]
logT, logg = tab["logTe"], tab["logg"]
Teff = 10**logT
g_Teff_funcs[key] = interpolate.interp1d(gmag+dmod,Teff,**interp_kwargs)
g_logg_funcs[key] = interpolate.interp1d(gmag+dmod,logg,**interp_kwargs)
r_Teff_funcs[key] = interpolate.interp1d(rmag+dmod,Teff,**interp_kwargs)
r_logg_funcs[key] = interpolate.interp1d(rmag+dmod,logg,**interp_kwargs)
gmr_Teff_funcs[key] = interpolate.interp1d(gmag-rmag,Teff,**interp_kwargs)
gmr_logg_funcs[key] = interpolate.interp1d(gmag-rmag,logg,**interp_kwargs)
return g_Teff_funcs, g_logg_funcs, r_Teff_funcs, r_logg_funcs, gmr_Teff_funcs, gmr_logg_funcs
def photometric_stellarparam_derivatives(Teff, logg,
dTdcolor,dvtdlogg=None,
color=None, dTdcolor_func=None):
"""
Computes dTeff/dlogg, dvt/dlogg assuming purely photometric determinations
This can be used to get the stellar parameter covariances/correlations.
Input:
Teff: effective temperature in Kelvin
logg: surface gravity
dTdcolor: derivative of effective temperature with respect to color (e.g., g-r)
Currently you have to compute outside and specify it
dvtdlogg: derivative of microturbulence with respect to logg
By default, computes dvt/dlogg using B05 relation. You can specify a number
here to overwrite the behavior.
Returns:
dloggdTeff, dvtdlogg
You can convert these to covariances with these formulas:
Cov(T,g) = dg/dT * sigma_T^2
Cov(v,g) = dv/dg * sigma_g^2
Cov(T,v) = dv/dg * dg/dT * sigma_T^2
Or correlations:
Corr(T,g) = dg/dT * sigma_T/sigma_g
Corr(v,g) = dv/dg * sigma_g/sigma_v
Corr(T,v) = Corr(T,g) * Corr(v,g)
"""
dloggdT = 4/(np.log(10) * Teff) + 0.4/dTdcolor
if dvtdlogg is None or dvtdlogg=="B05":
# This is the analytic derivative of the Barklem+2005 relation
dvtdlogg = 0.173 * logg - 0.6897
    elif dvtdlogg == "M08":
        dvtdlogg = -0.322
    elif dvtdlogg == "K09":
        dvtdlogg = -0.23
return dloggdT, dvtdlogg
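# A minimal sketch (hypothetical Teff, logg, errors, and colour slope) of turning the derivatives
# above into covariances, following the formulas quoted in the docstring:
def _example_stellarparam_covariances(Teff=4800.0, logg=1.5, sigma_T=100.0, sigma_g=0.1,
                                      dTdcolor=2000.0):
    dloggdT, dvtdlogg = photometric_stellarparam_derivatives(Teff, logg, dTdcolor)
    cov_Tg = dloggdT * sigma_T ** 2              # Cov(T, g) = dg/dT * sigma_T^2
    cov_vg = dvtdlogg * sigma_g ** 2             # Cov(v, g) = dv/dg * sigma_g^2
    cov_Tv = dvtdlogg * dloggdT * sigma_T ** 2   # Cov(T, v) = dv/dg * dg/dT * sigma_T^2
    return cov_Tg, cov_vg, cov_Tv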
|
StarcoderdataPython
|
1602680
|
<reponame>garywei944/AlphaSMILES
import json
import os
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Draw
from mcts import parameters as p
from tools.plot_wavelength import plot_wl
def select(data, starting_with='', wl_min=0, wl_max=float('inf'), unit="nm", f_min=0.0):
"""
Select SMILES in data with oscillator strength peaks between bounds
:param data: dict to analyse
:type data: dict(SMILES)
:param starting_with: beginning of the SMILES (you have to add an extra ' before the SMILES)
:type starting_with: str
:param wl_min: wavelength minimum for the search
:type wl_min: float
:param wl_max: wavelength maximum for the search
:type wl_max: float
:param unit: unit of the wavelength ( "nm" , "ev" or "cm-1" )
:type unit: str
:param f_min: oscillator strength minimum for the search
:type f_min: float
:return: list of selected smiles
"""
selected_smiles = set()
for smiles in data.keys():
if data[smiles]['valid']:
if smiles.startswith(starting_with):
for line in data[smiles][p.s_dft]:
if wl_min <= line[unit] <= wl_max and line['f'] >= f_min:
selected_smiles.add(smiles)
print(smiles)
print(line)
return selected_smiles
def smiles_to_image(i, smiles):
"""
    Convert a SMILES into an image;
    the image name is "id" + "_2D.png"
:param i: id of the SMILES given in parameter
:type i: int
:param smiles: String corresponding to the SMILES to convert to image
:type smiles: str
:return:
"""
m = Chem.MolFromSmiles(smiles)
AllChem.Compute2DCoords(m)
Draw.MolToFile(m, "../data_out/" + p.config["configuration_name"] + "/plot/" + str(i) + '_2D.png')
def find(config_name, starting_with='', wl_min=0, wl_max=float('inf'), unit="nm", f_min=0.0, plot_wavelength=True):
"""
Look for the SMILES with the given parameters
:param config_name: name of the configuration to use
:type config_name: str
    :param starting_with: the beginning of the SMILES
:type starting_with: str
:param wl_min: wavelength minimum for the search
:type wl_min: float
:param wl_max: wavelength maximum for the search
:type wl_max: float
:param unit: unit of the wavelength ( "nm" , "ev" or "cm-1" )
:type unit: str
:param f_min: oscillator strength minimum for the search
:type f_min: float
:param plot_wavelength: if you want to get a plot of the wavelength or not
:type plot_wavelength: bool
:return: None
"""
with open('../data_out/' + config_name + '/data.json') as d:
data = json.load(d)
with open('../mcts/configurations/' + config_name + ".json") as c:
p.config = json.load(c)
selected = select(data, starting_with, wl_min, wl_max, unit, f_min)
if not os.path.isdir("../data_out/" + p.config["configuration_name"] + "/plot/"):
os.mkdir("../data_out/" + p.config["configuration_name"] + "/plot/")
for s in selected:
smi = s[1:-1] if s[0] == "'" else s
smi = "".join(p.config['long_prefix']) + smi
smiles_to_image(data[s]['id'], smi)
if plot_wavelength:
plot_wl(data, s)
if __name__ == '__main__':
find('generated', starting_with="'c1", wl_min=500, unit="nm", f_min=0.4)
|
StarcoderdataPython
|
3382605
|
<filename>iaflash/app/app.py
import os
from PIL import Image
import cv2
import json
from flask import Flask, render_template, Response, render_template_string, send_from_directory, request
import pandas as pd
from iaflash.environment import ROOT_DIR
from iaflash.filter import read_df, dict2args
WIDTH = 600
HEIGHT = 400
app = Flask(__name__)
@app.route('/<path:filename>')
def image(filename):
w = request.args.get('w', None)
h = request.args.get('h', None)
x1 = request.args.get('x1', None)
y1 = request.args.get('y1', None)
x2 = request.args.get('x2', None)
y2 = request.args.get('y2', None)
marque = request.args.get('marque', None)
modele = request.args.get('modele', None)
score = request.args.get('score', None)
text = request.args.get('text', None)
try:
print("filename is : " + filename)
im = cv2.imread(os.path.join('/',filename))
if x1 and x2 and y1 and y2:
cv2.rectangle(im, (int(float(x1)), int(float(y1))), (int(float(x2)),int(float(y2))), (0,0,255), 2)
if text:
cv2.putText(im, text, (int(float(x1)), int(float(y2)) - 5), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 2)
if w and h:
w, h = int(w), int(h)
print(im.shape)
im = cv2.resize(im, (w, h))
_, img_encoded = cv2.imencode('.jpg', im)
return Response(img_encoded.tobytes(), mimetype='image/jpeg')
except Exception as e:
print(e)
return send_from_directory('.', filename)
@app.route('/')
def images():
return json.dumps({'status': 'ok'})
@app.route('/csv/<path:csvpath>')
def images_csv(csvpath):
query = request.args.get('query', None)
limit = request.args.get('limit', None)
if limit:
nrows = int(limit)
else:
nrows = None
images = []
dirname = os.path.dirname(csvpath)
filename = os.path.join('/', csvpath)
df = pd.read_csv(filename, nrows=nrows)#.sample(10000)
#df_val = df_val[df_val['x1'].notnull()]
classes_ids = read_class_reference(dirname)
df['class_name'] = df['target'].astype(int).astype(str).replace(classes_ids)
df['text'] = 'Label: ' + df['class_name']
    if 'pred_class' in df.columns:
        df['text'] += '- Pred: ' + df['pred_class'].astype(int).astype(str).replace(classes_ids)
    if 'proba' in df.columns:
        df['text'] += ' Score: ' + df['proba'].round(3).astype(str)
#df = df[df['target'] != df['pred_class']]
if query:
print(query)
df = df.query(query)
"""
if limit :
df = df.sample(int(limit))
"""
df['img_path'] = df['img_path'].astype(str)
df = df.sort_values(by =['img_path'], ascending=False)
print(df.head())
for i, row in df.iterrows():
ROOT_DIR = "/vgdata/sources/verbalisations/antai"
filename = os.path.join(ROOT_DIR, row['img_path'])
im = Image.open(filename)
w, h = im.size
aspect = 1.0*w/h
width = aspect * HEIGHT
height = HEIGHT
images.append({
'width': int(width),
'height': int(height),
'src': filename,
'x1': row.get("x1",0),
'y1': row.get("y1",0),
'x2': row.get("x2",0),
'y2': row.get("y2",0),
'text': row['text'],
})
return render_template("preview.html", **{
'images': images
})
@app.route('/explore')
def images_explore():
ROOT_DIR = '/vgdata/sources/verbalisations/antai'
images = []
df = read_df(dict2args(request.args))
print(df.head())
print('Retrieve %s rows'%df.shape[0])
col_img = request.args.get('col_img', 'img_name')
query = request.args.get('query', None)
if query:
print(query)
df = df.query(query)
df.sort_values('path',inplace=True)
for col in ['x1','y1','x2','y2'] :
if col in df.columns:
df[col] = df[col].fillna(0)
for i, row in df.iterrows():
filename = os.path.join(ROOT_DIR,row['path'],row[col_img])
im = Image.open(filename)
w, h = im.size
aspect = 1.0*w/h
width = aspect * HEIGHT
height = HEIGHT
#if ('marque' in row) and ('modele' in row):
# text = "{}, {}".format(row['marque'], row['modele'])
if ('CG_MarqueVehicule' in row) and ('CG_ModeleVehicule' in row):
text = "{}, {}".format(row['CG_MarqueVehicule'], row['CG_ModeleVehicule'])
row = row.append(pd.Series([30,30,30,30], index=['x1','y1','x2','y2']))
elif ('class' in row) and ('score' in row):
text = "{}, {}".format(row['class'], row['score'])
else:
text = 'Pas de prediction'
images.append({
'width': int(width),
'height': int(height),
'src': filename,
'x1': row.get("x1",0),
'y1': row.get("y1",0),
'x2': row.get("x2",0),
'y2': row.get("y2",0),
'text': text
})
return render_template("preview.html", **{
'images': images
})
def read_class_reference(dirname):
filename = os.path.join('/', dirname, 'idx_to_class.json')
with open(filename) as json_data:
classes_ids = json.load(json_data)
return classes_ids
if __name__ == '__main__':
app.run(host='0.0.0.0', threaded=True,debug=True)
|
StarcoderdataPython
|
1635222
|
<gh_stars>1-10
import pytest
from dataframe_generator.data_type import LongType, StringType, ByteType, IntegerType, DateType, TimestampType, \
ShortType, DecimalType
from dataframe_generator.struct_field import StructField
from dataframe_generator.struct_type import StructType
from tests.matchers import assert_struct_type_equals
test_data = [
("""
schemaname2 = StructType([
StructField('name12', LongType(), True),
StructField('name22', StringType(), True),
StructField('name32', ByteType(), False),
StructField('name42', IntegerType(), True),
StructField('name52', DateType(), True),
StructField('name62', TimestampType(), True),
StructField('name72', ShortType(), False),
])""",
StructType('schemaname2', [
StructField('name12', LongType(), True),
StructField('name22', StringType(), True),
StructField('name32', ByteType(), False),
StructField('name42', IntegerType(), True),
StructField('name52', DateType(), True),
StructField('name62', TimestampType(), True),
StructField('name72', ShortType(), False),
])
),
("""
my_cool_schema =StructType([StructField('name12',LongType(),False),
StructField('name22',StringType(), True),StructField('name32',ByteType(), False),
StructField('name42',IntegerType(), True), StructField('name52',DateType(), True),
StructField("name62",TimestampType(),True),
StructField('name72',ShortType(),False)
])""",
StructType('my_cool_schema', [
StructField('name12', LongType(), False),
StructField('name22', StringType(), True),
StructField('name32', ByteType(), False),
StructField('name42', IntegerType(), True),
StructField('name52', DateType(), True),
StructField('name62', TimestampType(), True),
StructField('name72', ShortType(), False),
])
),
]
@pytest.mark.parametrize("raw_input, expected", test_data)
def test_parse(raw_input, expected):
assert_struct_type_equals(expected, StructType.parse(raw_input))
def test_parse_multiple():
input_multiple = """
first_schema = StructType([
StructField('name12', LongType(), True),
StructField('name22', DecimalType(3, 2), True),
StructField('name32', ByteType(), False),
StructField('name42', IntegerType(), True),
StructField('name52', DateType(), True),
StructField('name62', TimestampType(), True),
StructField('name72', ShortType(), False),
])
my_cool_schema =StructType([StructField('name12',LongType(),False),
StructField('name22',StringType(), True),StructField('name32',ByteType(), False),
StructField('name42',IntegerType(), True), StructField('name52',DateType(), True),
StructField("name62",TimestampType(),True),
StructField('name72',ShortType(),False)
])
"""
expected = {
'first_schema': StructType('first_schema', [
StructField('name12', LongType(), True),
StructField('name22', DecimalType(3, 2), True),
StructField('name32', ByteType(), False),
StructField('name42', IntegerType(), True),
StructField('name52', DateType(), True),
StructField('name62', TimestampType(), True),
StructField('name72', ShortType(), False),
]),
'my_cool_schema': StructType('my_cool_schema', [
StructField('name12', LongType(), False),
StructField('name22', StringType(), True),
StructField('name32', ByteType(), False),
StructField('name42', IntegerType(), True),
StructField('name52', DateType(), True),
StructField('name62', TimestampType(), True),
StructField('name72', ShortType(), False),
])
}
actual = StructType.parse_multiple(input_multiple)
assert_struct_type_equals(expected['first_schema'], actual['first_schema'])
assert_struct_type_equals(expected['my_cool_schema'], actual['my_cool_schema'])
|
StarcoderdataPython
|
1675498
|
<reponame>shubh2ds/DSA_Python
def partition_for_quick_sort(arr,sidx,eidx):
pivot=arr[sidx]
c=0
for i in range(sidx,eidx+1):
if arr[i]<pivot:
c=c+1
arr[sidx+c],arr[sidx] = arr[sidx],arr[sidx+c]
pivot_idx=sidx+c
i=sidx
j=eidx
while i<j:
if arr[i]<pivot:
i=i+1
elif arr[j]>=pivot:
j=j-1
else:
arr[i],arr[j]=arr[j],arr[i]
i=i+1
j=j-1
return pivot_idx
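# A minimal recursive driver (not in the original file) showing how the partition step above is
# typically used to sort the whole array in place; the demo below still calls the partition only once.
def quick_sort(arr, sidx, eidx):
    if sidx >= eidx:
        return
    pivot_idx = partition_for_quick_sort(arr, sidx, eidx)
    quick_sort(arr, sidx, pivot_idx - 1)   # elements smaller than the pivot
    quick_sort(arr, pivot_idx + 1, eidx)   # elements greater than or equal to the pivot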
arr=[6,2,1,4,3,8,9,12,5]
sidx,eidx=0,len(arr)-1
partition_for_quick_sort(arr,sidx,eidx)
print(arr)
|
StarcoderdataPython
|
135588
|
<gh_stars>10-100
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import pytest
import os
from reco_utils.recommender.newsrec.newsrec_utils import prepare_hparams
from reco_utils.recommender.deeprec.deeprec_utils import download_deeprec_resources
from reco_utils.recommender.newsrec.models.nrms import NRMSModel
from reco_utils.recommender.newsrec.models.naml import NAMLModel
from reco_utils.recommender.newsrec.models.lstur import LSTURModel
from reco_utils.recommender.newsrec.models.npa import NPAModel
from reco_utils.recommender.newsrec.io.mind_iterator import MINDIterator
from reco_utils.recommender.newsrec.io.mind_all_iterator import MINDAllIterator
@pytest.fixture
def resource_path():
return os.path.dirname(os.path.realpath(__file__))
@pytest.mark.gpu
def test_nrms_component_definition(tmp):
wordEmb_file = os.path.join(tmp, "utils", "embedding.npy")
userDict_file = os.path.join(tmp, "utils", "uid2index.pkl")
wordDict_file = os.path.join(tmp, "utils", "word_dict.pkl")
yaml_file = os.path.join(tmp, "utils", r"nrms.yaml")
if not os.path.exists(yaml_file):
download_deeprec_resources(
r"https://recodatasets.blob.core.windows.net/newsrec/",
os.path.join(tmp, "utils"),
"MINDdemo_utils.zip",
)
hparams = prepare_hparams(
yaml_file,
wordEmb_file=wordEmb_file,
wordDict_file=wordDict_file,
userDict_file=userDict_file,
epochs=1,
)
iterator = MINDIterator
model = NRMSModel(hparams, iterator)
assert model.model is not None
assert model.scorer is not None
assert model.loss is not None
assert model.train_optimizer is not None
@pytest.mark.gpu
def test_naml_component_definition(tmp):
wordEmb_file = os.path.join(tmp, "utils", "embedding_all.npy")
userDict_file = os.path.join(tmp, "utils", "uid2index.pkl")
wordDict_file = os.path.join(tmp, "utils", "word_dict_all.pkl")
vertDict_file = os.path.join(tmp, "utils", "vert_dict.pkl")
subvertDict_file = os.path.join(tmp, "utils", "subvert_dict.pkl")
yaml_file = os.path.join(tmp, "utils", r"naml.yaml")
if not os.path.exists(yaml_file):
download_deeprec_resources(
r"https://recodatasets.blob.core.windows.net/newsrec/",
os.path.join(tmp, "utils"),
"MINDdemo_utils.zip",
)
hparams = prepare_hparams(
yaml_file,
wordEmb_file=wordEmb_file,
wordDict_file=wordDict_file,
userDict_file=userDict_file,
vertDict_file=vertDict_file,
subvertDict_file=subvertDict_file,
epochs=1,
)
iterator = MINDAllIterator
model = NAMLModel(hparams, iterator)
assert model.model is not None
assert model.scorer is not None
assert model.loss is not None
assert model.train_optimizer is not None
@pytest.mark.gpu
def test_npa_component_definition(tmp):
wordEmb_file = os.path.join(tmp, "utils", "embedding.npy")
userDict_file = os.path.join(tmp, "utils", "uid2index.pkl")
wordDict_file = os.path.join(tmp, "utils", "word_dict.pkl")
yaml_file = os.path.join(tmp, "utils", r"npa.yaml")
if not os.path.exists(yaml_file):
download_deeprec_resources(
r"https://recodatasets.blob.core.windows.net/newsrec/",
os.path.join(tmp, "utils"),
"MINDdemo_utils.zip",
)
hparams = prepare_hparams(
yaml_file,
wordEmb_file=wordEmb_file,
wordDict_file=wordDict_file,
userDict_file=userDict_file,
epochs=1,
)
iterator = MINDIterator
model = NPAModel(hparams, iterator)
assert model.model is not None
assert model.scorer is not None
assert model.loss is not None
assert model.train_optimizer is not None
@pytest.mark.gpu
def test_lstur_component_definition(tmp):
wordEmb_file = os.path.join(tmp, "utils", "embedding.npy")
userDict_file = os.path.join(tmp, "utils", "uid2index.pkl")
wordDict_file = os.path.join(tmp, "utils", "word_dict.pkl")
yaml_file = os.path.join(tmp, "utils", r"lstur.yaml")
if not os.path.exists(yaml_file):
download_deeprec_resources(
r"https://recodatasets.blob.core.windows.net/newsrec/",
os.path.join(tmp, "utils"),
"MINDdemo_utils.zip",
)
hparams = prepare_hparams(
yaml_file,
wordEmb_file=wordEmb_file,
wordDict_file=wordDict_file,
userDict_file=userDict_file,
epochs=1,
)
iterator = MINDIterator
model = LSTURModel(hparams, iterator)
assert model.model is not None
assert model.scorer is not None
assert model.loss is not None
assert model.train_optimizer is not None
|
StarcoderdataPython
|
3364410
|
<reponame>Ixyk-Wolf/aiohttp-demos
import pickle
from collections import namedtuple
import numpy as np
_model = None
Scores = namedtuple("Scores", ["toxic", "severe_toxic",
"obscence", "insult", "identity_hate"])
def warm(model_path):
global _model
if _model is None:
with model_path.open('rb') as fp:
pipeline = pickle.load(fp)
_model = pipeline
return True
def predict(message):
results = _model.predict_proba([message])
results = np.array(results).T[1].tolist()[0]
return Scores(*results)
|
StarcoderdataPython
|
1795157
|
<reponame>jhson989/jhML<filename>playground/step2/config.py<gh_stars>0
class Config:
enable_backprop = True
|
StarcoderdataPython
|