content (stringlengths 7-928k) | avg_line_length (float64 3.5-33.8k) | max_line_length (int64 6-139k) | alphanum_fraction (float64 0.08-0.96) | licenses (sequence) | repository_name (stringlengths 7-104) | path (stringlengths 4-230) | size (int64 7-928k) | lang (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
#
from typing import Optional
from ..category import Category
class Utils(Category):
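# Each wrapper below forwards its locals() (the call arguments plus **kwargs)
# to Category._request, which presumably maps them onto the API method named
# in the first argument (e.g. "checkLink").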
def check_link(
self,
url: str = None,
**kwargs
) -> dict:
return self._request("checkLink", locals())
def delete_from_last_shortened(
self,
key: str = None,
**kwargs
) -> dict:
return self._request("deleteFromLastShortened", locals())
def get_last_shortened_links(
self,
count: Optional[int] = None,
offset: Optional[int] = None,
**kwargs
) -> dict:
return self._request("getLastShortenedLinks", locals())
def get_link_stats(
self,
key: str = None,
source: Optional[str] = None,
access_key: Optional[str] = None,
interval: Optional[str] = None,
intervals_count: Optional[int] = None,
extended: Optional[bool] = None,
**kwargs
) -> dict:
return self._request("getLinkStats", locals())
def get_server_time(
self,
**kwargs
) -> dict:
return self._request("getServerTime", locals())
def get_short_link(
self,
url: str = None,
private: Optional[bool] = None,
**kwargs
) -> dict:
return self._request("getShortLink", locals())
def resolve_screen_name(
self,
screen_name: str = None,
**kwargs
) -> dict:
return self._request("resolveScreenName", locals())
| 23.390625 | 65 | 0.560454 | [
"MIT"
] | UT1C/pyVDK | pyvdk/api/categories/utils.py | 1,497 | Python |
from pynzb.base import BaseETreeNZBParser, NZBFile, NZBSegment
try:
from lxml import etree
except ImportError:
raise ImportError("You must have lxml installed before you can use the " +
"lxml NZB parser.")
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class LXMLNZBParser(BaseETreeNZBParser):
def get_etree_iter(self, xml, et=etree):
return iter(et.iterparse(StringIO(xml), events=("start", "end")))
| 29.875 | 78 | 0.732218 | [
"BSD-3-Clause"
] | DavidM42/pynzb | pynzb/lxml_nzb.py | 478 | Python |
#
# PySNMP MIB module SNMP-MPD-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SNMP-MPD-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:08:13 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ValueSizeConstraint")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
IpAddress, TimeTicks, ObjectIdentity, snmpModules, ModuleIdentity, Integer32, Counter64, Counter32, Unsigned32, iso, Bits, NotificationType, Gauge32, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "TimeTicks", "ObjectIdentity", "snmpModules", "ModuleIdentity", "Integer32", "Counter64", "Counter32", "Unsigned32", "iso", "Bits", "NotificationType", "Gauge32", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
snmpMPDMIB = ModuleIdentity((1, 3, 6, 1, 6, 3, 11))
snmpMPDMIB.setRevisions(('2002-10-14 00:00', '1999-05-04 16:36', '1997-09-30 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: snmpMPDMIB.setRevisionsDescriptions(('Updated addresses, published as RFC 3412.', 'Updated addresses, published as RFC 2572.', 'Original version, published as RFC 2272.',))
if mibBuilder.loadTexts: snmpMPDMIB.setLastUpdated('200210140000Z')
if mibBuilder.loadTexts: snmpMPDMIB.setOrganization('SNMPv3 Working Group')
if mibBuilder.loadTexts: snmpMPDMIB.setContactInfo('WG-EMail: [email protected] Subscribe: [email protected] Co-Chair: Russ Mundy Network Associates Laboratories postal: 15204 Omega Drive, Suite 300 Rockville, MD 20850-4601 USA EMail: [email protected] phone: +1 301-947-7107 Co-Chair & Co-editor: David Harrington Enterasys Networks postal: 35 Industrial Way P. O. Box 5005 Rochester NH 03866-5005 USA EMail: [email protected] phone: +1 603-337-2614 Co-editor: Jeffrey Case SNMP Research, Inc. postal: 3001 Kimberlin Heights Road Knoxville, TN 37920-9716 USA EMail: [email protected] phone: +1 423-573-1434 Co-editor: Randy Presuhn BMC Software, Inc. postal: 2141 North First Street San Jose, CA 95131 USA EMail: [email protected] phone: +1 408-546-1006 Co-editor: Bert Wijnen Lucent Technologies postal: Schagen 33 3461 GL Linschoten Netherlands EMail: [email protected] phone: +31 348-680-485 ')
if mibBuilder.loadTexts: snmpMPDMIB.setDescription('The MIB for Message Processing and Dispatching Copyright (C) The Internet Society (2002). This version of this MIB module is part of RFC 3412; see the RFC itself for full legal notices. ')
snmpMPDAdmin = MibIdentifier((1, 3, 6, 1, 6, 3, 11, 1))
snmpMPDMIBObjects = MibIdentifier((1, 3, 6, 1, 6, 3, 11, 2))
snmpMPDMIBConformance = MibIdentifier((1, 3, 6, 1, 6, 3, 11, 3))
snmpMPDStats = MibIdentifier((1, 3, 6, 1, 6, 3, 11, 2, 1))
snmpUnknownSecurityModels = MibScalar((1, 3, 6, 1, 6, 3, 11, 2, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpUnknownSecurityModels.setStatus('current')
if mibBuilder.loadTexts: snmpUnknownSecurityModels.setDescription('The total number of packets received by the SNMP engine which were dropped because they referenced a securityModel that was not known to or supported by the SNMP engine. ')
snmpInvalidMsgs = MibScalar((1, 3, 6, 1, 6, 3, 11, 2, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpInvalidMsgs.setStatus('current')
if mibBuilder.loadTexts: snmpInvalidMsgs.setDescription('The total number of packets received by the SNMP engine which were dropped because there were invalid or inconsistent components in the SNMP message. ')
snmpUnknownPDUHandlers = MibScalar((1, 3, 6, 1, 6, 3, 11, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpUnknownPDUHandlers.setStatus('current')
if mibBuilder.loadTexts: snmpUnknownPDUHandlers.setDescription('The total number of packets received by the SNMP engine which were dropped because the PDU contained in the packet could not be passed to an application responsible for handling the pduType, e.g. no SNMP application had registered for the proper combination of the contextEngineID and the pduType. ')
snmpMPDMIBCompliances = MibIdentifier((1, 3, 6, 1, 6, 3, 11, 3, 1))
snmpMPDMIBGroups = MibIdentifier((1, 3, 6, 1, 6, 3, 11, 3, 2))
snmpMPDCompliance = ModuleCompliance((1, 3, 6, 1, 6, 3, 11, 3, 1, 1)).setObjects(("SNMP-MPD-MIB", "snmpMPDGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
snmpMPDCompliance = snmpMPDCompliance.setStatus('current')
if mibBuilder.loadTexts: snmpMPDCompliance.setDescription('The compliance statement for SNMP entities which implement the SNMP-MPD-MIB. ')
snmpMPDGroup = ObjectGroup((1, 3, 6, 1, 6, 3, 11, 3, 2, 1)).setObjects(("SNMP-MPD-MIB", "snmpUnknownSecurityModels"), ("SNMP-MPD-MIB", "snmpInvalidMsgs"), ("SNMP-MPD-MIB", "snmpUnknownPDUHandlers"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
snmpMPDGroup = snmpMPDGroup.setStatus('current')
if mibBuilder.loadTexts: snmpMPDGroup.setDescription('A collection of objects providing for remote monitoring of the SNMP Message Processing and Dispatching process. ')
mibBuilder.exportSymbols("SNMP-MPD-MIB", snmpMPDMIBGroups=snmpMPDMIBGroups, snmpMPDMIB=snmpMPDMIB, snmpMPDCompliance=snmpMPDCompliance, snmpMPDStats=snmpMPDStats, snmpUnknownPDUHandlers=snmpUnknownPDUHandlers, snmpMPDMIBCompliances=snmpMPDMIBCompliances, snmpMPDGroup=snmpMPDGroup, PYSNMP_MODULE_ID=snmpMPDMIB, snmpMPDMIBObjects=snmpMPDMIBObjects, snmpMPDAdmin=snmpMPDAdmin, snmpMPDMIBConformance=snmpMPDMIBConformance, snmpUnknownSecurityModels=snmpUnknownSecurityModels, snmpInvalidMsgs=snmpInvalidMsgs)
| 132.041667 | 921 | 0.778321 | [
"Apache-2.0"
] | agustinhenze/mibs.snmplabs.com | pysnmp-with-texts/SNMP-MPD-MIB.py | 6,338 | Python |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for rule objects."""
__author__ = 'Sean Lip'
import inspect
import os
import pkgutil
from core.domain import rule_domain
from extensions.objects.models import objects
import feconf
import test_utils
class FakeRule(rule_domain.Rule):
subject_type = objects.Real
description = 'is between {{x|Real}} and {{y|UnicodeString}}'
def _evaluate(self, subject):
return subject == self.x
class RuleServicesUnitTests(test_utils.GenericTestBase):
"""Tests for rule services."""
def test_get_rules_for_obj_type(self):
self.assertEqual(
len(rule_domain.get_rules_for_obj_type('NonnegativeInt')), 1)
self.assertEqual(
len(rule_domain.get_rules_for_obj_type('Real')), 7)
self.assertEqual(
len(rule_domain.get_rules_for_obj_type('Null')), 0)
self.assertEqual(
len(rule_domain.get_rules_for_obj_type('FakeObjType')), 0)
class RuleDomainUnitTests(test_utils.GenericTestBase):
"""Tests for rules."""
def test_rule_initialization(self):
with self.assertRaises(ValueError):
FakeRule()
with self.assertRaises(ValueError):
FakeRule(1, 'too_many_args', 3)
with self.assertRaises(ValueError):
FakeRule('not_a_number', 'a')
with self.assertRaises(ValueError):
FakeRule('wrong_order', 1)
fake_rule = FakeRule(2, 'a')
self.assertTrue(fake_rule.x, 2)
self.assertTrue(fake_rule.y, 'a')
self.assertEqual(
fake_rule._PARAMS,
[('x', objects.Real), ('y', objects.UnicodeString)]
)
def test_rule_is_generic(self):
self.assertTrue(rule_domain.is_generic('Real', 'IsGreaterThan'))
self.assertFalse(rule_domain.is_generic('UnicodeString', 'Equals'))
class RuleDataUnitTests(test_utils.GenericTestBase):
"""Tests for the actual rules in extensions/."""
def test_that_all_rules_have_object_editor_templates(self):
rule_dir = os.path.join(os.getcwd(), feconf.RULES_DIR)
at_least_one_rule_found = False
clses = []
for loader, name, _ in pkgutil.iter_modules(path=[rule_dir]):
if name.endswith('_test') or name == 'base':
continue
module = loader.find_module(name).load_module(name)
for name, clazz in inspect.getmembers(module, inspect.isclass):
param_list = rule_domain.get_param_list(clazz.description)
for (param_name, param_obj_type) in param_list:
# TODO(sll): Get rid of this special case.
if param_obj_type.__name__ == 'NonnegativeInt':
continue
self.assertTrue(
param_obj_type.has_editor_js_template(),
msg='(%s)' % clazz.description)
at_least_one_rule_found = True
clses.append(clazz)
self.assertTrue(at_least_one_rule_found)
class RuleFunctionUnitTests(test_utils.GenericTestBase):
"""Test for functions involving rules."""
def test_get_description_strings_for_obj_type(self):
rule_descriptions = rule_domain.get_description_strings_for_obj_type(
'UnicodeString')
self.assertEqual(rule_descriptions, {
'CaseSensitiveEquals': (
'is equal to {{x|UnicodeString}}, taking case into account'),
'Contains': 'contains {{x|UnicodeString}}',
'Equals': 'is equal to {{x|UnicodeString}}',
'MatchesBase64EncodedFile': (
'has same content as the file located at '
'{{filepath|UnicodeString}}'),
'StartsWith': 'starts with {{x|UnicodeString}}',
})
| 34.84127 | 77 | 0.648064 | [
"Apache-2.0"
] | VictoriaRoux/oppia | core/domain/rule_domain_test.py | 4,390 | Python |
from __future__ import division, print_function, absolute_import
# noinspection PyUnresolvedReferences
from six.moves import range
import numpy as np
from scipy.misc import doccer
from ...stats import nonuniform
from ...auxiliary.array import normalize, nunique, accum
__all__ = ['markov']
_doc_default_callparams = """\
startprob : array_like
Start probabilities.
transmat : array_like
Transition matrix.
"""
_doc_frozen_callparams = ""
_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
docdict_params = {
'_doc_default_callparams': _doc_default_callparams,
}
docdict_noparams = {
'_doc_default_callparams': _doc_frozen_callparams,
}
# noinspection PyPep8Naming
class markov_gen(object):
"""Markov model.
The `startprob` keyword specifies the start probabilities for the model.
The `transmat` keyword specifies the transition probabilities the model
follows.
Methods
-------
score(x, startprob, transmat)
Log probability of the given data `x`.
sample(x, startprob, transmat, size=1)
Draw random samples from a Markov model.
fit(x)
Fits a Markov model from data via MLE or MAP.
Parameters
----------
%(_doc_default_callparams)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" Markov model:
rv = normal_invwishart(startprob=None, transmat=None)
- Frozen object with the same methods but holding the given
start probabilities and transitions fixed.
Examples
--------
>>> from mlpy.stats.models import markov
>>> startprob = np.array([0.1, 0.4, 0.5])
>>> transmat = np.array([[0.3, 0.2, 0.5], [0.6, 0.3, 0.1], [0.1, 0.5, 0.4]])
>>> m = markov(startprob, transmat)
>>> m.sample(size=2)
[[2 2]]
.. note::
Adapted from Matlab:
| Project: `Probabilistic Modeling Toolkit for Matlab/Octave <https://github.com/probml/pmtk3>`_.
| Copyright (2010) Kevin Murphy and Matt Dunham
| License: `MIT <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_
"""
def __init__(self):
super(markov_gen, self).__init__()
self.__doc__ = doccer.docformat(self.__doc__, docdict_params)
def __call__(self, startprob, transmat):
return markov_frozen(startprob, transmat)
def score(self, x, startprob, transmat):
"""Log probability for a given data `x`.
Attributes
----------
x : ndarray
Data to evaluate.
%(_doc_default_callparams)s
Returns
-------
log_prob : float
The log probability of the data.
"""
log_transmat = np.log(transmat + np.finfo(float).eps)
log_startprob = np.log(startprob + np.finfo(float).eps)
log_prior = log_startprob[x[:, 0]]
n = x.shape[0]
nstates = log_startprob.shape[0]
logp = np.zeros(n)
for i in range(n):
njk = accum(np.vstack([x[i, 0:-1], x[i, 1::]]).T, 1, size=(nstates, nstates), dtype=np.int32)
logp[i] = np.sum(njk * log_transmat)
return logp + log_prior
def sample(self, startprob, transmat, size=1):
"""Sample from a Markov model.
Attributes
----------
size: int
Defining number of sampled variates. Defaults to `1`.
Returns
-------
vals: ndarray
The sampled sequences of size (nseq, seqlen).
"""
if np.isscalar(size):
size = (1, size)
vals = np.zeros(size, dtype=np.int32)
nseq, seqlen = size
for i in range(nseq):
vals[i][0] = nonuniform.rvs(startprob)
for t in range(1, seqlen):
vals[i][t] = nonuniform.rvs(transmat[vals[i][t - 1]])
return vals
def fit(self, x):
"""Fit a Markov model from data via MLE or MAP.
Attributes
----------
x : ndarray[int]
Observed data
Returns
-------
%(_doc_default_callparams)s
"""
# TODO: allow to pass pseudo_counts as parameter?
nstates = nunique(x.ravel())
pi_pseudo_counts = np.ones(nstates)
transmat_pseudo_counts = np.ones((nstates, nstates))
n = x.shape[0]
startprob = normalize(np.bincount(x[:, 0])) + pi_pseudo_counts - 1
counts = np.zeros((nstates, nstates))
for i in range(n):
counts += accum(np.vstack([x[i, 0:-1], x[i, 1::]]).T, 1, size=(nstates, nstates))
transmat = normalize(counts + transmat_pseudo_counts - 1, 1)
return startprob, transmat
markov = markov_gen()
# noinspection PyPep8Naming
class markov_frozen(object):
def __init__(self, startprob, transmat):
"""Create a "frozen" Markov model.
Parameters
----------
startprob : array_like
Start probabilities
transmat : array_like
Transition matrix
"""
self._model = markov_gen()
self.startprob = startprob
self.transmat = transmat
def score(self, x):
return self._model.score(x, self.startprob, self.transmat)
def sample(self, size=1):
return self._model.sample(self.startprob, self.transmat, size)
| 27.236181 | 117 | 0.606827 | [
"MIT"
] | evenmarbles/mlpy | mlpy/stats/models/_basic.py | 5,420 | Python |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Polynomial Regression
"""
import numpy as np
import utils.importerUtils
statsmodels = utils.importerUtils.importModuleLazy("statsmodels", globals())
from utils import InputData, InputTypes, randomUtils, xmlUtils, mathUtils, utils
from .TimeSeriesAnalyzer import TimeSeriesCharacterizer, TimeSeriesGenerator
class PolynomialRegression(TimeSeriesGenerator, TimeSeriesCharacterizer):
"""
"""
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for
class cls.
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls.
"""
specs = super(PolynomialRegression, cls).getInputSpecification()
specs.name = 'PolynomialRegression'
specs.description = """TimeSeriesAnalysis algorithm for fitting data of degree one or greater."""
specs.addSub(InputData.parameterInputFactory('degree', contentType=InputTypes.IntegerType,
descr="Specifies the degree polynomial to fit the data with."))
return specs
#
# API Methods
#
def __init__(self, *args, **kwargs):
"""
A constructor that will appropriately intialize a supervised learning object
@ In, args, list, an arbitrary list of positional values
@ In, kwargs, dict, an arbitrary dictionary of keywords and values
@ Out, None
"""
# general infrastructure
super().__init__(*args, **kwargs)
def handleInput(self, spec):
"""
Reads user inputs into this object.
@ In, inp, InputData.InputParams, input specifications
@ Out, settings, dict, initialization settings for this algorithm
"""
settings = super().handleInput(spec)
settings['degree'] = spec.findFirst('degree').value
return settings
def characterize(self, signal, pivot, targets, settings):
"""
Determines the charactistics of the signal based on this algorithm.
@ In, signal, np.ndarray, time series with dims [time, target]
@ In, pivot, np.1darray, time-like parameter values
@ In, targets, list(str), names of targets in same order as signal
@ In, settings, dict, additional settings specific to this algorithm
@ Out, params, dict, characteristic parameters
"""
from sklearn.preprocessing import PolynomialFeatures
import statsmodels.api as sm
params = {target: {'model': {}} for target in targets}
degree = settings['degree']
features = PolynomialFeatures(degree=degree)
xp = features.fit_transform(pivot.reshape(-1, 1))
for target in targets:
results = sm.OLS(signal, xp).fit()
params[target]['model']['intercept'] = results.params[0]
for i, value in enumerate(results.params[1:]):
params[target]['model'][f'coef{i+1}'] = value
params[target]['model']['object'] = results
return params
def getParamNames(self, settings):
"""
Return list of expected variable names based on the parameters
@ In, settings, dict, training parameters for this algorithm
@ Out, names, list, string list of names
"""
names = []
for target in settings['target']:
base = f'{self.name}__{target}'
names.append(f'{base}__intercept')
for i in range(1,settings['degree']):
names.append(f'{base}__coef{i}')
return names
def getParamsAsVars(self, params):
"""
Map characterization parameters into flattened variable format
@ In, params, dict, trained parameters (as from characterize)
@ Out, rlz, dict, realization-style response
"""
rlz = {}
for target, info in params.items():
base = f'{self.name}__{target}'
for name, value in info['model'].items():
if name == 'object':
continue
rlz[f'{base}__{name}'] = value
return rlz
def generate(self, params, pivot, settings):
"""
Generates a synthetic history from fitted parameters.
@ In, params, dict, characterization such as otained from self.characterize()
@ In, pivot, np.array(float), pivot parameter values
@ In, settings, dict, additional settings specific to algorithm
@ Out, synthetic, np.array(float), synthetic estimated model signal
"""
from sklearn.preprocessing import PolynomialFeatures
synthetic = np.zeros((len(pivot), len(params)))
degree = settings['degree']
features = PolynomialFeatures(degree=degree)
xp = features.fit_transform(pivot.reshape(-1, 1))
for t, (target, _) in enumerate(params.items()):
model = params[target]['model']['object']
synthetic[:, t] = model.predict(xp)
return synthetic
def writeXML(self, writeTo, params):
"""
Allows the engine to put whatever it wants into an XML to print to file.
@ In, writeTo, xmlUtils.StaticXmlElement, entity to write to
@ In, params, dict, trained parameters as from self.characterize
@ Out, None
"""
for target, info in params.items():
base = xmlUtils.newNode(target)
writeTo.append(base)
for name, value in info['model'].items():
if name == 'object':
continue
base.append(xmlUtils.newNode(name, text=f'{float(value):1.9e}'))
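# Illustrative, self-contained sketch (hypothetical example) of the fitting core
# used by characterize()/generate(): ordinary least squares on polynomial
# features of the pivot, evaluated back on the same pivot.
if __name__ == '__main__':
    from sklearn.preprocessing import PolynomialFeatures
    import statsmodels.api as sm
    pivot = np.linspace(0.0, 1.0, 50)
    signal = 1.0 + 2.0 * pivot + 3.0 * pivot ** 2
    xp = PolynomialFeatures(degree=2).fit_transform(pivot.reshape(-1, 1))
    results = sm.OLS(signal, xp).fit()
    print(results.params)        # approximately [1.0, 2.0, 3.0]
    print(results.predict(xp))   # approximately reproduces `signal`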
| 36.917722 | 112 | 0.677353 | [
"Apache-2.0"
] | archmagethanos/raven | framework/TSA/PolynomialRegression.py | 5,833 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2014-10-26 13:02:58
# @Author : [email protected]
from sqlalchemy import Column, String, Integer
from sqlalchemy.ext.declarative import declarative_base
from db import dbengine, Base
class User(Base):
__tablename__ = 'user'
cardnum = Column(String(10), primary_key=True)
number = Column(String(50), nullable=True)
password = Column(String(50), nullable=False)
pe_password = Column(String(50), nullable=True)
lib_username = Column(String(50), nullable=True)
lib_password = Column(String(50), nullable=True)
card_query_pwd = Column(String(50), nullable=True)
card_consume_pwd = Column(String(50), nullable=True)
state = Column(Integer, nullable=False)
| 35.857143 | 56 | 0.713147 | [
"MIT"
] | HeraldStudio/herald_auth | mod/models/user.py | 753 | Python |
import torch
import torch.nn as nn
def clip_by_tensor(t, t_min, t_max):
result = (t>=t_min)*t+(t<t_min)*t_min
result = (result<=t_max)*result+(result>t_max)*t_max
return result
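# Worked example of the helper above (values are illustrative):
# clip_by_tensor(torch.tensor([0.0, 0.5, 1.2]), 1e-8, 1.0)
# -> tensor([1.0000e-08, 5.0000e-01, 1.0000e+00])
# FocalLoss uses it below to keep the arguments of torch.log() away from zero.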
class FocalLoss(nn.Module):
def __init__(self, gamma=2, alpha=0.25):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
def forward(self, prediction_tensor, target_tensor):
alpha = self.alpha
gamma = self.gamma
# input:size is M*2. M is the batch number
"""Compute focal loss for predictions.
Multi-labels Focal loss formula:
FL = -alpha * (z-p)^gamma * log(p) -(1-alpha) * p^gamma * log(1-p)
,which alpha = 0.25, gamma = 2, p = sigmoid(x), z = target_tensor.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: A float tensor of shape [batch_size, num_anchors]
alpha: A scalar tensor for focal loss alpha hyper-parameter
gamma: A scalar tensor for focal loss gamma hyper-parameter
Returns:
loss: A (scalar) tensor representing the value of the loss function
"""
sigmoid_p = torch.sigmoid(prediction_tensor)
zeros = torch.zeros_like(sigmoid_p, dtype=sigmoid_p.dtype)
# For positive prediction, only need consider front part loss, back part is 0;
# target_tensor > zeros <=> z=1, so positive coefficient = z - p.
pos_p_sub = torch.where(target_tensor > zeros, target_tensor - sigmoid_p, zeros)
# For negative prediction, only need consider back part loss, front part is 0;
# target_tensor > zeros <=> z=1, so negative coefficient = 0.
neg_p_sub = torch.where(target_tensor > zeros, zeros, sigmoid_p)
per_entry_cross_ent = - alpha * (pos_p_sub ** gamma) * torch.log(clip_by_tensor(sigmoid_p, 1e-8, 1.0)) \
- (1 - alpha) * (neg_p_sub ** gamma) * torch.log(clip_by_tensor(1.0 - sigmoid_p, 1e-8, 1.0))
return per_entry_cross_ent.mean()
| 46.897959 | 122 | 0.639252 | [
"Apache-2.0"
] | 86236291/MSAN_Retina | utils/FocalLoss.py | 2,302 | Python |
# Generated by Django 2.2.2 on 2019-07-18 19:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('weblog', '0011_auto_20190718_1829'),
]
operations = [
migrations.AddField(
model_name='userdetail',
name='phone',
field=models.CharField(blank=True, max_length=15, null=True),
),
]
| 21.526316 | 73 | 0.606357 | [
"MIT"
] | mmohajer9/Resumo | weblog/migrations/0012_userdetail_phone.py | 409 | Python |
import csv
import itertools
import sys
import re
import math
def get_root_mean_square( mean_square, number):
return math.sqrt(mean_square / number)
def gpsr_tlm_compare(target_arr, answer_arr, lift_off_time, fileobj, csv_header):
cache_idx = 0
sim_data_list = []
start_flight_idx = 0
iter_idx = 0
mean_len_sq = 0.0
mean_speed_sq = 0.0
filecusor = csv.writer(fileobj)
filecusor.writerow(csv_header)
for target_elem in enumerate(target_arr):
sim_data_list = []
iter_idx += 1
if target_elem[0] == 0:
continue;
if float(target_elem[1][0]) == lift_off_time:
start_flight_idx = iter_idx
for answer_elem in enumerate(answer_arr , start = cache_idx):
cache_idx = answer_elem[0]
if answer_elem[0] == 0:
continue;
if abs(float(target_elem[1][0]) - float(answer_elem[1][0])) == 0.0:
# simtime
sim_data_list.append(target_elem[1][0])
# gps sow time
sim_data_list.append(target_elem[1][1])
# DM Length
dm_length = math.sqrt(float(answer_elem[1][2])**2 + float(answer_elem[1][3])**2 + float(answer_elem[1][4])**2)
sim_data_list.append(dm_length)
# DM SPEED
dm_speed = math.sqrt(float(answer_elem[1][5])**2 + float(answer_elem[1][6])**2 + float(answer_elem[1][7])**2)
sim_data_list.append(dm_speed)
# DM ABEE
dm_abee = float(answer_elem[1][10])
sim_data_list.append(dm_abee)
# Target Benchmark (DM_GPSR_TLM - target_GPSR_TLM)
target_length_err = float(answer_elem[1][18]) - float(target_elem[1][18])
target_speed_err = float(answer_elem[1][19]) - float(target_elem[1][19])
sim_data_list.append(target_length_err)
sim_data_list.append(target_speed_err)
# Answer DM-TLM
sim_data_list.append(answer_elem[1][20])
sim_data_list.append(answer_elem[1][21])
# Target DM-TLM
sim_data_list.append(target_elem[1][20])
sim_data_list.append(target_elem[1][21])
filecusor.writerow(sim_data_list)
# Root Mean square
if iter_idx >= start_flight_idx:
mean_len_sq = mean_len_sq + target_length_err**2
mean_speed_sq = mean_speed_sq + target_speed_err**2
break
return (iter_idx - start_flight_idx), mean_len_sq, mean_speed_sq
| 41.968254 | 127 | 0.580938 | [
"BSD-3-Clause"
] | cihuang123/Next-simulation | utilities/log_parser/parser_utility.py | 2,644 | Python |
from databroker.v1 import from_config
from databroker.v0 import Broker
from .. import load_config
name = 'tes'
v0_catalog = Broker.from_config(load_config(f'{name}/{name}.yml'))
v1_catalog = from_config(load_config(f'{name}/{name}.yml'))
catalog = from_config(load_config(f'{name}/{name}.yml')).v2
| 33.222222 | 66 | 0.755853 | [
"BSD-3-Clause"
] | NSLS-II/nsls2-catalogs | nsls2_catalogs/tes/__init__.py | 299 | Python |
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os
import tempfile
import json
import pytest
from context import app
@pytest.fixture
def client(request):
"""
Definges the client object to make requests against
"""
db_fd, app.APP.config['DATABASE'] = tempfile.mkstemp()
app.APP.config['TESTING'] = True
client = app.APP.test_client()
def teardown():
"""
Close the client once testing has completed
"""
os.close(db_fd)
os.unlink(app.APP.config['DATABASE'])
request.addfinalizer(teardown)
return client
def test_region_meta(client):
"""
Test that the track endpoint is returning the usage paramerts
"""
rest_value = client.get(
'/mug/api/dmp/file/whole',
headers=dict(Authorization='Authorization: Bearer teststring')
)
details = json.loads(rest_value.data)
# print(details)
assert 'usage' in details
def test_region_file(client):
"""
Test that the track endpoint is returning the usage paramerts
"""
rest_value = client.get(
'/mug/api/dmp/file/region?file_id=testtest0000&chrom=19&start=3000000&end=3100000',
headers=dict(Authorization='Authorization: Bearer teststring')
)
assert len(rest_value.data) > 0
| 28.602941 | 91 | 0.698715 | [
"Apache-2.0"
] | Multiscale-Genomics/mg-rest-file | tests/test_rest_file_region.py | 1,945 | Python |
import asyncio
import io
import time
from firebot import CMD_HELP
from firebot.utils import edit_or_reply, fire_on_cmd, sudo_cmd
@fire.on(fire_on_cmd(pattern="bash ?(.*)"))
@fire.on(sudo_cmd(pattern="bash ?(.*)", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
PROCESS_RUN_TIME = 100
cmd = event.pattern_match.group(1)
tflyf = await edit_or_reply(event, "Processing Your Request...")
reply_to_id = event.message.id
if event.reply_to_msg_id:
reply_to_id = event.reply_to_msg_id
time.time() + PROCESS_RUN_TIME
process = await asyncio.create_subprocess_shell(
cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
stdout, stderr = await process.communicate()
e = stderr.decode()
if not e:
e = "No Error"
o = stdout.decode()
if not o:
o = "**Tip**: \n`If you want to see the results of your code, I suggest printing them to stdout.`"
else:
_o = o.split("\n")
o = "`\n".join(_o)
OUTPUT = f"**QUERY:**\n__Command:__\n`{cmd}` \n__PID:__\n`{process.pid}`\n\n**stderr:** \n`{e}`\n**Output:**\n{o}"
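    # Telegram caps a single message at 4096 characters, so anything longer is
    # uploaded as a document instead of being edited into the reply (assumed
    # rationale for the 4095 threshold below).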
if len(OUTPUT) > 4095:
with io.BytesIO(str.encode(OUTPUT)) as out_file:
out_file.name = "exec.text"
await bot.send_file(
event.chat_id,
out_file,
force_document=True,
allow_cache=False,
caption=cmd,
reply_to=reply_to_id,
)
await event.delete()
await tflyf.edit(OUTPUT)
CMD_HELP.update(
{
"bash": "**Bash**\
\n\n**Syntax : **`.bash <cmd>`\
\n**Usage :** Run Commands Using Userbot"
}
)
| 29.947368 | 118 | 0.596368 | [
"MIT"
] | Anju56/Fire-X | firebot/modules/bash.py | 1,707 | Python |
# Demo Python Datetime - The strftime() Method
'''
The strftime() Method
The datetime object has a method for formatting date objects into readable strings.
The method is called strftime(), and takes one parameter, format, to specify the format of the returned string.
Directive   Description                                                     Example
%a          Weekday, short version                                          Wed
%A          Weekday, full version                                           Wednesday
%w          Weekday as a number 0-6, 0 is Sunday                            3
%d          Day of month 01-31                                              31
%b          Month name, short version                                       Dec
%B          Month name, full version                                        December
%m          Month as a number 01-12                                         12
%y          Year, short version, without century                            18
%Y          Year, full version                                              2018
%H          Hour 00-23                                                      17
%I          Hour 00-12                                                      05
%p          AM/PM                                                           PM
%M          Minute 00-59                                                    41
%S          Second 00-59                                                    08
%f          Microsecond 000000-999999                                       548513
%z          UTC offset                                                      +0100
%Z          Timezone                                                        CST
%j          Day number of year 001-366                                      365
%U          Week number of year, Sunday as the first day of week, 00-53     52
%W          Week number of year, Monday as the first day of week, 00-53     52
%c          Local version of date and time                                  Mon Dec 31 17:41:00 2018
%x          Local version of date                                           12/31/18
%X          Local version of time                                           17:41:00
%%          A % character                                                   %
'''
import datetime
x = datetime.datetime.now()
print(x)
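# A few more directive examples (illustrative; the exact output depends on the
# current date and time):
print(x.strftime("%A"))         # full weekday name, e.g. Wednesday
print(x.strftime("%d %B %Y"))   # e.g. 31 December 2018
print(x.strftime("%H:%M:%S"))   # e.g. 17:41:00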
print(x.strftime("%z"))
| 57.333333 | 111 | 0.354236 | [
"MIT"
] | luis2ra/py3-00-w3schools | 0-python-tutorial/25-dates05_strftime23_z.py | 2,408 | Python |
from abc import ABC, abstractmethod
from status import Status
class State(ABC):
def __init__(self, turret_controls, body_controls, status: Status):
self.turret_controls = turret_controls
self.body_controls = body_controls
self.status = status
@abstractmethod
def perform(self):
pass
@abstractmethod
def calculate_priority(self, is_current_state: bool):
pass
| 23.444444 | 71 | 0.699052 | [
"MIT"
] | Iain530/do-you-have-the-guts2018 | src/states/state.py | 422 | Python |
import random
class Solution(object):
# def findKthLargest(self, nums, k):
# """
# :type nums: List[int]
# :type k: int
# :rtype: int
# """
# return sorted(nums, reverse=True)[k - 1]
# def findKthLargest(self, nums, k):
# # build min heap
# heapq.heapify(nums)
# # remove n - k smallest number
# while len(nums) > k:
# heapq.heappop(nums)
# return nums[0]
# #return heapq.nlargest(k, nums)[-1]
def findKthLargest(self, nums, k):
# shuffle nums to avoid n*n
random.shuffle(nums)
return self.quickSelection(nums, 0, len(nums) - 1, len(nums) - k)
def quickSelection(self, nums, start, end, k):
if start > end:
return float('inf')
pivot = nums[end]
left = start
for i in range(start, end):
if nums[i] <= pivot:
# swip left and i
nums[left], nums[i] = nums[i], nums[left]
left += 1
nums[left], nums[end] = nums[end], nums[left]
if left == k:
return nums[left]
elif left < k:
return self.quickSelection(nums, left + 1, end, k)
else:
return self.quickSelection(nums, start, left - 1, k)
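# Quick illustrative check of the quickselect path (hypothetical usage example):
if __name__ == '__main__':
    assert Solution().findKthLargest([3, 2, 1, 5, 6, 4], 2) == 5
    assert Solution().findKthLargest([3, 2, 3, 1, 2, 4, 5, 5, 6], 4) == 4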
| 31.439024 | 73 | 0.501939 | [
"MIT"
] | CZZLEGEND/leetcode-2 | python/215_Kth_Largest_Element_in_an_Array.py | 1,289 | Python |
from itertools import islice
from tests.unit.utils import Teardown
import inspect
import pytest
import time
import cbpro.messenger
import cbpro.public
import cbpro.private
class TestPrivateClient(object):
def test_private_attr(self, private_client):
assert isinstance(private_client, cbpro.public.PublicClient)
assert hasattr(private_client, 'accounts')
assert hasattr(private_client, 'orders')
assert hasattr(private_client, 'fills')
assert hasattr(private_client, 'limits')
assert hasattr(private_client, 'deposits')
assert hasattr(private_client, 'withdrawals')
assert hasattr(private_client, 'conversions')
assert hasattr(private_client, 'payments')
assert hasattr(private_client, 'coinbase')
assert hasattr(private_client, 'fees')
assert hasattr(private_client, 'reports')
assert hasattr(private_client, 'profiles')
assert hasattr(private_client, 'oracle')
def test_private_accounts(self, private_client):
accounts = private_client.accounts
assert isinstance(accounts, cbpro.messenger.Subscriber)
assert isinstance(accounts, cbpro.private.Accounts)
assert hasattr(accounts, 'list')
assert hasattr(accounts, 'get')
assert hasattr(accounts, 'history')
assert hasattr(accounts, 'holds')
def test_private_orders(self, private_client):
orders = private_client.orders
assert isinstance(orders, cbpro.messenger.Subscriber)
assert isinstance(orders, cbpro.private.Orders)
assert hasattr(orders, 'post')
assert hasattr(orders, 'cancel')
assert hasattr(orders, 'list')
assert hasattr(orders, 'get')
def test_private_fills(self, private_client):
fills = private_client.fills
assert isinstance(fills, cbpro.messenger.Subscriber)
assert isinstance(fills, cbpro.private.Fills)
assert hasattr(fills, 'list')
def test_private_limits(self, private_client):
limits = private_client.limits
assert isinstance(limits, cbpro.messenger.Subscriber)
assert isinstance(limits, cbpro.private.Limits)
assert hasattr(limits, 'get')
def test_private_deposits(self, private_client):
deposits = private_client.deposits
assert isinstance(deposits, cbpro.messenger.Subscriber)
assert isinstance(deposits, cbpro.private.Deposits)
assert hasattr(deposits, 'list')
assert hasattr(deposits, 'get')
assert hasattr(deposits, 'payment')
assert hasattr(deposits, 'coinbase')
assert hasattr(deposits, 'generate')
def test_private_withdrawals(self, private_client):
withdrawals = private_client.withdrawals
assert isinstance(withdrawals, cbpro.messenger.Subscriber)
assert isinstance(withdrawals, cbpro.private.Deposits)
assert isinstance(withdrawals, cbpro.private.Withdrawals)
assert hasattr(withdrawals, 'list')
assert hasattr(withdrawals, 'get')
assert hasattr(withdrawals, 'payment')
assert hasattr(withdrawals, 'coinbase')
assert hasattr(withdrawals, 'generate')
assert hasattr(withdrawals, 'crypto')
assert hasattr(withdrawals, 'estimate')
def test_private_conversions(self, private_client):
conversions = private_client.conversions
assert isinstance(conversions, cbpro.messenger.Subscriber)
assert isinstance(conversions, cbpro.private.Conversions)
assert hasattr(conversions, 'post')
def test_private_payments(self, private_client):
payments = private_client.payments
assert isinstance(payments, cbpro.messenger.Subscriber)
assert isinstance(payments, cbpro.private.Payments)
assert hasattr(payments, 'list')
def test_private_coinbase(self, private_client):
coinbase = private_client.coinbase
assert isinstance(coinbase, cbpro.messenger.Subscriber)
assert isinstance(coinbase, cbpro.private.Coinbase)
assert hasattr(coinbase, 'list')
def test_private_fees(self, private_client):
fees = private_client.fees
assert isinstance(fees, cbpro.messenger.Subscriber)
assert isinstance(fees, cbpro.private.Fees)
assert hasattr(fees, 'list')
def test_private_reports(self, private_client):
reports = private_client.reports
assert isinstance(reports, cbpro.messenger.Subscriber)
assert isinstance(reports, cbpro.private.Reports)
def test_private_profiles(self, private_client):
profiles = private_client.profiles
assert isinstance(profiles, cbpro.messenger.Subscriber)
assert isinstance(profiles, cbpro.private.Profiles)
assert hasattr(profiles, 'list')
assert hasattr(profiles, 'get')
assert hasattr(profiles, 'transfer')
def test_private_oracle(self, private_client):
oracle = private_client.oracle
assert isinstance(oracle, cbpro.messenger.Subscriber)
assert isinstance(oracle, cbpro.private.Oracle)
@pytest.mark.skip
class TestPrivateAccounts(Teardown):
def test_list(self, private_client):
response = private_client.accounts.list()
assert isinstance(response, list)
assert 'currency' in response[0]
def test_get(self, private_client, account_id):
response = private_client.accounts.get(account_id)
assert isinstance(response, dict)
assert 'currency' in response
def test_history(self, private_client, account_id):
response = private_client.accounts.history(account_id)
assert inspect.isgenerator(response)
accounts = list(islice(response, 5))
assert 'amount' in accounts[0]
assert 'details' in accounts[0]
def test_holds(self, private_client, account_id):
response = private_client.accounts.holds(account_id)
assert inspect.isgenerator(response)
holds = list(islice(response, 5))
assert 'type' in holds[0]
assert 'ref' in holds[0]
@pytest.mark.skip
class TestPrivateOrders(Teardown):
def test_post_limit_order(self, private_client, private_model):
json = private_model.orders.limit('buy', 'BTC-USD', 40000.0, 0.001)
response = private_client.orders.post(json)
assert isinstance(response, dict)
assert response['type'] == 'limit'
def test_post_market_order(self, private_client, private_model):
json = private_model.orders.market('buy', 'BTC-USD', size=0.001)
response = private_client.orders.post(json)
assert isinstance(response, dict)
assert 'status' in response
assert response['type'] == 'market'
@pytest.mark.parametrize('stop', ['entry', 'loss'])
def test_post_stop_order(self, private_client, private_model, stop):
json = private_model.orders.market(
'buy', 'BTC-USD', size=0.001, stop=stop, stop_price=30000
)
response = private_client.orders.post(json)
assert isinstance(response, dict)
assert response['stop'] == stop
assert response['type'] == 'market'
def test_cancel(self, private_client, private_model):
json = private_model.orders.limit('buy', 'BTC-USD', 40000.0, 0.001)
order = private_client.orders.post(json)
time.sleep(0.2)
params = private_model.orders.cancel('BTC-USD')
response = private_client.orders.cancel(order['id'], params)
assert isinstance(response, list)
assert response[0] == order['id']
def test_list(self, private_client, private_model):
params = private_model.orders.list('pending')
response = private_client.orders.list(params)
assert inspect.isgenerator(response)
orders = list(islice(response, 10))
assert isinstance(orders, list)
assert 'created_at' in orders[0]
def test_get(self, private_client, private_model):
json = private_model.orders.limit('buy', 'BTC-USD', 40000.0, 0.001)
order = private_client.orders.post(json)
time.sleep(0.2)
response = private_client.orders.get(order['id'])
assert response['id'] == order['id']
@pytest.mark.skip
class TestPrivateFills(Teardown):
def test_list(self, private_client, private_model):
params = private_model.fills.list('BTC-USD')
response = private_client.fills.list(params)
assert inspect.isgenerator(response)
fills = list(islice(response, 10))
assert isinstance(fills, list)
assert 'fill_fees' in fills[0]
@pytest.mark.skip
class TestPrivateLimits(Teardown):
def test_get(self, private_client):
response = private_client.limits.get()
assert isinstance(response, dict)
@pytest.mark.skip
class TestPrivateDeposits(Teardown):
pass
@pytest.mark.skip
class TestPrivateWithdrawals(Teardown):
pass
@pytest.mark.skip
class TestPrivateConversions(Teardown):
def test_post(self, private_client, private_model):
json = private_model.conversions.post('USD', 'USDC', 10.0)
response = private_client.conversions.post(json)
assert isinstance(response, dict)
assert 'id' in response
assert 'amount' in response
assert response['from'] == 'USD'
assert response['to'] == 'USDC'
@pytest.mark.skip
class TestPrivatePayments(Teardown):
def test_list(self, private_client):
response = private_client.payments.list()
assert isinstance(response, list)
@pytest.mark.skip
class TestPrivateCoinbase(Teardown):
def test_list(self, private_client):
response = private_client.coinbase.list()
assert isinstance(response, list)
@pytest.mark.skip
class TestPrivateFees(Teardown):
def test_list(self, private_client):
response = private_client.fees.list()
assert isinstance(response, list)
@pytest.mark.skip
class TestPrivateReports(Teardown):
pass
@pytest.mark.skip
class TestPrivateProfiles(Teardown):
pass
@pytest.mark.skip
class TestPrivateOracle(Teardown):
pass
| 35.038062 | 75 | 0.693067 | [
"MIT"
] | Casaplaya/coinbasepro-python | tests/unit/test_private.py | 10,126 | Python |
from __future__ import unicode_literals
from django.db import models
from timezone_field import TimeZoneField
class FakeModel(models.Model):
tz = TimeZoneField()
tz_opt = TimeZoneField(blank=True)
tz_opt_default = TimeZoneField(blank=True, default='America/Los_Angeles')
| 23.916667 | 77 | 0.787456 | [
"BSD-2-Clause"
] | ambitioninc/django-timezone-field | timezone_field/tests/models.py | 287 | Python |
"""Configuration for reproducing leaderboard of grb-citeseer dataset."""
import torch
import torch.nn.functional as F
from grb.evaluator import metric
model_list = ["gcn",
"gcn_ln",
"gcn_at",
"graphsage",
"graphsage_ln",
"graphsage_at",
"sgcn",
"sgcn_ln",
"sgcn_at",
"robustgcn",
"robustgcn_at",
"tagcn",
"tagcn_ln",
"tagcn_at",
"appnp",
"appnp_ln",
"appnp_at",
"gin",
"gin_ln",
"gin_at",
"gat",
"gat_ln",
"gat_at",
"gcnguard",
"gatguard",
"gcnsvd"]
model_list_basic = ["gcn",
"graphsage",
"sgcn",
"tagcn",
"appnp",
"gin",
"gat"]
modification_attack_list = ["dice",
"rand",
"flip",
"fga",
"nea",
"pgd",
"prbcd",
"stack"]
injection_attack_list = ["rand",
"fgsm",
"pgd",
"speit",
"tdgia"]
model_sur_list = ["gcn"]
def build_model(model_name, num_features, num_classes):
"""Hyper-parameters are determined by auto training, refer to grb.utils.trainer.AutoTrainer."""
if model_name in ["gcn", "gcn_ln", "gcn_at", "gcn_ln_at"]:
from grb.model.torch import GCN
model = GCN(in_features=num_features,
out_features=num_classes,
hidden_features=128,
n_layers=3,
layer_norm=True if "ln" in model_name else False,
dropout=0.7)
train_params = {
"lr" : 0.001,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["graphsage", "graphsage_ln", "graphsage_at", "graphsage_ln_at"]:
from grb.model.torch import GraphSAGE
model = GraphSAGE(in_features=num_features,
out_features=num_classes,
hidden_features=256,
n_layers=5,
layer_norm=True if "ln" in model_name else False,
dropout=0.5)
train_params = {
"lr" : 0.0001,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["sgcn", "sgcn_ln", "sgcn_at", "sgcn_ln_at"]:
from grb.model.torch import SGCN
model = SGCN(in_features=num_features,
out_features=num_classes,
hidden_features=256,
n_layers=4,
k=4,
layer_norm=True if "ln" in model_name else False,
dropout=0.5)
train_params = {
"lr" : 0.01,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["tagcn", "tagcn_ln", "tagcn_at", "tagcn_ln_at"]:
from grb.model.torch import TAGCN
model = TAGCN(in_features=num_features,
out_features=num_classes,
hidden_features=256,
n_layers=3,
k=2,
layer_norm=True if "ln" in model_name else False,
dropout=0.5)
train_params = {
"lr" : 0.005,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["appnp", "appnp_ln", "appnp_at", "appnp_ln_at"]:
from grb.model.torch import APPNP
model = APPNP(in_features=num_features,
out_features=num_classes,
hidden_features=128,
n_layers=3,
k=3,
layer_norm=True if "ln" in model_name else False,
dropout=0.5)
train_params = {
"lr" : 0.001,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["gin", "gin_ln", "gin_at", "gin_ln_at"]:
from grb.model.torch import GIN
model = GIN(in_features=num_features,
out_features=num_classes,
hidden_features=256,
n_layers=2,
layer_norm=True if "ln" in model_name else False,
dropout=0.6)
train_params = {
"lr" : 0.0001,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["gat", "gat_ln", "gat_at", "gat_ln_at"]:
from grb.model.dgl import GAT
model = GAT(in_features=num_features,
out_features=num_classes,
hidden_features=64,
n_layers=3,
n_heads=6,
layer_norm=True if "ln" in model_name else False,
dropout=0.6)
train_params = {
"lr" : 0.005,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["robustgcn", "robustgcn_at"]:
from grb.defense import RobustGCN
model = RobustGCN(in_features=num_features,
out_features=num_classes,
hidden_features=128,
n_layers=3,
dropout=0.5)
train_params = {
"lr" : 0.001,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["gcnsvd", "gcnsvd_ln"]:
from grb.defense.gcnsvd import GCNSVD
model = GCNSVD(in_features=num_features,
out_features=num_classes,
hidden_features=128,
n_layers=3,
dropout=0.5)
train_params = {
"lr" : 0.001,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["gcnguard"]:
from grb.defense import GCNGuard
model = GCNGuard(in_features=num_features,
out_features=num_classes,
hidden_features=128,
n_layers=3,
dropout=0.5)
train_params = {
"lr" : 0.001,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["gatguard"]:
from grb.defense import GATGuard
model = GATGuard(in_features=num_features,
out_features=num_classes,
hidden_features=64,
n_heads=6,
n_layers=3,
dropout=0.5)
train_params = {
"lr" : 0.001,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
def build_optimizer(model, lr):
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
return optimizer
def build_loss():
return F.nll_loss
def build_metric():
return metric.eval_acc
def build_attack(attack_name, device="cpu", args=None, mode="modification"):
if mode == "modification":
if attack_name == "dice":
from grb.attack.modification import DICE
attack = DICE(n_edge_mod=args.n_edge_mod,
ratio_delete=0.6,
device=device)
return attack
if attack_name == "fga":
from grb.attack.modification import FGA
attack = FGA(n_edge_mod=args.n_edge_mod,
device=device)
return attack
if attack_name == "flip":
from grb.attack.modification import FLIP
attack = FLIP(n_edge_mod=args.n_edge_mod,
flip_type=args.flip_type,
mode="descend",
device=device)
return attack
if attack_name == "rand":
from grb.attack.modification import RAND
attack = RAND(n_edge_mod=args.n_edge_mod,
device=device)
return attack
if attack_name == "nea":
from grb.attack.modification import NEA
attack = NEA(n_edge_mod=args.n_edge_mod,
device=device)
return attack
if attack_name == "stack":
from grb.attack.modification import STACK
attack = STACK(n_edge_mod=args.n_edge_mod,
device=device)
return attack
if attack_name == "pgd":
from grb.attack.modification import PGD
attack = PGD(epsilon=args.epsilon,
n_epoch=args.attack_epoch,
n_node_mod=args.n_node_mod,
n_edge_mod=args.n_edge_mod,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
early_stop=args.early_stop,
device=device)
return attack
if attack_name == "prbcd":
from grb.attack.modification import PRBCD
attack = PRBCD(epsilon=args.epsilon,
n_epoch=args.attack_epoch,
n_node_mod=args.n_node_mod,
n_edge_mod=args.n_edge_mod,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
early_stop=args.early_stop,
device=device)
return attack
elif mode == "injection":
if attack_name == "rand":
from grb.attack.injection import RAND
attack = RAND(n_inject_max=args.n_inject_max,
n_edge_max=args.n_edge_max,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
device=device)
return attack
elif attack_name == "fgsm":
from grb.attack.injection import FGSM
attack = FGSM(epsilon=args.lr,
n_epoch=args.n_epoch,
n_inject_max=args.n_inject_max,
n_edge_max=args.n_edge_max,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
early_stop=args.early_stop,
device=device)
return attack
elif attack_name == "pgd":
from grb.attack.injection import PGD
attack = PGD(epsilon=args.lr,
n_epoch=args.n_epoch,
n_inject_max=args.n_inject_max,
n_edge_max=args.n_edge_max,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
early_stop=args.early_stop,
device=device)
return attack
elif attack_name == "speit":
from grb.attack.injection import SPEIT
attack = SPEIT(lr=args.lr,
n_epoch=args.n_epoch,
n_inject_max=args.n_inject_max,
n_edge_max=args.n_edge_max,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
early_stop=args.early_stop,
device=device)
return attack
elif attack_name == "tdgia":
from grb.attack.injection import TDGIA
attack = TDGIA(lr=args.lr,
n_epoch=args.n_epoch,
n_inject_max=args.n_inject_max,
n_edge_max=args.n_edge_max,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
early_stop=args.early_stop,
inject_mode='random',
sequential_step=1.0,
device=device)
return attack
elif attack_name == "tdgia_random":
from grb.attack.injection.tdgia import TDGIA
attack = TDGIA(lr=args.lr,
n_epoch=args.n_epoch,
n_inject_max=args.n_inject_max,
n_edge_max=args.n_edge_max,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
early_stop=args.early_stop,
inject_mode='random',
device=device)
return attack
elif attack_name == "tdgia_uniform":
from grb.attack.injection import TDGIA
attack = TDGIA(lr=args.lr,
n_epoch=args.n_epoch,
n_inject_max=args.n_inject_max,
n_edge_max=args.n_edge_max,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
early_stop=args.early_stop,
inject_mode='uniform',
sequential_step=1.0,
device=device)
return attack
else:
raise NotImplementedError
def build_model_autotrain(model_name):
if model_name == "gcn":
from grb.model.torch import GCN
def params_search(trial):
model_params = {
"hidden_features": trial.suggest_categorical("hidden_features", [32, 64, 128, 256]),
"n_layers" : trial.suggest_categorical("n_layers", [2, 3, 4, 5]),
"dropout" : trial.suggest_categorical("dropout", [0.5, 0.6, 0.7, 0.8]),
}
other_params = {
"lr" : trial.suggest_categorical("lr", [1e-2, 1e-3, 5e-3, 1e-4]),
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
}
return model_params, other_params
return GCN, params_search
if model_name == "graphsage":
from grb.model.torch import GraphSAGE
def params_search(trial):
model_params = {
"hidden_features": trial.suggest_categorical("hidden_features", [32, 64, 128, 256]),
"n_layers" : trial.suggest_categorical("n_layers", [2, 3, 4, 5]),
"dropout" : trial.suggest_categorical("dropout", [0.5, 0.6, 0.7, 0.8]),
}
other_params = {
"lr" : trial.suggest_categorical("lr", [1e-2, 1e-3, 5e-3, 1e-4]),
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
}
return model_params, other_params
return GraphSAGE, params_search
if model_name == "sgcn":
from grb.model.torch import SGCN
def params_search(trial):
model_params = {
"hidden_features": trial.suggest_categorical("hidden_features", [32, 64, 128, 256]),
"n_layers" : trial.suggest_categorical("n_layers", [2, 3, 4, 5]),
"dropout" : trial.suggest_categorical("dropout", [0.5, 0.6, 0.7, 0.8]),
}
other_params = {
"lr" : trial.suggest_categorical("lr", [1e-2, 1e-3, 5e-3, 1e-4]),
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
}
return model_params, other_params
return SGCN, params_search
if model_name == "tagcn":
from grb.model.torch import TAGCN
def params_search(trial):
model_params = {
"hidden_features": trial.suggest_categorical("hidden_features", [32, 64, 128, 256]),
"n_layers" : trial.suggest_categorical("n_layers", [2, 3, 4, 5]),
"k" : trial.suggest_categorical("k", [2, 3, 4, 5]),
"dropout" : trial.suggest_categorical("dropout", [0.5, 0.6, 0.7, 0.8]),
}
other_params = {
"lr" : trial.suggest_categorical("lr", [1e-2, 1e-3, 5e-3, 1e-4]),
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
}
return model_params, other_params
return TAGCN, params_search
if model_name == "appnp":
from grb.model.torch import APPNP
def params_search(trial):
model_params = {
"hidden_features": trial.suggest_categorical("hidden_features", [32, 64, 128, 256]),
"n_layers" : trial.suggest_categorical("n_layers", [2, 3, 4, 5]),
"k" : trial.suggest_categorical("k", [2, 3, 4, 5]),
"dropout" : trial.suggest_categorical("dropout", [0.5, 0.6, 0.7, 0.8]),
}
other_params = {
"lr" : trial.suggest_categorical("lr", [1e-2, 1e-3, 5e-3, 1e-4]),
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
}
return model_params, other_params
return APPNP, params_search
if model_name == "gin":
from grb.model.torch import GIN
def params_search(trial):
model_params = {
"hidden_features": trial.suggest_categorical("hidden_features", [32, 64, 128, 256]),
"n_layers" : trial.suggest_categorical("n_layers", [2, 3, 4, 5]),
"dropout" : trial.suggest_categorical("dropout", [0.5, 0.6, 0.7, 0.8]),
}
other_params = {
"lr" : trial.suggest_categorical("lr", [1e-2, 1e-3, 5e-3, 1e-4]),
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
}
return model_params, other_params
return GIN, params_search
if model_name == "gat":
from grb.model.dgl import GAT
def params_search(trial):
model_params = {
"hidden_features": trial.suggest_categorical("hidden_features", [32, 64, 128, 256]),
"n_layers" : trial.suggest_categorical("n_layers", [2, 3, 4, 5]),
"n_heads" : trial.suggest_categorical("n_heads", [2, 4, 6, 8]),
"dropout" : trial.suggest_categorical("dropout", [0.5, 0.6, 0.7, 0.8]),
}
other_params = {
"lr" : trial.suggest_categorical("lr", [1e-2, 1e-3, 5e-3, 1e-4]),
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
}
return model_params, other_params
return GAT, params_search
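# Hypothetical wiring of the builders above (commented out because it needs a
# loaded grb dataset object, here assumed to be called `dataset`):
# model, train_params = build_model("gcn", dataset.num_features, dataset.num_classes)
# optimizer = build_optimizer(model, lr=train_params["lr"])
# loss_fn, metric_fn = build_loss(), build_metric()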
| 39.193015 | 100 | 0.463862 | [
"MIT"
] | sigeisler/grb | pipeline/configs/grb-citeseer/config.py | 21,321 | Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
module: sns_topic
short_description: Manages AWS SNS topics and subscriptions
version_added: 1.0.0
description:
- The M(community.aws.sns_topic) module allows you to create, delete, and manage subscriptions for AWS SNS topics.
  - As of 2.6, this module can be used to subscribe and unsubscribe to topics outside of your AWS account.
author:
- "Joel Thompson (@joelthompson)"
- "Fernando Jose Pando (@nand0p)"
- "Will Thames (@willthames)"
options:
name:
description:
- The name or ARN of the SNS topic to manage.
required: true
type: str
state:
description:
- Whether to create or destroy an SNS topic.
default: present
choices: ["absent", "present"]
type: str
display_name:
description:
- Display name of the topic.
type: str
policy:
description:
- Policy to apply to the SNS topic.
type: dict
delivery_policy:
description:
- Delivery policy to apply to the SNS topic.
type: dict
subscriptions:
description:
- List of subscriptions to apply to the topic. Note that AWS requires
subscriptions to be confirmed, so you will need to confirm any new
subscriptions.
suboptions:
endpoint:
description: Endpoint of subscription.
required: true
protocol:
description: Protocol of subscription.
required: true
type: list
elements: dict
default: []
purge_subscriptions:
description:
- "Whether to purge any subscriptions not listed here. NOTE: AWS does not
allow you to purge any PendingConfirmation subscriptions, so if any
exist and would be purged, they are silently skipped. This means that
somebody could come back later and confirm the subscription. Sorry.
Blame Amazon."
default: true
type: bool
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
requirements: [ "boto" ]
'''
EXAMPLES = r"""
- name: Create alarm SNS topic
community.aws.sns_topic:
name: "alarms"
state: present
display_name: "alarm SNS topic"
delivery_policy:
http:
defaultHealthyRetryPolicy:
minDelayTarget: 2
maxDelayTarget: 4
numRetries: 3
numMaxDelayRetries: 5
backoffFunction: "<linear|arithmetic|geometric|exponential>"
disableSubscriptionOverrides: True
defaultThrottlePolicy:
maxReceivesPerSecond: 10
subscriptions:
- endpoint: "[email protected]"
protocol: "email"
- endpoint: "my_mobile_number"
protocol: "sms"
"""
RETURN = r'''
sns_arn:
description: The ARN of the topic you are modifying
type: str
returned: always
sample: "arn:aws:sns:us-east-2:111111111111:my_topic_name"
community.aws.sns_topic:
description: Dict of sns topic details
type: complex
returned: always
contains:
attributes_set:
description: list of attributes set during this run
returned: always
type: list
sample: []
check_mode:
description: whether check mode was on
returned: always
type: bool
sample: false
delivery_policy:
description: Delivery policy for the SNS topic
returned: when topic is owned by this AWS account
type: str
sample: >
{"http":{"defaultHealthyRetryPolicy":{"minDelayTarget":20,"maxDelayTarget":20,"numRetries":3,"numMaxDelayRetries":0,
"numNoDelayRetries":0,"numMinDelayRetries":0,"backoffFunction":"linear"},"disableSubscriptionOverrides":false}}
display_name:
description: Display name for SNS topic
returned: when topic is owned by this AWS account
type: str
sample: My topic name
name:
description: Topic name
returned: always
type: str
sample: ansible-test-dummy-topic
owner:
description: AWS account that owns the topic
returned: when topic is owned by this AWS account
type: str
sample: '111111111111'
policy:
description: Policy for the SNS topic
returned: when topic is owned by this AWS account
type: str
sample: >
{"Version":"2012-10-17","Id":"SomePolicyId","Statement":[{"Sid":"ANewSid","Effect":"Allow","Principal":{"AWS":"arn:aws:iam::111111111111:root"},
"Action":"sns:Subscribe","Resource":"arn:aws:sns:us-east-2:111111111111:ansible-test-dummy-topic","Condition":{"StringEquals":{"sns:Protocol":"email"}}}]}
state:
description: whether the topic is present or absent
returned: always
type: str
sample: present
subscriptions:
description: List of subscribers to the topic in this AWS account
returned: always
type: list
sample: []
subscriptions_added:
description: List of subscribers added in this run
returned: always
type: list
sample: []
subscriptions_confirmed:
description: Count of confirmed subscriptions
returned: when topic is owned by this AWS account
type: str
sample: '0'
subscriptions_deleted:
description: Count of deleted subscriptions
returned: when topic is owned by this AWS account
type: str
sample: '0'
subscriptions_existing:
description: List of existing subscriptions
returned: always
type: list
sample: []
subscriptions_new:
description: List of new subscriptions
returned: always
type: list
sample: []
subscriptions_pending:
description: Count of pending subscriptions
returned: when topic is owned by this AWS account
type: str
sample: '0'
subscriptions_purge:
description: Whether or not purge_subscriptions was set
returned: always
type: bool
sample: true
topic_arn:
description: ARN of the SNS topic (equivalent to sns_arn)
returned: when topic is owned by this AWS account
type: str
sample: arn:aws:sns:us-east-2:111111111111:ansible-test-dummy-topic
topic_created:
description: Whether the topic was created
returned: always
type: bool
sample: false
topic_deleted:
description: Whether the topic was deleted
returned: always
type: bool
sample: false
'''
import json
import re
import copy
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies, AWSRetry, camel_dict_to_snake_dict
class SnsTopicManager(object):
""" Handles SNS Topic creation and destruction """
def __init__(self,
module,
name,
state,
display_name,
policy,
delivery_policy,
subscriptions,
purge_subscriptions,
check_mode):
self.connection = module.client('sns')
self.module = module
self.name = name
self.state = state
self.display_name = display_name
self.policy = policy
self.delivery_policy = delivery_policy
self.subscriptions = subscriptions
self.subscriptions_existing = []
self.subscriptions_deleted = []
self.subscriptions_added = []
self.purge_subscriptions = purge_subscriptions
self.check_mode = check_mode
self.topic_created = False
self.topic_deleted = False
self.topic_arn = None
self.attributes_set = []
@AWSRetry.jittered_backoff()
def _list_topics_with_backoff(self):
paginator = self.connection.get_paginator('list_topics')
return paginator.paginate().build_full_result()['Topics']
@AWSRetry.jittered_backoff(catch_extra_error_codes=['NotFound'])
def _list_topic_subscriptions_with_backoff(self):
paginator = self.connection.get_paginator('list_subscriptions_by_topic')
return paginator.paginate(TopicArn=self.topic_arn).build_full_result()['Subscriptions']
@AWSRetry.jittered_backoff(catch_extra_error_codes=['NotFound'])
def _list_subscriptions_with_backoff(self):
paginator = self.connection.get_paginator('list_subscriptions')
return paginator.paginate().build_full_result()['Subscriptions']
def _list_topics(self):
try:
topics = self._list_topics_with_backoff()
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't get topic list")
return [t['TopicArn'] for t in topics]
def _topic_arn_lookup(self):
# topic names cannot have colons, so this captures the full topic name
all_topics = self._list_topics()
lookup_topic = ':%s' % self.name
for topic in all_topics:
if topic.endswith(lookup_topic):
return topic
def _create_topic(self):
if not self.check_mode:
try:
response = self.connection.create_topic(Name=self.name)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't create topic %s" % self.name)
self.topic_arn = response['TopicArn']
return True
def _compare_delivery_policies(self, policy_a, policy_b):
_policy_a = copy.deepcopy(policy_a)
_policy_b = copy.deepcopy(policy_b)
# AWS automatically injects disableSubscriptionOverrides if you set an
# http policy
if 'http' in policy_a:
if 'disableSubscriptionOverrides' not in policy_a['http']:
_policy_a['http']['disableSubscriptionOverrides'] = False
if 'http' in policy_b:
if 'disableSubscriptionOverrides' not in policy_b['http']:
_policy_b['http']['disableSubscriptionOverrides'] = False
comparison = (_policy_a != _policy_b)
return comparison
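    # Illustrative note (editor's addition, hypothetical values): with the
    # normalisation above, a user-supplied policy that omits the AWS-injected
    # key still compares equal to what AWS returns, e.g.
    #   a = {'http': {'defaultHealthyRetryPolicy': {'numRetries': 3}}}
    #   b = {'http': {'defaultHealthyRetryPolicy': {'numRetries': 3},
    #                 'disableSubscriptionOverrides': False}}
    #   self._compare_delivery_policies(a, b)  -> False (no update needed)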
def _set_topic_attrs(self):
changed = False
try:
topic_attributes = self.connection.get_topic_attributes(TopicArn=self.topic_arn)['Attributes']
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't get topic attributes for topic %s" % self.topic_arn)
if self.display_name and self.display_name != topic_attributes['DisplayName']:
changed = True
self.attributes_set.append('display_name')
if not self.check_mode:
try:
self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='DisplayName',
AttributeValue=self.display_name)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't set display name")
if self.policy and compare_policies(self.policy, json.loads(topic_attributes['Policy'])):
changed = True
self.attributes_set.append('policy')
if not self.check_mode:
try:
self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='Policy',
AttributeValue=json.dumps(self.policy))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't set topic policy")
if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or
self._compare_delivery_policies(self.delivery_policy, json.loads(topic_attributes['DeliveryPolicy']))):
changed = True
self.attributes_set.append('delivery_policy')
if not self.check_mode:
try:
self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='DeliveryPolicy',
AttributeValue=json.dumps(self.delivery_policy))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't set topic delivery policy")
return changed
def _canonicalize_endpoint(self, protocol, endpoint):
if protocol == 'sms':
return re.sub('[^0-9]*', '', endpoint)
return endpoint
def _set_topic_subs(self):
changed = False
subscriptions_existing_list = set()
desired_subscriptions = [(sub['protocol'],
self._canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in
self.subscriptions]
for sub in self._list_topic_subscriptions():
sub_key = (sub['Protocol'], sub['Endpoint'])
subscriptions_existing_list.add(sub_key)
if (self.purge_subscriptions and sub_key not in desired_subscriptions and
sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted')):
changed = True
self.subscriptions_deleted.append(sub_key)
if not self.check_mode:
try:
self.connection.unsubscribe(SubscriptionArn=sub['SubscriptionArn'])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't unsubscribe from topic")
for protocol, endpoint in set(desired_subscriptions).difference(subscriptions_existing_list):
changed = True
self.subscriptions_added.append((protocol, endpoint))
if not self.check_mode:
try:
self.connection.subscribe(TopicArn=self.topic_arn, Protocol=protocol, Endpoint=endpoint)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't subscribe to topic %s" % self.topic_arn)
return changed
def _list_topic_subscriptions(self):
try:
return self._list_topic_subscriptions_with_backoff()
except is_boto3_error_code('AuthorizationError'):
try:
# potentially AuthorizationError when listing subscriptions for third party topic
return [sub for sub in self._list_subscriptions_with_backoff()
if sub['TopicArn'] == self.topic_arn]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % self.topic_arn)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
self.module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % self.topic_arn)
def _delete_subscriptions(self):
# NOTE: subscriptions in 'PendingConfirmation' timeout in 3 days
# https://forums.aws.amazon.com/thread.jspa?threadID=85993
subscriptions = self._list_topic_subscriptions()
if not subscriptions:
return False
for sub in subscriptions:
if sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted'):
self.subscriptions_deleted.append(sub['SubscriptionArn'])
if not self.check_mode:
try:
self.connection.unsubscribe(SubscriptionArn=sub['SubscriptionArn'])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't unsubscribe from topic")
return True
def _delete_topic(self):
self.topic_deleted = True
if not self.check_mode:
try:
self.connection.delete_topic(TopicArn=self.topic_arn)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't delete topic %s" % self.topic_arn)
return True
def _name_is_arn(self):
return self.name.startswith('arn:')
def ensure_ok(self):
changed = False
if self._name_is_arn():
self.topic_arn = self.name
else:
self.topic_arn = self._topic_arn_lookup()
if not self.topic_arn:
changed = self._create_topic()
if self.topic_arn in self._list_topics():
changed |= self._set_topic_attrs()
elif self.display_name or self.policy or self.delivery_policy:
self.module.fail_json(msg="Cannot set display name, policy or delivery policy for SNS topics not owned by this account")
changed |= self._set_topic_subs()
return changed
def ensure_gone(self):
changed = False
if self._name_is_arn():
self.topic_arn = self.name
else:
self.topic_arn = self._topic_arn_lookup()
if self.topic_arn:
if self.topic_arn not in self._list_topics():
self.module.fail_json(msg="Cannot use state=absent with third party ARN. Use subscribers=[] to unsubscribe")
changed = self._delete_subscriptions()
changed |= self._delete_topic()
return changed
def get_info(self):
info = {
'name': self.name,
'state': self.state,
'subscriptions_new': self.subscriptions,
'subscriptions_existing': self.subscriptions_existing,
'subscriptions_deleted': self.subscriptions_deleted,
'subscriptions_added': self.subscriptions_added,
'subscriptions_purge': self.purge_subscriptions,
'check_mode': self.check_mode,
'topic_created': self.topic_created,
'topic_deleted': self.topic_deleted,
'attributes_set': self.attributes_set,
}
if self.state != 'absent':
if self.topic_arn in self._list_topics():
info.update(camel_dict_to_snake_dict(self.connection.get_topic_attributes(TopicArn=self.topic_arn)['Attributes']))
info['delivery_policy'] = info.pop('effective_delivery_policy')
info['subscriptions'] = [camel_dict_to_snake_dict(sub) for sub in self._list_topic_subscriptions()]
return info
def main():
argument_spec = dict(
name=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
display_name=dict(),
policy=dict(type='dict'),
delivery_policy=dict(type='dict'),
subscriptions=dict(default=[], type='list', elements='dict'),
purge_subscriptions=dict(type='bool', default=True),
)
module = AnsibleAWSModule(argument_spec=argument_spec,
supports_check_mode=True)
name = module.params.get('name')
state = module.params.get('state')
display_name = module.params.get('display_name')
policy = module.params.get('policy')
delivery_policy = module.params.get('delivery_policy')
subscriptions = module.params.get('subscriptions')
purge_subscriptions = module.params.get('purge_subscriptions')
check_mode = module.check_mode
sns_topic = SnsTopicManager(module,
name,
state,
display_name,
policy,
delivery_policy,
subscriptions,
purge_subscriptions,
check_mode)
if state == 'present':
changed = sns_topic.ensure_ok()
elif state == 'absent':
changed = sns_topic.ensure_gone()
sns_facts = dict(changed=changed,
sns_arn=sns_topic.topic_arn,
sns_topic=sns_topic.get_info())
module.exit_json(**sns_facts)
if __name__ == '__main__':
main()
| 39.256654 | 162 | 0.638239 | [
"MIT"
] | DiptoChakrabarty/nexus | venv/lib/python3.7/site-packages/ansible_collections/community/aws/plugins/modules/sns_topic.py | 20,649 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .partition_safety_check import PartitionSafetyCheck
class WaitForPrimaryPlacementSafetyCheck(PartitionSafetyCheck):
"""Safety check that waits for the primary replica that was moved out of the
node due to upgrade to be placed back again on that node.
:param kind: Constant filled by server.
:type kind: str
:param partition_id:
:type partition_id: str
"""
_validation = {
'kind': {'required': True},
}
def __init__(self, partition_id=None):
super(WaitForPrimaryPlacementSafetyCheck, self).__init__(partition_id=partition_id)
self.kind = 'WaitForPrimaryPlacement'
| 34.71875 | 91 | 0.633663 | [
"MIT"
] | v-Ajnava/azure-sdk-for-python | azure-servicefabric/azure/servicefabric/models/wait_for_primary_placement_safety_check.py | 1,111 | Python |
#Shows data from the first 1000 blocks
import random
import os
import subprocess
import json
#Set this to your raven-cli program
cli = "raven-cli"
#mode = "-testnet"
mode = ""
rpc_port = 8746
#Set this information in your raven.conf file (in datadir, not testnet3)
rpc_user = 'rpcuser'
rpc_pass = 'rpcpass555'
def rpc_call(params):
process = subprocess.Popen([cli, mode, params], stdout=subprocess.PIPE)
out, err = process.communicate()
return(out)
def get_blockinfo(num):
rpc_connection = get_rpc_connection()
hash = rpc_connection.getblockhash(num)
blockinfo = rpc_connection.getblock(hash)
return(blockinfo)
def get_rpc_connection():
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
connection = "http://%s:%[email protected]:%s"%(rpc_user, rpc_pass, rpc_port)
#print("Connection: " + connection)
rpc_connection = AuthServiceProxy(connection)
return(rpc_connection)
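# Illustrative sketch (editor's addition): the same AuthServiceProxy connection
# dispatches any raven RPC method by attribute name, e.g. the standard
# getblockcount call below. This helper is not used by the loop that follows.
def get_chain_height():
    rpc_connection = get_rpc_connection()
    return rpc_connection.getblockcount()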
for i in range(1,1000):
dta = get_blockinfo(i)
print("Block #" + str(i))
print(dta.get('hash'))
print(dta.get('difficulty'))
print(dta.get('time'))
print("")
| 24.391304 | 75 | 0.703209 | [
"MIT"
] | Clotonervo/TestCoin | assets/tools/blockfacts.py | 1,122 | Python |
"""
Distribution class
"""
# To do:
#
# - wrap bins for cyclic histograms
# - check use of float() in count_mag() etc
# - clarify comment about negative selectivity
#
# - function to return value in a range (like a real histogram)
# - cache values
# - assumes cyclic axes start at 0: include a shift based on range
#
# - is there a way to make this work for arrays without mentioning
# "array" anywhere in here?
# - should this be two classes: one for the core (which would be
# small though) and another for statistics?
import numpy as np
import param
import cmath
import math
unavailable_scipy_optimize = False
try:
from scipy import optimize
except ImportError:
param.Parameterized().debug("scipy.optimize not available, dummy von Mises fit")
unavailable_scipy_optimize = True
def wrap(lower, upper, x):
"""
Circularly alias the numeric value x into the range [lower,upper).
Valid for cyclic quantities like orientations or hues.
"""
#I have no idea how I came up with this algorithm; it should be simplified.
#
# Note that Python's % operator works on floats and arrays;
# usually one can simply use that instead. E.g. to wrap array or
# scalar x into 0,2*pi, just use "x % (2*pi)".
axis_range = upper - lower
return lower + (x - lower + 2.0 * axis_range * (1.0 - math.floor(x / (2.0 * axis_range)))) % axis_range
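# Worked example (editor's addition): for an orientation-like range [0, pi),
#   wrap(0.0, math.pi, 3.5)   -> 3.5 - pi  (~0.358)
#   wrap(0.0, math.pi, -0.2)  -> pi - 0.2  (~2.942)
# i.e. out-of-range values are aliased back in, just as "x % pi" would do.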
def calc_theta(bins, axis_range):
"""
Convert a bin number to a direction in radians.
Works for NumPy arrays of bin numbers, returning
an array of directions.
"""
return np.exp( (2.0 * np.pi) * bins / axis_range * 1.0j )
class Distribution(object):
"""
Holds a distribution of the values f(x) associated with a variable x.
A Distribution is a histogram-like object that is a dictionary of
samples. Each sample is an x:f(x) pair, where x is called the feature_bin
and f(x) is called the value(). Each feature_bin's value is typically
maintained as the sum of all the values that have been placed into
it.
The feature_bin axis is continuous, and can represent a continuous
quantity without discretization. Alternatively, this class can be
used as a traditional histogram by either discretizing the feature_bin
number before adding each sample, or by binning the values in the
final Distribution.
Distributions are bounded by the specified axis_bounds, and can
either be cyclic (like directions or hues) or non-cyclic. For
cyclic distributions, samples provided outside the axis_bounds
will be wrapped back into the bound range, as is appropriate for
quantities like directions. For non-cyclic distributions,
providing samples outside the axis_bounds will result in a
ValueError.
In addition to the values, can also return the counts, i.e., the
number of times that a sample has been added with the given feature_bin.
Not all instances of this class will be a true distribution in the
mathematical sense; e.g. the values will have to be normalized
before they can be considered a probability distribution.
If keep_peak=True, the value stored in each feature_bin will be the
maximum of all values ever added, instead of the sum. The
distribution will thus be a record of the maximum value
seen at each feature_bin, also known as an envelope.
"""
# Holds the number of times that undefined values have been
# returned from calculations for any instance of this class,
# e.g. calls to vector_direction() or vector_selectivity() when no
# value is non-zero. Useful for warning users when the values are
# not meaningful.
undefined_vals = 0
def __init__(self, axis_bounds, axis_range, cyclic, data, counts, total_count, total_value, theta):
self._data = data
self._counts = counts
# total_count and total_value hold the total number and sum
# (respectively) of values that have ever been provided for
# each feature_bin. For a simple distribution these will be the same as
# sum_counts() and sum_values().
self.total_count = total_count
self.total_value = total_value
self.axis_bounds = axis_bounds
self.axis_range = axis_range
self.cyclic = cyclic
self._pop_store = None
# Cache busy data
self._keys = list(data.keys())
self._values = list(data.values())
self._theta = theta
if self.cyclic:
# Cache the vector sum
self._vector_sum = self._fast_vector_sum(self._values, theta)
else:
self._vector_sum = None
def data(self):
"""
Answer a dictionary with bins as keys.
"""
return self._data
def pop(self, feature_bin):
"""
Remove the entry with bin from the distribution.
"""
if self._pop_store is not None:
raise Exception("Distribution: attempt to pop value before outstanding restore")
self._pop_store = self._data.pop(feature_bin)
self._keys = list(self._data.keys())
self._values = list(self._data.values())
def restore(self, feature_bin):
"""
Restore the entry with bin from the distribution.
Only valid if called after a pop.
"""
if self._pop_store is None:
raise Exception("Distribution: attempt to restore value before pop")
self._data[feature_bin] = self._pop_store
self._pop_store = None
self._keys = list(self._data.keys())
self._values = list(self._data.values())
def vector_sum(self):
"""
Return the vector sum of the distribution as a tuple (magnitude, avgbinnum).
Each feature_bin contributes a vector of length equal to its value, at
a direction corresponding to the feature_bin number. Specifically,
the total feature_bin number range is mapped into a direction range
[0,2pi].
For a cyclic distribution, the avgbinnum will be a continuous
measure analogous to the max_value_bin() of the distribution.
But this quantity has more precision than max_value_bin()
because it is computed from the entire distribution instead of
just the peak feature_bin. However, it is likely to be useful only
for uniform or very dense sampling; with sparse, non-uniform
sampling the estimates will be biased significantly by the
particular samples chosen.
The avgbinnum is not meaningful when the magnitude is 0,
because a zero-length vector has no direction. To find out
whether such cases occurred, you can compare the value of
undefined_vals before and after a series of calls to this
function.
This tries to use cached values of this.
"""
if self._vector_sum is None:
# There is a non cyclic distribution that is using this.
# Calculate and then cache it
# First check if there is a cached theta. If not derive it.
if self._theta is None:
self._theta = calc_theta(np.array(self._keys), self.axis_range)
self._vector_sum = self._fast_vector_sum(self._values, self._theta)
return self._vector_sum
def _fast_vector_sum(self, values, theta):
"""
Return the vector sum of the distribution as a tuple (magnitude, avgbinnum).
This implementation assumes that the values of the distribution needed for the
vector sum will not be changed and depends on cached values.
"""
# vectors are represented in polar form as complex numbers
v_sum = np.inner(values, theta)
magnitude = abs(v_sum)
direction = cmath.phase(v_sum)
if v_sum == 0:
self.undefined_vals += 1
direction_radians = self._radians_to_bins(direction)
# wrap the direction because arctan2 returns principal values
wrapped_direction = wrap(self.axis_bounds[0], self.axis_bounds[1], direction_radians)
return (magnitude, wrapped_direction)
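    # Worked example (editor's addition): with axis_range=360 (directions in
    # degrees) and values {0: 2.0, 90: 1.0}, the bin directions are 1 and 1j,
    # so v_sum = 2 + 1j, giving magnitude ~2.236 and a wrapped direction of
    # ~26.6 degrees -- a continuous analogue of the preferred direction.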
def get_value(self, feature_bin):
"""
Return the value of the specified feature_bin.
(Return None if there is no such feature_bin.)
"""
return self._data.get(feature_bin)
def get_count(self, feature_bin):
"""
Return the count from the specified feature_bin.
(Return None if there is no such feature_bin.)
"""
return self._counts.get(feature_bin)
def values(self):
"""
Return a list of values.
Various statistics can then be calculated if desired:
sum(vals) (total of all values)
max(vals) (highest value in any feature_bin)
Note that the feature_bin-order of values returned does not necessarily
match that returned by counts().
"""
return self._values
def counts(self):
"""
        Return a list of counts.
Various statistics can then be calculated if desired:
sum(counts) (total of all counts)
max(counts) (highest count in any feature_bin)
Note that the feature_bin-order of values returned does not necessarily
match that returned by values().
"""
return list(self._counts.values())
def bins(self):
"""
Return a list of bins that have been populated.
"""
return self._keys
def sub_distr( self, distr ):
"""
Subtract the given distribution from the current one.
Only existing bins are modified, new bins in the given
distribution are discarded without raising errors.
Note that total_value and total_count are not affected, and
keep_peak is ignored, therefore analysis relying on these
values should not call this method.
"""
for b in distr.bins():
if b in self.bins():
v = distr._data.get(b)
if v is not None: self._data[b] -= v
def max_value_bin(self):
"""
Return the feature_bin with the largest value.
        Note that this uses cached values, so pop() and restore() must be
        used if you want to work with an altered distribution.
"""
return self._keys[np.argmax(self._values)]
def weighted_sum(self):
"""Return the sum of each value times its feature_bin."""
return np.inner(self._keys, self._values)
def value_mag(self, feature_bin):
"""Return the value of a single feature_bin as a proportion of total_value."""
return self._safe_divide(self._data.get(feature_bin), self.total_value)
def count_mag(self, feature_bin):
"""Return the count of a single feature_bin as a proportion of total_count."""
return self._safe_divide(float(self._counts.get(feature_bin)), float(self.total_count))
# use of float()
def _bins_to_radians(self, bin):
"""
Convert a bin number to a direction in radians.
Works for NumPy arrays of bin numbers, returning
an array of directions.
"""
return (2*np.pi)*bin/self.axis_range
def _radians_to_bins(self, direction):
"""
Convert a direction in radians into a feature_bin number.
Works for NumPy arrays of direction, returning
an array of feature_bin numbers.
"""
return direction * self.axis_range / (2 * np.pi)
def _safe_divide(self, numerator, denominator):
"""
Division routine that avoids division-by-zero errors
(returning zero in such cases) but keeps track of them
for undefined_values().
"""
if denominator == 0:
self.undefined_vals += 1
return 0
else:
return numerator/denominator
class Pref(dict):
"""
    This class simply collects named arguments into a dictionary; its main
    purpose is to make the output of DistributionStatisticFn functions easy
    to read.
    In addition, missing keys are trapped and return None instead of raising.
"""
def __init__(self, **args):
dict.__init__(self, **args)
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
return None
class DistributionStatisticFn(param.Parameterized):
"""
Base class for various functions performing statistics on a distribution.
"""
value_scale = param.NumericTuple((0.0, 1.0), doc="""
Scaling of the resulting value of the distribution statistics,
typically the preference of a unit to feature values. The tuple
specifies (offset, multiplier) of the output scaling""")
# APNOTE: previously selectivity_scale[ 1 ] used to be 17, a value suitable
# for combining preference and selectivity in HSV plots. Users wishing to keep
# this value should now set it when creating SheetViews, in commands like that
# in command/analysis.py
selectivity_scale = param.NumericTuple((0.0, 1.0), doc="""
Scaling of the resulting measure of the distribution peakedness,
typically the selectivity of a unit to its preferred feature value.
The tuple specifies (offset, multiplier) of the output scaling""")
__abstract = True
def __call__(self, distribution):
"""
Apply the distribution statistic function; must be implemented by subclasses.
        Subclasses should be called with a Distribution as argument; the return
        value is a dictionary with Pref objects as values.
"""
raise NotImplementedError
class DescriptiveStatisticFn(DistributionStatisticFn):
"""
Abstract class for basic descriptive statistics
"""
def vector_sum(self, d):
"""
Return the vector sum of the distribution as a tuple (magnitude, avgbinnum).
Each bin contributes a vector of length equal to its value, at
a direction corresponding to the bin number. Specifically,
the total bin number range is mapped into a direction range
[0,2pi].
For a cyclic distribution, the avgbinnum will be a continuous
measure analogous to the max_value_bin() of the distribution.
But this quantity has more precision than max_value_bin()
because it is computed from the entire distribution instead of
just the peak bin. However, it is likely to be useful only
for uniform or very dense sampling; with sparse, non-uniform
sampling the estimates will be biased significantly by the
particular samples chosen.
The avgbinnum is not meaningful when the magnitude is 0,
because a zero-length vector has no direction. To find out
whether such cases occurred, you can compare the value of
undefined_vals before and after a series of calls to this
function.
This is a slow algorithm and should only be used if the
contents of the distribution have been changed by the statistical
function.
If not, then the cached value in the distribution should be used.
"""
# vectors are represented in polar form as complex numbers
h = d.data()
theta = calc_theta(np.array(list(h.keys())), d.axis_range)
return d._fast_vector_sum(list(h.values()), theta)
def _weighted_average(self, d ):
"""
Return the weighted_sum divided by the sum of the values
"""
return d._safe_divide(d.weighted_sum(), sum(d.values()))
def selectivity(self, d):
"""
Return a measure of the peakedness of the distribution. The
calculation differs depending on whether this is a cyclic
variable. For a cyclic variable, returns the magnitude of the
vector_sum() divided by the sum_value() (see
_vector_selectivity for more details). For a non-cyclic
variable, returns the max_value_bin()) as a proportion of the
sum_value() (see _relative_selectivity for more details).
"""
if d.cyclic == True:
return self._vector_selectivity(d)
else:
return self._relative_selectivity(d)
# CEBHACKALERT: the definition of selectivity for non-cyclic
# quantities probably needs some more thought.
# Additionally, this fails the test in testfeaturemap
# (see the comment there).
def _relative_selectivity(self, d):
"""
Return max_value_bin()) as a proportion of the sum_value().
This quantity is a measure of how strongly the distribution is
biased towards the max_value_bin(). For a smooth,
single-lobed distribution with an inclusive, non-cyclic range,
this quantity is an analog to vector_selectivity. To be a
precise analog for arbitrary distributions, it would need to
compute some measure of the selectivity that works like the
weighted_average() instead of the max_value_bin(). The result
is scaled such that if all bins are identical, the selectivity
is 0.0, and if all bins but one are zero, the selectivity is
1.0.
"""
# A single feature_bin is considered fully selective (but could also
# arguably be considered fully unselective)
if len(d.data()) <= 1:
return 1.0
proportion = d._safe_divide(max(d.values()), sum(d.values()))
offset = 1.0/len(d.values())
scaled = (proportion-offset) / (1.0-offset)
# negative scaled is possible
# e.g. 2 bins, with values that sum to less than 0.5
# this probably isn't what should be done in those cases
if scaled >= 0.0:
return scaled
else:
return 0.0
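    # Worked example (editor's addition): for four bins with values
    # [0.7, 0.1, 0.1, 0.1], proportion = 0.7 and offset = 0.25, so the
    # selectivity is (0.7 - 0.25) / (1 - 0.25) = 0.6; a uniform distribution
    # scores 0.0 and a single non-zero bin scores 1.0, as described above.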
def _vector_selectivity(self, d):
"""
Return the magnitude of the vector_sum() divided by the sum_value().
This quantity is a vector-based measure of the peakedness of
the distribution. If only a single feature_bin has a non-zero value(),
the selectivity will be 1.0, and if all bins have the same
value() then the selectivity will be 0.0. Other distributions
will result in intermediate values.
For a distribution with a sum_value() of zero (i.e. all bins
empty), the selectivity is undefined. Assuming that one will
usually be looking for high selectivity, we return zero in such
a case so that high selectivity will not mistakenly be claimed.
To find out whether such cases occurred, you can compare the
value of undefined_values() before and after a series of
calls to this function.
"""
return d._safe_divide(d.vector_sum()[0], sum(d.values()))
__abstract = True
class DescriptiveBimodalStatisticFn(DescriptiveStatisticFn):
"""
Abstract class for descriptive statistics of two-modes distributions
"""
def second_max_value_bin(self, d):
"""
Return the feature_bin with the second largest value.
        If there is only one feature_bin, return it. This is not strictly a correct
        result, but it is practical for plotting compatibility, and it will not
        mistakenly be claimed as a secondary maximum, because its selectivity
        is forced to 0.0
"""
if len(d.bins()) <= 1:
return d.bins()[0]
k = d.max_value_bin()
d.pop(k)
m = d.max_value_bin()
d.restore(k)
return m
def second_selectivity(self, d):
"""
Return the selectivity of the second largest value in the distribution.
        If there is only one feature_bin, the selectivity is 0, since there is no
        second peak at all; this value is also used to discriminate the validity
        of second_max_value_bin()
        Selectivity is computed in two ways depending on whether the variable is
        cyclic, as in selectivity()
"""
if len( d._data ) <= 1:
return 0.0
if d.cyclic == True:
return self._vector_second_selectivity(d)
else:
return self._relative_second_selectivity(d)
def _relative_second_selectivity(self, d):
"""
Return the value of the second maximum as a proportion of the sum_value()
see _relative_selectivity() for further details
"""
k = d.max_value_bin()
d.pop(k)
m = max(d.values())
d.restore(k)
proportion = d._safe_divide(m, sum(d.values()))
offset = 1.0 / len(d.data())
scaled = (proportion - offset) / (1.0 - offset)
return max(scaled, 0.0)
def _vector_second_selectivity(self, d):
"""
Return the magnitude of the vector_sum() of all bins excluding the
maximum one, divided by the sum_value().
see _vector_selectivity() for further details
"""
k = d.max_value_bin()
d.pop(k)
s = self.vector_sum(d)[0]
d.restore(k)
        return d._safe_divide(s, sum(d.values()))
def second_peak_bin(self, d):
"""
Return the feature_bin with the second peak in the distribution.
        Unlike second_max_value_bin(), it does not return a feature_bin that merely
        holds the second largest value while lying on a flank of the first peak;
        the second peak is returned only if the distribution is truly multimodal.
        If it is not, the first peak is returned (for compatibility with numpy
        array types and with plotting), but the corresponding selectivity will be
        forced to 0.0
"""
h = d.data()
l = len(h)
if l <= 1:
            return d.bins()[0]
ks = list(h.keys())
ks.sort()
        ik0 = ks.index(d.bins()[np.argmax(d.values())])
k0 = ks[ik0]
v0 = h[k0]
v = v0
k = k0
ik = ik0
while h[k] <= v:
ik += 1
if ik >= l:
ik = 0
if ik == ik0:
return k0
v = h[k]
k = ks[ik]
ik1 = ik
v = v0
k = k0
ik = ik0
while h[k] <= v:
ik -= 1
if ik < 0:
ik = l - 1
if ik == ik0:
return k0
v = h[k]
k = ks[ik]
ik2 = ik
if ik1 == ik2:
return ks[ik1]
ik = ik1
m = 0
while ik != ik2:
k = ks[ik]
if h[k] > m:
m = h[k]
im = ik
ik += 1
if ik >= l:
ik = 0
return ks[im]
def second_peak_selectivity(self, d):
"""
Return the selectivity of the second peak in the distribution.
        If the distribution has only one peak, return 0.0; this value is
        also useful to discriminate the validity of second_peak_bin()
"""
        l = len(d.bins())
if l <= 1:
return 0.0
p1 = d.max_value_bin()
p2 = self.second_peak_bin(d)
if p1 == p2:
return 0.0
m = d.get_value(p2)
proportion = d._safe_divide(m, sum(d.values()))
offset = 1.0 / l
scaled = (proportion - offset) / (1.0 - offset)
return max(scaled, 0.0)
def second_peak(self, d):
"""
Return preference and selectivity of the second peak in the distribution.
It is just the combination of second_peak_bin() and
second_peak_selectivity(), with the advantage of avoiding a duplicate
call of second_peak_bin(), if the user is interested in both preference
and selectivity, as often is the case.
"""
        l = len(d.bins())
if l <= 1:
            return (d.bins()[0], 0.0)
p1 = d.max_value_bin()
p2 = self.second_peak_bin(d)
if p1 == p2:
return (p1, 0.0)
m = d.get_value(p2)
proportion = d._safe_divide(m, sum(d.values()))
offset = 1.0 / l
scaled = (proportion - offset) / (1.0 - offset)
return (p2, max(scaled, 0.0))
__abstract = True
class DSF_MaxValue(DescriptiveStatisticFn):
"""
Return the peak value of the given distribution
"""
def __call__(self, d):
p = self.value_scale[1] * (d.max_value_bin() + self.value_scale[0])
s = self.selectivity_scale[1] * (self.selectivity(d)+self.selectivity_scale[0])
return {"": Pref(preference=p, selectivity=s)}
class DSF_WeightedAverage(DescriptiveStatisticFn):
"""
Return the main mode of the given distribution
    The preference value is a continuous, interpolated equivalent of the max_value_bin().
For a cyclic distribution, this is the direction of the vector
sum (see vector_sum()).
For a non-cyclic distribution, this is the arithmetic average
of the data on the bin_axis, where each feature_bin is weighted by its
value.
Such a computation will generally produce much more precise maps using
fewer test stimuli than the discrete method. However, weighted_average
methods generally require uniform and full-range sampling, which is not
always feasible.
For measurements at evenly-spaced intervals over the full range of
possible parameter values, weighted_averages are a good measure of the
underlying continuous-valued parameter preference, assuming that neurons
are tuned broadly enough (and/or sampled finely enough) that they
respond to at least two of the tested parameter values. This method
will not usually give good results when those criteria are not met, i.e.
if the sampling is too sparse, not at evenly-spaced intervals, or does
not cover the full range of possible values. In such cases
max_value_bin should be used, and the number of test patterns will
usually need to be increased instead.
"""
def __call__(self, d):
p = d.vector_sum()[1] if d.cyclic else self._weighted_average(d)
p = self.value_scale[1] * (p + self.value_scale[0])
s = self.selectivity_scale[1] * (self.selectivity(d) + self.selectivity_scale[0])
return {"": Pref(preference=p, selectivity=s)}
class DSF_TopTwoValues(DescriptiveBimodalStatisticFn):
"""
Return the two max values of distributions in the given matrix
"""
def __call__(self, d):
r = {}
p = self.value_scale[1] * (d.max_value_bin() + self.value_scale[0])
s = self.selectivity_scale[1] * (self.selectivity(d) + self.selectivity_scale[0])
r[""] = Pref(preference=p, selectivity=s)
p = self.second_max_value_bin(d)
s = self.second_selectivity(d)
p = self.value_scale[1] * (p + self.value_scale[0])
s = self.selectivity_scale[1] * (s + self.selectivity_scale[0])
r["Mode2"] = Pref(preference=p, selectivity=s)
return r
class DSF_BimodalPeaks(DescriptiveBimodalStatisticFn):
"""
Return the two peak values of distributions in the given matrix
"""
def __call__(self, d):
r = {}
p = self.value_scale[1] * (d.max_value_bin() + self.value_scale[0])
s = self.selectivity_scale[1] * (self.selectivity(d) + self.selectivity_scale[0])
r[""] = Pref(preference=p, selectivity=s)
p, s = self.second_peak(d)
p = self.value_scale[1] * (p + self.value_scale[0])
s = self.selectivity_scale[1] * (s + self.selectivity_scale[0])
r["Mode2"] = Pref(preference=p, selectivity=s)
return r
class VonMisesStatisticFn(DistributionStatisticFn):
"""
Base class for von Mises statistics
"""
# values to fit the maximum value of k parameter in von Mises distribution,
# as a function of the number of bins in the distribution. Useful for
# keeping selectivity in range 0..1. Values derived offline from distribution
# with a single active feature_bin, and total bins from 8 to 32
vm_kappa_fit = (0.206, 0.614)
# level of activity in units confoundable with noise. Used in von Mises fit,
# for two purposes: if the standard deviation of a distribution is below this
# value, the distribution is assumed to lack any mode; it is the maximum level
# of random noise added to a distribution before the fit optimization, for
# stability reasons
noise_level = 0.001
# exit code of the distribution fit function. Codes are function-specific and
    # each fit function, if it provides exit codes, should have a corresponding string translation
fit_exit_code = 0
user_warned_if_unavailable = False
__abstract = True
def _orth(self, t):
"""
Return the orthogonal orientation
"""
if t < 0.5 * np.pi:
return t + 0.5 * np.pi
return t - 0.5 * np.pi
def _in_pi(self, t):
"""
Reduce orientation from -pi..2pi to 0..pi
"""
if t > np.pi:
return t - np.pi
if t < 0:
return t + np.pi
return t
def von_mises(self, pars, x):
"""
Compute a simplified von Mises function.
Original formulation in Richard von Mises, "Wahrscheinlichkeitsrechnung
und ihre Anwendungen in der Statistik und theoretischen Physik", 1931,
Deuticke, Leipzig; see also Mardia, K.V. and Jupp, P.E., " Directional
Statistics", 1999, J. Wiley, p.36;
http://en.wikipedia.org/wiki/Von_Mises_distribution
The two differences are that this function is a continuous probability
distribution on a semi-circle, while von Mises is on the full circle,
and that the normalization factor, which is the inverse of the modified
Bessel function of first kind and 0 degree in the original, is here a fit parameter.
"""
a, k, t = pars
return a * np.exp(k * (np.cos(2 * (x - t)) - 1))
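    # Editor's note: the simplified form above is
    #     f(x; a, k, t) = a * exp(k * (cos(2*(x - t)) - 1))
    # so the peak value is exactly `a` at x = t, larger `k` gives a narrower
    # peak, and the factor 2 makes the period pi (a semi-circle) rather than
    # 2*pi as in the standard von Mises density.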
def von2_mises(self, pars, x):
"""
Compute a simplified bimodal von Mises function
        Two superposed von Mises functions, with different peak and bandwidth values
"""
p1 = pars[: 3]
p2 = pars[3:]
return self.von_mises(p1, x) + self.von_mises(p2, x)
def von_mises_res(self, pars, x, y):
return y - self.von_mises(pars, x)
def von2_mises_res(self, pars, x, y):
return y - self.von2_mises(pars, x)
def norm_sel(self, k, n):
m = (self.vm_kappa_fit[0] + n * self.vm_kappa_fit[1])**2
return np.log(1 + k) / np.log(1 + m)
def fit_vm(self, distribution):
"""
computes the best fit of the monovariate von Mises function in the
semi-circle.
Return a tuple with the orientation preference, in the same range of
axis_bounds, the orientation selectivity, and an estimate of the
goodness-of-fit, as the variance of the predicted orientation
        preference. The selectivity is given by the bandwidth parameter of the
        von Mises function, modified for compatibility with other selectivity
        computations in this class. The bandwidth parameter is transposed to a
        logarithmic scale, and is normalized by the maximum value for the number
        of bins in the distribution, in order to give roughly 1.0 for a
        distribution with one feature_bin at 1.0 and all the others at 0.0, and 0.0 for
uniform distributions. The normalizing factor of the selectivity is fit
for the total number of bins, using fit parameters computed offline.
        There are conditions that prevent, a priori, the possibility of fitting the
distribution:
* not enough bins, at least 4 are necessary
* the distribution is too flat, below the noise level
and conditions of aposteriori failures:
* "ier" flag returned by leastsq out of ( 1, 2, 3, 4 )
* no estimated Jacobian around the solution
        * negative bandwidth (the peak of the distribution is convex)
        Note that these are the minimal conditions; their fulfillment does not
        guarantee unimodality, and it is up to the user to check the
        goodness-of-fit value before accepting the fit.
"""
if unavailable_scipy_optimize:
if not VonMisesStatisticFn.user_warned_if_unavailable:
param.Parameterized().warning("scipy.optimize not available, dummy von Mises fit")
VonMisesStatisticFn.user_warned_if_unavailable=True
self.fit_exit_code = 3
return 0, 0, 0
to_pi = np.pi / distribution.axis_range
x = to_pi * np.array(distribution.bins())
n = len(x)
if n < 5:
param.Parameterized().warning("No von Mises fit possible with less than 4 bins")
self.fit_exit_code = -1
return 0, 0, 0
y = np.array(distribution.values())
if y.std() < self.noise_level:
self.fit_exit_code = 1
return 0, 0, 0
rn = self.noise_level * np.random.random_sample(y.shape)
p0 = (1.0, 1.0, distribution.max_value_bin())
r = optimize.leastsq(self.von_mises_res, p0, args=(x, y + rn),
full_output=True)
if not r[-1] in ( 1, 2, 3, 4 ):
self.fit_exit_code = 100 + r[-1]
return 0, 0, 0
residuals = r[2]['fvec']
jacobian = r[1]
bandwith = r[0][1]
tuning = r[0][2]
if bandwith < 0:
self.fit_exit_code = 1
return 0, 0, 0
if jacobian is None:
self.fit_exit_code = 2
return 0, 0, 0
error = (residuals**2).sum() / (n - len(p0))
covariance = jacobian * error
g = covariance[2, 2]
p = self._in_pi(tuning) / to_pi
s = self.norm_sel(bandwith, n)
self.fit_exit_code = 0
return p, s, g
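    # Illustrative usage sketch (editor's addition; `distribution` is any
    # cyclic Distribution built elsewhere):
    #   stat = DSF_VonMisesFit()
    #   pref, sel, goodness = stat.fit_vm(distribution)
    #   if stat.fit_exit_code != 0:
    #       print(stat.vm_fit_exit_codes())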
def vm_fit_exit_codes(self):
if self.fit_exit_code == 0:
return "succesfull exit"
if self.fit_exit_code == -1:
return "not enough bins for this fit"
if self.fit_exit_code == 1:
return "flat distribution"
if self.fit_exit_code == 2:
return "flat distribution"
if self.fit_exit_code == 3:
return "missing scipy.optimize import"
if self.fit_exit_code > 110:
return "unknown exit code"
if self.fit_exit_code > 100:
return "error " + str(self.fit_exit_code - 100) + " in scipy.optimize.leastsq"
return "unknown exit code"
def fit_v2m(self, distribution):
"""
computes the best fit of the bivariate von Mises function in the
semi-circle.
Return the tuple:
(
orientation1_preference, orientation1_selectivity, goodness_of_fit1,
orientation2_preference, orientation2_selectivity, goodness_of_fit2
)
See fit_vm() for considerations about selectivity and goodness_of_fit
"""
null = 0, 0, 0, 0, 0, 0
if unavailable_scipy_optimize:
if not VonMisesStatisticFn.user_warned_if_unavailable:
param.Parameterized().warning("scipy.optimize not available, dummy von Mises fit")
VonMisesStatisticFn.user_warned_if_unavailable=True
self.fit_exit_code = 3
return null
to_pi = np.pi / distribution.axis_range
x = to_pi * np.array(distribution.bins())
n = len(x)
if n < 9:
param.Parameterized().warning( "no bimodal von Mises fit possible with less than 8 bins" )
self.fit_exit_code = -1
return null
y = np.array(distribution.values())
if y.std() < self.noise_level:
self.fit_exit_code = 1
return null
rn = self.noise_level * np.random.random_sample(y.shape)
t0 = distribution.max_value_bin()
p0 = (1.0, 1.0, t0, 1.0, 1.0, self._orth(t0))
r = optimize.leastsq(self.von2_mises_res, p0, args=(x, y + rn),
full_output=True)
if not r[-1] in ( 1, 2, 3, 4 ):
self.fit_exit_code = 100 + r[-1]
return null
residuals = r[2]['fvec']
jacobian = r[1]
bandwith_1 = r[0][1]
tuning_1 = r[0][2]
bandwith_2 = r[0][4]
tuning_2 = r[0][5]
if jacobian is None:
self.fit_exit_code = 2
return null
if bandwith_1 < 0:
self.fit_exit_code = 1
return null
if bandwith_2 < 0:
self.fit_exit_code = 1
return null
error = (residuals ** 2).sum() / (n - len(p0))
covariance = jacobian * error
g1 = covariance[2, 2]
g2 = covariance[5, 5]
p1 = self._in_pi(tuning_1) / to_pi
p2 = self._in_pi(tuning_2) / to_pi
s1 = self.norm_sel(bandwith_1, n)
s2 = self.norm_sel(bandwith_2, n)
self.fit_exit_code = 0
return p1, s1, g1, p2, s2, g2
def __call__(self, distribution):
"""
Apply the distribution statistic function; must be implemented by subclasses.
"""
raise NotImplementedError
class DSF_VonMisesFit(VonMisesStatisticFn):
"""
Return the main mode of distribution in the given matrix, by fit with von Mises function.
"""
worst_fit = param.Number(default=0.5, bounds=(0.0, None), softbounds=(0.0, 1.0), doc="""
        Worst goodness-of-fit value for accepting the distribution as monomodal""")
# default result in case of failure of the fit
null_result = {"": Pref(preference=0, selectivity=0, goodness_of_fit=0),
"Modes": Pref(number=0)}
def __call__(self, distribution):
f = self.fit_vm(distribution)
if self.fit_exit_code != 0 or f[-1] > self.worst_fit:
return self.null_result
results = {}
p, s, g = f
p = self.value_scale[1] * (p + self.value_scale[0])
s = self.selectivity_scale[1] * (s + self.selectivity_scale[0])
results[""] = Pref(preference=p, selectivity=s, goodness_of_fit=g)
results["Modes"] = Pref(number=1)
return results
class DSF_BimodalVonMisesFit(VonMisesStatisticFn):
"""
Return the two modes of distributions in the given matrix, by fit with von Mises function
    The results of the main mode are returned under the "" key of the result
    dictionary, those of the second mode under the "Mode2" key, and the
    estimated number of modes under "Modes"; each entry is a Pref with keys
    preference, selectivity and goodness_of_fit.
"""
worst_fit = param.Number(default=0.5, bounds=(0.0, None), softbounds=(0.0, 1.0), doc="""
        Worst goodness-of-fit value for accepting the distribution as mono- or bi-modal""")
# default result in case of failure of the fit
null_result = {
"": Pref(preference=0, selectivity=0, goodness_of_fit=0),
"Mode2": Pref(preference=0, selectivity=0, goodness_of_fit=0),
"Modes": Pref(number=0)
}
def _analyze_distr(self, d):
"""
Analyze the given distribution with von Mises bimodal fit.
The distribution is analyzed with both unimodal and bimodal fits, and a
decision about the number of modes is made by comparing the goodness of
fit. It is a quick but inaccurate way of estimating the number of modes.
Return preference, selectivity, goodness of fit for both modes, and the
        estimated number of modes, or None if even the unimodal fit failed. If the
        distribution is unimodal, values of the second mode are set to 0. The main
        mode is always the one with the largest selectivity (von Mises bandwidth).
"""
no1 = False
f = self.fit_vm(d)
if self.fit_exit_code != 0:
no1 = True
p, s, g = f
f2 = self.fit_v2m(d)
if self.fit_exit_code != 0 or f2[2] > self.worst_fit:
if no1 or f[-1] > self.worst_fit:
return None
return p, s, g, 0, 0, 0, 1
p1, s1, g1, p2, s2, g2 = f2
if g1 > g:
return p, s, g, 0, 0, 0, 1
if s2 > s1:
return p2, s2, g2, p1, s1, g1, 2
return p1, s1, g1, p2, s2, g2, 2
def __call__(self, distribution):
f = self._analyze_distr(distribution)
if f is None:
return self.null_result
results = {}
p, s, g = f[: 3]
p = self.value_scale[1] * (p + self.value_scale[0])
s = self.selectivity_scale[1] * (s + self.selectivity_scale[0])
results[""] = Pref(preference=p, selectivity=s, goodness_of_fit=g)
p, s, g, n = f[3:]
p = self.value_scale[1] * (p + self.value_scale[0])
s = self.selectivity_scale[1] * (s + self.selectivity_scale[0])
results["Mode2"] = Pref(preference=p, selectivity=s, goodness_of_fit=g)
results["Modes"] = Pref(number=n)
return results
| 35.28449 | 107 | 0.624888 | [
"BSD-3-Clause"
] | fcr/featuremapper | featuremapper/distribution.py | 41,177 | Python |
import logging
import sys
import re
import time
import random
import utils
logger = utils.loggerMaster('slack.lexicon')
def response(type):
phrases={'greetings':[", welcome back", "Hi there", "Good to see you again", "Hello again", "hi"],
'farewells':['bye']
}
try:
length=len(phrases[type])
return phrases[type][(random.randint(0,length-1))]
except KeyError:
logger.error('lexicon read error')
return ('There is an error in the lexicon file you idiot')
def main():
print "This is a module designed to be used with RaspiSlack"
if __name__ == "__main__":
main()
| 18.617647 | 102 | 0.649289 | [
"MIT"
] | philipok-1/raspberry-slack | lexicon.py | 633 | Python |
#!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: Shijie Qin
@license: Apache Licence
@contact: [email protected]
@site: https://shijieqin.github.io
@software: PyCharm
@file: __init__.py.py
@time: 2018/11/8 3:13 PM
"""
| 17.928571 | 35 | 0.669323 | [
"Apache-2.0"
] | shijieqin/flatfish | core/__init__.py | 251 | Python |
"""
Bilinear Attention Networks
Jin-Hwa Kim, Jaehyun Jun, Byoung-Tak Zhang
https://arxiv.org/abs/1805.07932
This code is written by Jin-Hwa Kim.
"""
import sys
sys.path.append('./ban')
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.weight_norm import weight_norm
from attention import BiAttention
from language_model import WordEmbedding, QuestionEmbedding
from classifier import SimpleClassifier
from fc import FCNet
from bc import BCNet
from counting import Counter
from torch.autograd import Variable
class GRU(nn.Module):
"""
Gated Recurrent Unit without long-term memory
"""
def __init__(self,embed_size=512):
super(GRU,self).__init__()
self.update_x = nn.Linear(embed_size,embed_size,bias=True)
self.update_h = nn.Linear(embed_size,embed_size,bias=True)
self.reset_x = nn.Linear(embed_size,embed_size,bias=True)
self.reset_h = nn.Linear(embed_size,embed_size,bias=True)
self.memory_x = nn.Linear(embed_size,embed_size,bias=True)
self.memory_h = nn.Linear(embed_size,embed_size,bias=True)
def forward(self,x,state):
z = F.sigmoid(self.update_x(x) + self.update_h(state))
r = F.sigmoid(self.reset_x(x) + self.reset_h(state))
mem = F.tanh(self.memory_x(x) + self.memory_h(torch.mul(r,state)))
state = torch.mul(1-z,state) + torch.mul(z,mem)
return state
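# Editor's note: the update above follows the standard GRU equations
#   z = sigmoid(W_z x + U_z h),    r = sigmoid(W_r x + U_r h)
#   m = tanh(W_m x + U_m (r * h)),  h' = (1 - z) * h + z * m
# Minimal usage sketch (assumed shapes):
#   gru = GRU(embed_size=512)
#   h = torch.zeros(8, 512)   # batch of 8 hidden states
#   x = torch.randn(8, 512)
#   h = gru(x, h)             # one recurrent step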
def process_lengths(input):
"""
    Compute the lengths of the sentences in the current batch
"""
max_length = input.size(1)
lengths = list(max_length - input.data.eq(0).sum(1).squeeze())
return lengths
def select_last(x, lengths):
"""
Adaptively select the hidden state at the end of sentences
"""
batch_size = x.size(0)
seq_length = x.size(1)
mask = x.data.new().resize_as_(x.data).fill_(0)
for i in range(batch_size):
mask[i][lengths[i]-1].fill_(1)
mask = Variable(mask)
x = x.mul(mask)
x = x.sum(1).view(batch_size, x.size(2), x.size(3))
return x
class BanModel(nn.Module):
def __init__(self, w_emb, q_emb, v_att, b_net, q_prj, c_prj, classifier, counter, op, glimpse,num_hid):
super(BanModel, self).__init__()
self.op = op
self.glimpse = glimpse
self.w_emb = w_emb
self.q_emb = q_emb
self.v_att = v_att
self.b_net = nn.ModuleList(b_net)
self.q_prj = nn.ModuleList(q_prj)
self.c_prj = nn.ModuleList(c_prj)
self.classifier = classifier
self.counter = counter
self.drop = nn.Dropout(.5)
self.tanh = nn.Tanh()
def forward(self, v, b, q):
"""Forward
v: [batch, num_objs, obj_dim]
b: [batch, num_objs, b_dim]
q: [batch_size, seq_length]
return: logits, not probs
"""
w_emb = self.w_emb(q)
q_emb = self.q_emb.forward_all(w_emb) # [batch, q_len, q_dim]
boxes = b[:,:,:4].transpose(1,2)
b_emb = [0] * self.glimpse
att, logits = self.v_att.forward_all(v, q_emb) # b x g x v x q
for g in range(self.glimpse):
b_emb[g] = self.b_net[g].forward_with_weights(v, q_emb, att[:,g,:,:]) # b x l x h
atten, _ = logits[:,g,:,:].max(2)
embed = self.counter(boxes, atten)
q_emb = self.q_prj[g](b_emb[g].unsqueeze(1)) + q_emb
q_emb = q_emb + self.c_prj[g](embed).unsqueeze(1)
logits = self.classifier(q_emb.sum(1))
return F.softmax(logits,dim=-1), att
def build_ban(num_token, v_dim, num_hid, num_ans, op='', gamma=4, reasoning=False):
w_emb = WordEmbedding(num_token, 300, .0, op)
q_emb = QuestionEmbedding(300 if 'c' not in op else 600, num_hid, 1, False, .0)
if not reasoning:
v_att = BiAttention(v_dim, num_hid, num_hid, gamma)
else:
v_att = BiAttention(v_dim, num_hid, num_hid, 1)
# constructing the model
b_net = []
q_prj = []
c_prj = []
objects = 36 # minimum number of boxes, originally 10
for i in range(gamma):
b_net.append(BCNet(v_dim, num_hid, num_hid, None, k=1))
q_prj.append(FCNet([num_hid, num_hid], '', .2))
c_prj.append(FCNet([objects + 1, num_hid], 'ReLU', .0))
classifier = SimpleClassifier(
num_hid, num_hid * 2, num_ans, .5)
counter = Counter(objects)
if not reasoning:
return BanModel(w_emb, q_emb, v_att, b_net, q_prj, c_prj, classifier, counter, op, gamma, num_hid)
else:
return BanModel_Reasoning(w_emb, q_emb, v_att, b_net, q_prj, c_prj, classifier, counter, op, gamma, num_hid)
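# Hedged construction sketch for build_ban (all sizes below are hypothetical,
# not taken from this repository's configs):
#
#   model = build_ban(num_token=20000, v_dim=2048, num_hid=1280,
#                     num_ans=3129, op='c', gamma=4, reasoning=True)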
class BanModel_Reasoning(nn.Module):
def __init__(self, w_emb, q_emb, v_att, b_net, q_prj, c_prj, classifier, counter, op, glimpse,num_hid):
super(BanModel_Reasoning, self).__init__()
self.op = op
self.glimpse = glimpse
self.w_emb = w_emb
self.q_emb = q_emb
self.v_att = v_att
self.b_net = nn.ModuleList(b_net)
self.q_prj = nn.ModuleList(q_prj)
self.c_prj = nn.ModuleList(c_prj)
self.classifier = classifier
self.counter = counter
self.drop = nn.Dropout(.5)
self.tanh = nn.Tanh()
self.semantic_rnn = GRU(256)
self.semantic_q = nn.Linear(num_hid,256)
self.semantic_pred = nn.Linear(256,9)
self.semantic_embed = nn.Embedding(num_embeddings=9,embedding_dim=256) # embedding layer for the semantic operations
self.att_p = nn.Linear(num_hid,num_hid)
self.att = nn.Linear(num_hid,1)
self.att_s = nn.Linear(256,num_hid)
self.att_v = nn.Linear(2048,num_hid)
def init_hidden_state(self,batch,s_embed=256):
init_s = torch.zeros(batch,s_embed).cuda()
return init_s
def forward(self, v, b, q):
"""Forward
v: [batch, num_objs, obj_dim]
b: [batch, num_objs, b_dim]
q: [batch_size, seq_length]
return: logits, not probs
"""
w_emb = self.w_emb(q)
q_emb = self.q_emb.forward_all(w_emb) # [batch, q_len, q_dim]
ori_q_emb = q_emb
boxes = b[:,:,:4].transpose(1,2)
b_emb = [0] * self.glimpse
s_x = self.init_hidden_state(len(q),256)
s_h = torch.tanh(self.semantic_q(ori_q_emb.mean(1)))
v_att = torch.tanh(self.att_v(F.dropout(v,0.25)))
op = []
att_mask = []
q_emb_pool = []
for g in range(self.glimpse):
# reasoning attention
s_h = self.semantic_rnn(s_x,s_h)
s_x = F.softmax(self.semantic_pred(s_h),dim=-1)
op.append(s_x)
s_x = torch.max(s_x,dim=-1)[1]
s_x = self.semantic_embed(s_x)
s_att = torch.tanh(self.att_s(s_h)).unsqueeze(1).expand_as(v_att)
fuse_feat = torch.tanh(self.att_p(torch.mul(s_att,v_att)))
reason_att = self.att(fuse_feat)
reason_att = F.softmax(reason_att.view(reason_att.size(0),-1),dim=-1)
# reason_att = torch.sigmoid(reason_att.view(reason_att.size(0),-1),dim=-1)
# cur_v = v + torch.mul(v,reason_att.unsqueeze(-1).expand_as(v))
cur_v = torch.mul(v,reason_att.unsqueeze(-1).expand_as(v))
# original ban
att, logits = self.v_att(cur_v, ori_q_emb) # b x g x v x q
att, logits = att.squeeze(), logits.squeeze()
b_emb[g] = self.b_net[g].forward_with_weights(v, q_emb, att) # b x l x h
atten, _ = logits.max(2)
embed = self.counter(boxes, atten)
q_emb = self.q_prj[g](b_emb[g].unsqueeze(1)) + q_emb
q_emb = q_emb + self.c_prj[g](embed).unsqueeze(1)
q_emb_pool.append(q_emb)
att_mask.append(reason_att)
op = torch.cat([_.unsqueeze(1) for _ in op],dim=1)
att_mask = torch.cat([_.unsqueeze(1) for _ in att_mask],dim=1)
valid_op = process_lengths(torch.max(op,dim=-1)[1])
q_emb_pool = torch.cat([_.unsqueeze(1) for _ in q_emb_pool],dim=1)
q_emb = select_last(q_emb_pool,valid_op)
logits = self.classifier(q_emb.sum(1))
return F.softmax(logits,dim=-1), op, att_mask
| 36.00885 | 124 | 0.617842 | [
"MIT"
] | szzexpoi/AiR | AiR-M/ban/base_model.py | 8,138 | Python |
import versioneer
commands = versioneer.get_cmdclass().copy()
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
setup(
name='ngenix-test',
version=versioneer.get_version(),
packages=find_packages(),
url='https://github.com/adalekin/ngenix-test',
license='MIT',
author='Aleksey Dalekin',
author_email='[email protected]',
description='A te.',
long_description=open('README.md', 'rt').read(),
package_dir={'ngenix_test': 'ngenix_test'},
include_package_data=True,
install_requires=[
],
cmdclass=commands,
entry_points='''
[console_scripts]
nginx-test=nginx_test.run:main
'''
)
| 26.241379 | 53 | 0.65703 | [
"MIT"
] | adalekin/ngenix-test | setup.py | 761 | Python |
"""Tag the sandbox for release, make source and doc tarballs.
Requires Python 2.6
Example of invocation (use to test the script):
python makerelease.py --force --retag --platform=msvc6,msvc71,msvc80,mingw -ublep 0.5.0 0.6.0-dev
Example of invocation when doing a release:
python makerelease.py 0.5.0 0.6.0-dev
"""
import os.path
import subprocess
import sys
import doxybuild
import subprocess
import xml.etree.ElementTree as ElementTree
import shutil
import urllib2
import tempfile
import os
import time
from devtools import antglob, fixeol, tarball
SVN_ROOT = 'https://jsoncpp.svn.sourceforge.net/svnroot/jsoncpp/'
SVN_TAG_ROOT = SVN_ROOT + 'tags/jsoncpp'
SCONS_LOCAL_URL = 'http://sourceforge.net/projects/scons/files/scons-local/1.2.0/scons-local-1.2.0.tar.gz/download'
SOURCEFORGE_PROJECT = 'jsoncpp'
def set_version( version ):
with open('version','wb') as f:
f.write( version.strip() )
def rmdir_if_exist( dir_path ):
if os.path.isdir( dir_path ):
shutil.rmtree( dir_path )
class SVNError(Exception):
pass
def svn_command( command, *args ):
cmd = ['svn', '--non-interactive', command] + list(args)
print 'Running:', ' '.join( cmd )
process = subprocess.Popen( cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT )
stdout = process.communicate()[0]
if process.returncode:
error = SVNError( 'SVN command failed:\n' + stdout )
error.returncode = process.returncode
raise error
return stdout
def check_no_pending_commit():
"""Checks that there is no pending commit in the sandbox."""
stdout = svn_command( 'status', '--xml' )
etree = ElementTree.fromstring( stdout )
msg = []
for entry in etree.getiterator( 'entry' ):
path = entry.get('path')
status = entry.find('wc-status').get('item')
if status != 'unversioned' and path != 'version':
msg.append( 'File "%s" has pending change (status="%s")' % (path, status) )
if msg:
msg.insert(0, 'Pending change to commit found in sandbox. Commit them first!' )
return '\n'.join( msg )
def svn_join_url( base_url, suffix ):
if not base_url.endswith('/'):
base_url += '/'
if suffix.startswith('/'):
suffix = suffix[1:]
return base_url + suffix
def svn_check_if_tag_exist( tag_url ):
"""Checks if a tag exist.
Returns: True if the tag exist, False otherwise.
"""
try:
list_stdout = svn_command( 'list', tag_url )
except SVNError, e:
if e.returncode != 1 or not str(e).find('tag_url'):
raise e
# otherwise ignore error, meaning tag does not exist
return False
return True
def svn_commit( message ):
"""Commit the sandbox, providing the specified comment.
"""
svn_command( 'ci', '-m', message )
def svn_tag_sandbox( tag_url, message ):
"""Makes a tag based on the sandbox revisions.
"""
svn_command( 'copy', '-m', message, '.', tag_url )
def svn_remove_tag( tag_url, message ):
"""Removes an existing tag.
"""
svn_command( 'delete', '-m', message, tag_url )
def svn_export( tag_url, export_dir ):
"""Exports the tag_url revision to export_dir.
    Target directory, including its parents, is created if it does not exist.
    If the directory export_dir exists, it is deleted before the export proceeds.
"""
rmdir_if_exist( export_dir )
svn_command( 'export', tag_url, export_dir )
def fix_sources_eol( dist_dir ):
"""Set file EOL for tarball distribution.
"""
print 'Preparing exported source file EOL for distribution...'
prune_dirs = antglob.prune_dirs + 'scons-local* ./build* ./libs ./dist'
win_sources = antglob.glob( dist_dir,
includes = '**/*.sln **/*.vcproj',
prune_dirs = prune_dirs )
unix_sources = antglob.glob( dist_dir,
includes = '''**/*.h **/*.cpp **/*.inl **/*.txt **/*.dox **/*.py **/*.html **/*.in
sconscript *.json *.expected AUTHORS LICENSE''',
excludes = antglob.default_excludes + 'scons.py sconsign.py scons-*',
prune_dirs = prune_dirs )
for path in win_sources:
fixeol.fix_source_eol( path, is_dry_run = False, verbose = True, eol = '\r\n' )
for path in unix_sources:
fixeol.fix_source_eol( path, is_dry_run = False, verbose = True, eol = '\n' )
def download( url, target_path ):
"""Download file represented by url to target_path.
"""
f = urllib2.urlopen( url )
try:
data = f.read()
finally:
f.close()
fout = open( target_path, 'wb' )
try:
fout.write( data )
finally:
fout.close()
def check_compile( distcheck_top_dir, platform ):
cmd = [sys.executable, 'scons.py', 'platform=%s' % platform, 'check']
print 'Running:', ' '.join( cmd )
log_path = os.path.join( distcheck_top_dir, 'build-%s.log' % platform )
flog = open( log_path, 'wb' )
try:
process = subprocess.Popen( cmd,
stdout=flog,
stderr=subprocess.STDOUT,
cwd=distcheck_top_dir )
stdout = process.communicate()[0]
status = (process.returncode == 0)
finally:
flog.close()
return (status, log_path)
def write_tempfile( content, **kwargs ):
fd, path = tempfile.mkstemp( **kwargs )
f = os.fdopen( fd, 'wt' )
try:
f.write( content )
finally:
f.close()
return path
class SFTPError(Exception):
pass
def run_sftp_batch( userhost, sftp, batch, retry=0 ):
path = write_tempfile( batch, suffix='.sftp', text=True )
# psftp -agent -C blep,[email protected] -batch -b batch.sftp -bc
cmd = [sftp, '-agent', '-C', '-batch', '-b', path, '-bc', userhost]
error = None
for retry_index in xrange(0, max(1,retry)):
heading = retry_index == 0 and 'Running:' or 'Retrying:'
print heading, ' '.join( cmd )
process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
stdout = process.communicate()[0]
if process.returncode != 0:
error = SFTPError( 'SFTP batch failed:\n' + stdout )
else:
break
if error:
raise error
return stdout
def sourceforge_web_synchro( sourceforge_project, doc_dir,
user=None, sftp='sftp' ):
"""Notes: does not synchronize sub-directory of doc-dir.
"""
userhost = '%s,%[email protected]' % (user, sourceforge_project)
stdout = run_sftp_batch( userhost, sftp, """
cd htdocs
dir
exit
""" )
existing_paths = set()
collect = 0
for line in stdout.split('\n'):
line = line.strip()
if not collect and line.endswith('> dir'):
collect = True
elif collect and line.endswith('> exit'):
break
elif collect == 1:
collect = 2
elif collect == 2:
path = line.strip().split()[-1:]
if path and path[0] not in ('.', '..'):
existing_paths.add( path[0] )
upload_paths = set( [os.path.basename(p) for p in antglob.glob( doc_dir )] )
paths_to_remove = existing_paths - upload_paths
if paths_to_remove:
print 'Removing the following file from web:'
print '\n'.join( paths_to_remove )
stdout = run_sftp_batch( userhost, sftp, """cd htdocs
rm %s
exit""" % ' '.join(paths_to_remove) )
print 'Uploading %d files:' % len(upload_paths)
batch_size = 10
upload_paths = list(upload_paths)
start_time = time.time()
for index in xrange(0,len(upload_paths),batch_size):
paths = upload_paths[index:index+batch_size]
file_per_sec = (time.time() - start_time) / (index+1)
remaining_files = len(upload_paths) - index
remaining_sec = file_per_sec * remaining_files
print '%d/%d, ETA=%.1fs' % (index+1, len(upload_paths), remaining_sec)
run_sftp_batch( userhost, sftp, """cd htdocs
lcd %s
mput %s
exit""" % (doc_dir, ' '.join(paths) ), retry=3 )
def sourceforge_release_tarball( sourceforge_project, paths, user=None, sftp='sftp' ):
userhost = '%s,%[email protected]' % (user, sourceforge_project)
run_sftp_batch( userhost, sftp, """
mput %s
exit
""" % (' '.join(paths),) )
def main():
usage = """%prog release_version next_dev_version
Update 'version' file to release_version and commit.
Generates the document tarball.
Tags the sandbox revision with release_version.
Update 'version' file to next_dev_version and commit.
Performs an svn export of tag release version, and build a source tarball.
Must be started in the project top directory.
Warning: --force should only be used when developing/testing the release script.
"""
from optparse import OptionParser
parser = OptionParser(usage=usage)
parser.allow_interspersed_args = False
parser.add_option('--dot', dest="dot_path", action='store', default=doxybuild.find_program('dot'),
help="""Path to GraphViz dot tool. Must be full qualified path. [Default: %default]""")
parser.add_option('--doxygen', dest="doxygen_path", action='store', default=doxybuild.find_program('doxygen'),
help="""Path to Doxygen tool. [Default: %default]""")
parser.add_option('--force', dest="ignore_pending_commit", action='store_true', default=False,
help="""Ignore pending commit. [Default: %default]""")
parser.add_option('--retag', dest="retag_release", action='store_true', default=False,
help="""Overwrite release existing tag if it exist. [Default: %default]""")
parser.add_option('-p', '--platforms', dest="platforms", action='store', default='',
help="""Comma separated list of platform passed to scons for build check.""")
parser.add_option('--no-test', dest="no_test", action='store_true', default=False,
help="""Skips build check.""")
parser.add_option('--no-web', dest="no_web", action='store_true', default=False,
help="""Do not update web site.""")
parser.add_option('-u', '--upload-user', dest="user", action='store',
help="""Sourceforge user for SFTP documentation upload.""")
parser.add_option('--sftp', dest='sftp', action='store', default=doxybuild.find_program('psftp', 'sftp'),
help="""Path of the SFTP compatible binary used to upload the documentation.""")
parser.enable_interspersed_args()
options, args = parser.parse_args()
if len(args) != 2:
parser.error( 'release_version missing on command-line.' )
release_version = args[0]
next_version = args[1]
if not options.platforms and not options.no_test:
parser.error( 'You must specify either --platform or --no-test option.' )
if options.ignore_pending_commit:
msg = ''
else:
msg = check_no_pending_commit()
if not msg:
print 'Setting version to', release_version
set_version( release_version )
svn_commit( 'Release ' + release_version )
tag_url = svn_join_url( SVN_TAG_ROOT, release_version )
if svn_check_if_tag_exist( tag_url ):
if options.retag_release:
svn_remove_tag( tag_url, 'Overwriting previous tag' )
else:
print 'Aborting, tag %s already exist. Use --retag to overwrite it!' % tag_url
sys.exit( 1 )
svn_tag_sandbox( tag_url, 'Release ' + release_version )
print 'Generated doxygen document...'
## doc_dirname = r'jsoncpp-api-html-0.5.0'
## doc_tarball_path = r'e:\prg\vc\Lib\jsoncpp-trunk\dist\jsoncpp-api-html-0.5.0.tar.gz'
doc_tarball_path, doc_dirname = doxybuild.build_doc( options, make_release=True )
doc_distcheck_dir = 'dist/doccheck'
tarball.decompress( doc_tarball_path, doc_distcheck_dir )
doc_distcheck_top_dir = os.path.join( doc_distcheck_dir, doc_dirname )
export_dir = 'dist/export'
svn_export( tag_url, export_dir )
fix_sources_eol( export_dir )
source_dir = 'jsoncpp-src-' + release_version
source_tarball_path = 'dist/%s.tar.gz' % source_dir
print 'Generating source tarball to', source_tarball_path
tarball.make_tarball( source_tarball_path, [export_dir], export_dir, prefix_dir=source_dir )
# Decompress source tarball, download and install scons-local
distcheck_dir = 'dist/distcheck'
distcheck_top_dir = distcheck_dir + '/' + source_dir
print 'Decompressing source tarball to', distcheck_dir
rmdir_if_exist( distcheck_dir )
tarball.decompress( source_tarball_path, distcheck_dir )
scons_local_path = 'dist/scons-local.tar.gz'
print 'Downloading scons-local to', scons_local_path
download( SCONS_LOCAL_URL, scons_local_path )
print 'Decompressing scons-local to', distcheck_top_dir
tarball.decompress( scons_local_path, distcheck_top_dir )
# Run compilation
print 'Compiling decompressed tarball'
all_build_status = True
for platform in options.platforms.split(','):
print 'Testing platform:', platform
build_status, log_path = check_compile( distcheck_top_dir, platform )
print 'see build log:', log_path
print build_status and '=> ok' or '=> FAILED'
all_build_status = all_build_status and build_status
if not build_status:
print 'Testing failed on at least one platform, aborting...'
svn_remove_tag( tag_url, 'Removing tag due to failed testing' )
sys.exit(1)
if options.user:
if not options.no_web:
print 'Uploading documentation using user', options.user
sourceforge_web_synchro( SOURCEFORGE_PROJECT, doc_distcheck_top_dir, user=options.user, sftp=options.sftp )
print 'Completed documentation upload'
print 'Uploading source and documentation tarballs for release using user', options.user
sourceforge_release_tarball( SOURCEFORGE_PROJECT,
[source_tarball_path, doc_tarball_path],
user=options.user, sftp=options.sftp )
print 'Source and doc release tarballs uploaded'
else:
            print 'No upload user specified. Web site and download tarball were not uploaded.'
print 'Tarball can be found at:', doc_tarball_path
# Set next version number and commit
set_version( next_version )
svn_commit( 'Released ' + release_version )
else:
sys.stderr.write( msg + '\n' )
if __name__ == '__main__':
main()
| 41.059621 | 124 | 0.620883 | [
"MIT"
] | csyzzkdcz/WrinkledTensionFields | external/jsoncppWrapper/makerelease.py | 15,151 | Python |
"""
Getting started tutorial
========================
In this introductory example, you will see how to use the :code:`spikeinterface` package to perform a full electrophysiology analysis.
We will first create some simulated data, and we will then perform some pre-processing, run a couple of spike sorting
algorithms, inspect and validate the results, export to Phy, and compare spike sorters.
"""
import matplotlib.pyplot as plt
##############################################################################
# The spikeinterface module by itself imports only the spikeinterface.core submodule,
# which is not useful for the end user
import spikeinterface
##############################################################################
# We need to import the different submodules one by one (preferred).
# There are 5 modules:
#
# - :code:`extractors` : file IO
# - :code:`toolkit` : processing toolkit for pre-, post-processing, validation, and automatic curation
# - :code:`sorters` : Python wrappers of spike sorters
# - :code:`comparison` : comparison of spike sorting output
# - :code:`widgets` : visualization
import spikeinterface as si # import core only
import spikeinterface.extractors as se
import spikeinterface.toolkit as st
import spikeinterface.sorters as ss
import spikeinterface.comparison as sc
import spikeinterface.widgets as sw
##############################################################################
# We can also import all submodules at once with the import below;
# this internally imports core+extractors+toolkit+sorters+comparison+widgets+exporters
#
# This is useful for notebooks, but it is a heavier import because internally many more
# dependencies are imported (scipy/sklearn/networkx/matplotlib/h5py...)
import spikeinterface.full as si
##############################################################################
# First, let's download a simulated dataset from the
# 'https://gin.g-node.org/NeuralEnsemble/ephy_testing_data' repo
#
# Then we can open it. Note that `MEArec <https://mearec.readthedocs.io>`_ simulated file
# contains both "recording" and a "sorting" object.
local_path = si.download_dataset(remote_path='mearec/mearec_test_10s.h5')
recording, sorting_true = se.read_mearec(local_path)
print(recording)
print(sorting_true)
##############################################################################
# :code:`recording` is a :code:`RecordingExtractor` object, which extracts information about channel ids, channel locations
# (if present), the sampling frequency of the recording, and the extracellular traces. :code:`sorting_true` is a
# :code:`SortingExtractor` object, which contains spike-sorting related information, including unit ids,
# spike trains, etc. Since the data are simulated, :code:`sorting_true` has ground-truth information of the spiking
# activity of each unit.
#
# Let's use the :code:`widgets` module to visualize the traces and the raster plots.
w_ts = sw.plot_timeseries(recording, time_range=(0, 5))
w_rs = sw.plot_rasters(sorting_true, time_range=(0, 5))
##############################################################################
# This is how you retrieve info from a :code:`RecordingExtractor`...
channel_ids = recording.get_channel_ids()
fs = recording.get_sampling_frequency()
num_chan = recording.get_num_channels()
num_seg = recording.get_num_segments()
print('Channel ids:', channel_ids)
print('Sampling frequency:', fs)
print('Number of channels:', num_chan)
print('Number of segments:', num_seg)
##############################################################################
# ...and a :code:`SortingExtractor`
num_seg = sorting_true.get_num_segments()
unit_ids = sorting_true.get_unit_ids()
spike_train = sorting_true.get_unit_spike_train(unit_id=unit_ids[0])
print('Number of segments:', num_seg)
print('Unit ids:', unit_ids)
print('Spike train of first unit:', spike_train)
##################################################################
# :code:`spikeinterface` internally uses the :code:`probeinterface` package
# to handle Probe and ProbeGroup objects.
# So any probe in the probeinterface collections can be downloaded
# and set on a Recording object.
# In this case, the MEArec dataset already handles a Probe and we don't need to set it.
probe = recording.get_probe()
print(probe)
from probeinterface.plotting import plot_probe
plot_probe(probe)
##############################################################################
# Using the :code:`toolkit`, you can perform preprocessing on the recordings.
# Each pre-processing function also returns a :code:`RecordingExtractor`,
# which makes it easy to build pipelines. Here, we filter the recording and
# apply common median reference (CMR).
# All these preprocessing steps are "lazy". The computation is done on demand when we call
# `recording.get_traces(...)` or when we save the object to disk.
recording_cmr = recording
recording_f = st.bandpass_filter(recording, freq_min=300, freq_max=6000)
print(recording_f)
recording_cmr = st.common_reference(recording_f, reference='global', operator='median')
print(recording_cmr)
# this computes and saves the recording after applying the preprocessing chain
recording_preprocessed = recording_cmr.save(format='binary')
print(recording_preprocessed)
##############################################################################
# Now you are ready to spike sort using the :code:`sorters` module!
# Let's first check which sorters are implemented and which are installed
print('Available sorters', ss.available_sorters())
print('Installed sorters', ss.installed_sorters())
##############################################################################
# The :code:`ss.installed_sorters()` will list the sorters installed on the machine.
# We can see we have HerdingSpikes and Tridesclous installed.
# Spike sorters come with a set of parameters that users can change.
# The available parameters are dictionaries and can be accessed with:
print(ss.get_default_params('herdingspikes'))
print(ss.get_default_params('tridesclous'))
##############################################################################
# Let's run herdingspikes and change one of the parameters, say, the detect_threshold:
sorting_HS = ss.run_herdingspikes(recording=recording_preprocessed, detect_threshold=4)
print(sorting_HS)
##############################################################################
# Alternatively we can pass full dictionary containing the parameters:
other_params = ss.get_default_params('herdingspikes')
other_params['detect_threshold'] = 5
# parameters set by params dictionary
sorting_HS_2 = ss.run_herdingspikes(recording=recording_preprocessed, output_folder="herdingspikes_output2",
**other_params)
print(sorting_HS_2)
##############################################################################
# Let's run tridesclous as well, with default parameters:
sorting_TDC = ss.run_tridesclous(recording=recording_preprocessed)
##############################################################################
# The :code:`sorting_HS` and :code:`sorting_TDC` are :code:`SortingExtractor`
# objects. We can print the units found using:
print('Units found by herdingspikes:', sorting_HS.get_unit_ids())
print('Units found by tridesclous:', sorting_TDC.get_unit_ids())
##############################################################################
# :code:`spikeinterface` provides an efficient way to extract waveform snippets from paired recording/sorting objects.
# The :code:`WaveformExtractor` class samples some spikes (:code:`max_spikes_per_unit=500`) for each cluster and stores
# them on disk. These waveforms per cluster are helpful to compute the average waveform, or "template", for each unit
# and then to compute, for example, quality metrics.
we_TDC = si.WaveformExtractor.create(recording_preprocessed, sorting_TDC, 'waveforms', remove_if_exists=True)
we_TDC.set_params(ms_before=3., ms_after=4., max_spikes_per_unit=500)
we_TDC.run(n_jobs=-1, chunk_size=30000)
print(we_TDC)
unit_id0 = sorting_TDC.unit_ids[0]
waveforms = we_TDC.get_waveforms(unit_id0)
print(waveforms.shape)
template = we_TDC.get_template(unit_id0)
print(template.shape)
##############################################################################
# Once we have the `WaveformExtractor` object
# we can post-process, validate, and curate the results. With
# the :code:`toolkit.postprocessing` submodule, one can, for example,
# get waveforms, templates, maximum channels, PCA scores, or export the data
# to Phy. `Phy <https://github.com/cortex-lab/phy>`_ is a GUI for manual
# curation of the spike sorting output. To export to phy you can run:
from spikeinterface.exporters import export_to_phy
export_to_phy(we_TDC, './phy_folder_for_TDC',
compute_pc_features=False, compute_amplitudes=True)
##############################################################################
# Then you can run the template-gui with: :code:`phy template-gui phy/params.py`
# and manually curate the results.
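#
# As a hedged aside (not part of the original tutorial): in recent spikeinterface
# versions the curated Phy folder can usually be read back as a sorting object,
# e.g. :code:`sorting_curated_phy = se.read_phy('./phy_folder_for_TDC')`.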
##############################################################################
# Quality metrics for the spike sorting output are very important to assess the spike sorting performance.
# The :code:`spikeinterface.toolkit.qualitymetrics` module implements several quality metrics
# to assess the goodness of sorted units. Among those, for example,
# are signal-to-noise ratio, ISI violation ratio, isolation distance, and many more.
# These metrics are built on top of the WaveformExtractor class and return a dictionary with the unit ids as keys:
snrs = st.compute_snrs(we_TDC)
print(snrs)
isi_violations_rate, isi_violations_count = st.compute_isi_violations(we_TDC, isi_threshold_ms=1.5)
print(isi_violations_rate)
print(isi_violations_count)
##############################################################################
# All these quality metrics can be computed in one shot and returned as
# a :code:`pandas.DataFrame`
metrics = st.compute_quality_metrics(we_TDC, metric_names=['snr', 'isi_violation', 'amplitude_cutoff'])
print(metrics)
##############################################################################
# Quality metrics can also be used to automatically curate the spike sorting
# output. For example, you can select sorted units with a SNR above a
# certain threshold:
keep_mask = (metrics['snr'] > 7.5) & (metrics['isi_violations_rate'] < 0.01)
print(keep_mask)
keep_unit_ids = keep_mask[keep_mask].index.values
print(keep_unit_ids)
curated_sorting = sorting_TDC.select_units(keep_unit_ids)
print(curated_sorting)
##############################################################################
# The final part of this tutorial deals with comparing spike sorting outputs.
# We can either (1) compare the spike sorting results with the ground-truth
# sorting :code:`sorting_true`, (2) compare the output of two sorters (HerdingSpikes
# and Tridesclous), or (3) compare the output of multiple sorters:
comp_gt_TDC = sc.compare_sorter_to_ground_truth(gt_sorting=sorting_true, tested_sorting=sorting_TDC)
comp_TDC_HS = sc.compare_two_sorters(sorting1=sorting_TDC, sorting2=sorting_HS)
comp_multi = sc.compare_multiple_sorters(sorting_list=[sorting_TDC, sorting_HS],
name_list=['tdc', 'hs'])
##############################################################################
# When comparing with a ground-truth sorting extractor (1), you can get the sorting performance and plot a confusion
# matrix
comp_gt_TDC.get_performance()
w_conf = sw.plot_confusion_matrix(comp_gt_TDC)
w_agr = sw.plot_agreement_matrix(comp_gt_TDC)
##############################################################################
# When comparing two sorters (2), we can see the matching of units between sorters.
# Units which are not matched have -1 as unit id:
comp_TDC_HS.hungarian_match_12
##############################################################################
# or the reverse:
comp_TDC_HS.hungarian_match_21
##############################################################################
# When comparing multiple sorters (3), you can extract a :code:`SortingExtractor` object with units in agreement
# between sorters. You can also plot a graph showing how the units are matched between the sorters.
sorting_agreement = comp_multi.get_agreement_sorting(minimum_agreement_count=2)
print('Units in agreement between Tridesclous and HerdingSpikes:', sorting_agreement.get_unit_ids())
w_multi = sw.plot_multicomp_graph(comp_multi)
plt.show()
| 44.669039 | 126 | 0.654557 | [
"MIT"
] | Dradeliomecus/spikeinterface | examples/getting_started/plot_getting_started.py | 12,555 | Python |
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
__all__ = [
'BaseFilterElementWdg',
'SelectFilterElementWdg',
'TextFilterElementWdg',
'KeywordFilterElementWdg',
'DateFilterElementWdg',
'DateRangeFilterElementWdg',
'LoginFilterElementWdg',
'MultiSelectFilterElementWdg',
'MultiFieldFilterElementWdg',
'CompoundValueFilterElementWdg',
'ExpressionFilterElementWdg',
'ReplaceWithValueExpressionFilterElementWdg',
'ButtonFilterElementWdg',
'CheckboxFilterElementWdg'
]
import re
import datetime
import sys
from dateutil.relativedelta import relativedelta
from dateutil import parser
from pyasm.common import Common, TacticException, SetupException, Date
from pyasm.biz import Project
from pyasm.web import DivWdg, SpanWdg, Table, WebContainer
from pyasm.widget import CheckboxWdg, SelectWdg, TextWdg, HiddenWdg
from pyasm.search import Search, SearchException, SearchType
from tactic.ui.common import BaseRefreshWdg
from tactic.ui.input import TextInputWdg, LookAheadTextInputWdg
from .filter_data import FilterData
import six
basestring = six.string_types
class BaseFilterElementWdg(BaseRefreshWdg):
'''represents the base filter'''
def __init__(self, **kwargs):
super(BaseFilterElementWdg, self).__init__(**kwargs)
self.values = {}
self.show_title = False
self.set_flag = False
self.title = None
def is_visible(self):
return True
def set_show_title(self, flag):
self.show_title = flag
def get_title_wdg(self):
title_div = DivWdg()
name = self.get_name()
title = self.get_title()
if not title:
title = name
title = Common.get_display_title(title)
title_div.add("%s " % title )
title_div.add_style("font-weight: bold")
return title_div
"""
def is_set(self):
'''indicates whether this filter has values set that will
oontribute to the search'''
return self.set_flag
"""
def is_set(self):
value = self.values.get("value")
if value:
return True
else:
return False
def get_set_js_action(self):
return r'''
var top = bvr.src_el.getParent(".spt_filter_top");
if (!top) return;
var set_icons = top.getElements(".spt_filter_set");
for (var i = 0; i < set_icons.length; i++) {
icon_name = set_icons[i].getAttribute("spt_element_name");
if (icon_name == bvr.element_name) {
var set_icon = set_icons[i];
if (bvr.src_el.value == '') {
set_icon.setStyle("display", "none");
}
else {
set_icon.setStyle("display", "");
}
break;
}
}
'''
def set_value(self, name, value):
self.values[name] = value
def set_values(self, values):
self.values = values
def alter_search(self, search):
pass
def get_display(self):
pass
def set_title(self, title):
self.title = title
def get_title(self):
return self.title
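# Illustrative sketch (not part of the original module): a concrete filter
# element typically subclasses BaseFilterElementWdg, overriding get_display()
# to draw its inputs and alter_search() to apply self.values to the Search.
# The column name "status" below is only an example:
#
#   class StatusFilterElementWdg(BaseFilterElementWdg):
#       def get_display(self):
#           div = DivWdg()
#           div.add(TextWdg("value"))
#           return div
#       def alter_search(self, search):
#           value = self.values.get("value")
#           if value:
#               search.add_filter("status", value)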
class SelectFilterElementWdg(BaseFilterElementWdg):
def init(self):
expression = self.kwargs.get("column")
self.multi_search_types = False
if not expression:
return
parts = expression.split(".")
search_types = parts[:-1]
self.multi_search_types = False
if len(search_types) > 1:
self.multi_search_types = True
def is_set(self):
value = self.values.get("value")
if value:
return True
else:
if not self.values and self.kwargs.get('default'):
return True
else:
return False
def alter_search(self, search):
expression = self.kwargs.get("column")
if not expression:
return
parts = expression.split(".")
search_types = parts[:-1]
column = parts[-1]
value = self.values.get("value")
if not Common.is_python3:
if isinstance(value, unicode):
value = value.encode('utf-8','ignore')
elif isinstance(value, basestring):
value = unicode(value, errors='ignore').encode('utf-8')
#print "value: ", value, type(value)
if not value:
default = self.kwargs.get('default')
if not self.values and default:
value = default
else:
return
# op should come from self.values
op = self.values.get("op")
if not op:
op = '='
# go through the hierarchy
search2 = None
sobjects = None
sub_search = None
if len(search_types) > 1:
"""
#TODO: replace the for loop below with this @SEARCH code which should be more efficient
search_types.reverse()
top_search_type = search_types[0]
search_type_str = '.'.join(search_types[1:])
expr = '''@SEARCH(%s["%s","%s"].%s)'''%(top_search_type, column, value, search_type_str)
search2 = Search.eval(expr)
sobjects = search2.get_sobjects()
if not sobjects:
# if the trail ends, then set the null filter
search2.set_null_filter()
"""
search_types.reverse()
top_search_type = search_types[0]
search_type_str = '.'.join(search_types[1:])
if search_type_str:
expr = '''@SEARCH(%s["%s","%s"].%s)'''%(top_search_type, column, value, search_type_str)
sub_search = Search.eval(expr)
'''
for search_type in search_types:
if sobjects == None:
search2 = Search(search_type)
search2.add_filter(column, value, "=")
sobjects = search2.get_sobjects()
else:
if search_type == 'connect':
related_sobjects = []
from pyasm.biz import SObjectConnection
connections = SObjectConnection.get_connections(sobjects)
related_sobjects = SObjectConnection.get_sobjects(connections)
"""
for sobject in sobjects:
connections, sobjects = SObjectConnection.get_connected_sobjects(sobject)
related_sobjects.extend(sobjects)
"""
sobjects = related_sobjects[:]
else:
search2 = Search(search_type)
search2.add_relationship_filters(sobjects)
sobjects = search2.get_sobjects()
if not sobjects:
# if the trail ends, then set the null filter and exit
search.set_null_filter()
return
'''
elif not search_types:
if op == '!=':
search.add_op('begin')
search.add_filter(column, value, op)
search.add_filter(column, None)
search.add_op('or')
elif op in ['~','EQI']:
filters = [[column,'EQI',value]]
search.add_op_filters(filters)
elif op == 'is on':
search.add_day_filter(column, value)
else:
search.add_filter(column, value, op)
return
else:
search_type = search_types[0]
# get all the sobjects at the appropriate hierarchy
try:
search2 = Search(search_type)
if value:
if op == 'exists':
search.add_relationship_search_filter(search2, op=value)
return
else:
#search2.add_filter(column, value, op)
if op == '!=':
search2.add_op('begin')
search2.add_filter(column, value, op)
search2.add_filter(column, None)
search2.add_op('or')
elif op in ['~','EQI']:
filters = [[column,'EQI',value]]
search2.add_op_filters(filters)
elif op == 'is on':
search2.add_day_filter(column, value)
else:
search2.add_filter(column, value, op)
use_multidb = False
search.add_relationship_search_filter(search2, op="in", use_multidb=use_multidb)
except SearchException as e:
raise SearchException('[%s] in simple search definition may have syntax error. %s ' %(expression, e.__str__()))
return
if op == '=':
op = 'in'
else:
op = 'not in'
if sobjects:
search.add_relationship_filters(sobjects, op=op)
elif sub_search:
# column starts with connect
if search_types[-1] =='connect':
search.add_search_filter('id', sub_search, op=op)
else:
search.add_relationship_search_filter(sub_search, op=op)
else:
search.set_null_filter()
def get_display(self):
div = DivWdg()
#div.add_style("width: 350px")
select = SelectWdg("value")
select.add_style("width: 190px")
default_value = self.kwargs.get("default")
# TODO: this is needed for multiple selection, but it is ugly
#select.set_attr("multiple", "1")
#select.add_attr("spt_is_multiple", "true")
# if there is a link search already, don't use default
if self.values and self.kwargs.get('default'):
self.kwargs.pop('default')
select.set_options(self.kwargs)
select.add_empty_option("-- Select --")
name = self.get_name()
select.add_behavior( {
'type': 'change',
'element_name': name,
'cbjs_action': self.get_set_js_action()
} )
# this is needed so they don't cross contaminate
# FIXME: this is probably a bug in SelectWdg
select.set_value('')
value = self.values.get("value")
if not value:
value = default_value
if value:
select.set_value(value)
if self.show_title:
title_div = DivWdg()
div.add(title_div)
title_div.add_style("float: left")
name = self.get_name()
title = self.get_title()
if not title:
title = name
title = Common.get_display_title(title)
title_div.add("%s:" % title )
title_div.add_style("width: 80px")
title_div.add_style("font-weight: bold")
title_div.add_style("margin-left: 15px")
title_div.add_style("padding-top: 2px")
op = self.kwargs.get("op")
if op == 'exists':
div.add(" is ")
div.add(HiddenWdg("op", "exists"))
elif op == 'is on':
div.add(" is on ")
div.add(HiddenWdg("op", "is on"))
elif op in ['~', 'contains']:
div.add(" contains ")
div.add(HiddenWdg("op", "~"))
elif op == 'is':
#TODO: have this style apply to everything else and get rid of  s
op_div = DivWdg('is')
op_div.add_styles('margin-top: 8px; margin-right: 15px')
div.add(op_div)
div.add(HiddenWdg("op", "="))
div.add_style("display: flex")
else:
op_select = SelectWdg("op")
op_select.add_style("width: 100px")
# only support in or not in for multi stypes column
if self.multi_search_types:
op_select.set_option("labels", "is|is not")
op_select.set_option("values", "=|!=")
else:
op_select.set_option("labels", "is|is not|contains")
op_select.set_option("values", "=|!=|~")
value = self.values.get("op")
if value:
op_select.set_value(value)
div.add(op_select)
op_select.add_style("float: left")
op_select.add_style("margin-right: 3px")
div.add(select)
# TEST Dynamic loading of select widget
# Disabling: delay is too long.
"""
parent_div = DivWdg()
div.add(parent_div)
select = SelectWdg("value")
parent_div.add(select)
select.add_style("width: 150px")
select.add_color("background", "background")
value = self.values.get("value")
if value:
select.set_option("values", [value])
select.set_value(value)
select.add_empty_option("-- Select --")
select.add_behavior( {
'type': 'mouseover',
'kwargs': self.kwargs,
'cbjs_action': '''
var top = bvr.src_el.getParent();
var class_name = 'tactic.ui.filter.ReplaceSelectWdg';
spt.panel.load( top, class_name, bvr.kwargs);
'''
} )
"""
return div
class TextFilterElementWdg(SelectFilterElementWdg):
''' derives from SelectFilterElementWdg but with a text box'''
def get_display(self):
div = DivWdg()
text = TextWdg("value")
if not self.kwargs.get('column'):
text.set_attr('readonly','readonly')
text.set_value('Warning: column option not defined')
text.add_class('disabled')
text.add_style("width: 170px")
# if there is a link search already, don't use default
if self.values and self.kwargs.get('default'):
self.kwargs.pop('default')
text.set_options(self.kwargs)
name = self.get_name()
text.add_behavior( {
'type': 'blur',
'element_name': name,
'cbjs_action': self.get_set_js_action()
} )
text.add_behavior( {
'type': 'keyup',
'cbjs_action': '''
var key = evt.key;
if (key == 'enter') {
spt.dg_table.search_cbk( {}, {src_el: bvr.src_el} );
}
''' } )
value = self.values.get("value")
if value:
text.set_value(value)
if self.show_title:
title_div = DivWdg()
div.add(title_div)
title_div.add_style("float: left")
name = self.get_name()
title = self.get_title()
if not title:
title = name
# only do this filtering to the name
title = Common.get_display_title(title)
title_div.add("%s:" % title )
title_div.add_style("width: 80px")
title_div.add_style("font-weight: bold")
title_div.add_style("margin-left: 15px")
title_div.add_style("padding-top: 2px")
op = self.kwargs.get("op")
if op == 'exists':
div.add(" is ")
div.add(HiddenWdg("op", "exists"))
elif op in ['~', 'contains']:
div.add(" contains ")
div.add(HiddenWdg("op", "~"))
else:
op_select = SelectWdg("op")
op_select.set_option("labels", "is|is not|contains")
op_select.set_option("values", "=|!=|~")
value = self.values.get("op")
if value:
op_select.set_value(value)
op_select.add_style("margin-right: 3px")
op_select.add_style("float: left")
div.add(op_select)
div.add(text)
return div
__all__.append("ReplaceSelectWdg")
class ReplaceSelectWdg(BaseRefreshWdg):
def get_display(self):
select = SelectWdg("value")
select.add_style("width: 150px")
# TODO: this is needed for multiple selection, but it is ugly
#select.set_attr("multiple", "1")
#select.add_attr("spt_is_multiple", "true")
# if there is a link search already, don't use default
#if self.values and self.kwargs.get('default'):
# self.kwargs.pop('default')
select.set_options(self.kwargs)
select.add_empty_option("-- Select --")
"""
select.add_behavior( {
'type': 'load',
'cbjs_action': '''
bvr.src_el.focus();
'''
} )
"""
select.add_behavior( {
'type': 'change',
'cbjs_action': self.get_set_js_action()
} )
# this is needed so they don't cross contaminate
# FIXME: this is probably a bug in SelectWdg
select.set_value('')
#value = self.values.get("value")
#if value:
# select.set_value(value)
return select
class KeywordFilterElementWdg(BaseFilterElementWdg):
def init(self):
self.overall_search_type = ''
self.columns = []
self.look_ahead_columns = []
self.relevant = self.get_option("relevant")
self.mode = self.get_option("mode")
self.keyword_search_type = ''
self.keyword_map_search_type = ''
if self.mode == 'keyword_tree':
self.keyword_search_type = self.get_option("keyword_search_type") or 'workflow/base_keyword'
self.keyword_map_search_type = self.get_option("keyword_map_search_type") or 'workflow/keyword_map'
self.cross_db = self.get_option("cross_db") =='true'
column = self.get_option("column")
full_text_column = self.get_option("full_text_column")
if column:
if self.mode=='global':
raise SetupException('You are advised to use [keyword] mode since you have specified the column option.')
self.columns = column.split('|')
self.look_ahead_columns = self.columns[:]
if full_text_column:
self.columns = [full_text_column]
self.case_sensitive = self.kwargs.get("case_sensitive") in ['true',True]
self.do_search = self.kwargs.get("do_search")
self.script_path = self.kwargs.get("script_path")
if not self.mode:
self.mode = "keyword"
# TODO: this is dependent on the default database and not
# on the database that may actually be searched on
from pyasm.search import Sql
database_type = Sql.get_default_database_type()
has_index = False
if database_type == 'PostgreSQL':
database_version = Sql.get_default_database_version()
major = database_version[0]
minor = database_version[1]
if major >= 9:
has_index = True
elif major < 8:
has_index = False
else:
if minor >= 4:
has_index = True
else:
has_index = False
self.has_index = has_index
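    # Hedged configuration sketch (not from the original source): in a simple
    # search view this element is typically configured with options matching
    # the kwargs read in init() above, for example:
    #
    #   <element name='keywords'>
    #     <display class='tactic.ui.filter.KeywordFilterElementWdg'>
    #       <column>code|description</column>
    #       <mode>keyword</mode>
    #     </display>
    #   </element>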
def alter_search(self, search):
overall_search = search
self.overall_search_type = overall_search.get_search_type()
search_type = self.overall_search_type
search = Search(self.overall_search_type)
value = self.values.get("value")
if not value:
return
name = self.get_name()
if not self.columns:
self.columns = [name]
partial = self.values.get("partial") == 'on'
try:
value.encode('ascii')
except UnicodeEncodeError:
is_ascii = False
else:
is_ascii = True
        # keywords in a list are treated with AND in full-text search,
        # which is usually preferred in global search; they may be reassigned as a string in keyword mode
tmp_keywords = value.split(" ")
keywords = []
for keyword in tmp_keywords:
if not keyword or len(keyword) == 1:
continue
keywords.append(keyword)
if search_type == 'sthpw/sobject_list':
column = "keywords"
project_code = Project.get_project_code()
overall_search.add_filter("project_code", project_code)
if self.has_index and is_ascii:
if partial:
overall_search.add_op("begin")
overall_search.add_text_search_filter(column, keywords)
overall_search.add_keyword_filter(column, keywords)
overall_search.add_op("or")
else:
overall_search.add_text_search_filter(column, keywords)
else:
overall_search.add_keyword_filter(column, keywords)
# this is the default when the xml is just <element name='keywords'/>
elif self.mode == 'global':
column = "keywords"
search2 = Search("sthpw/sobject_list")
project_code = Project.get_project_code()
search2.add_filter("project_code", project_code)
search2.add_filter("search_type", search_type)
if self.has_index and is_ascii:
if partial:
search2.add_op("begin")
search2.add_text_search_filter(column, keywords)
search2.add_keyword_filter(column, keywords)
search2.add_op("or")
else:
search2.add_text_search_filter(column, keywords)
else:
search2.add_keyword_filter(column, keywords)
refs = search2.get_sobjects()
overall_search.add_filters("id", [x.get_value("search_id") for x in refs])
elif self.mode == 'keyword_tree':
if self.cross_db:
sub_search_list = []
else:
overall_search.add_op('begin')
# single col does not matter
# we should just use the AND logic for single_col or multi keywords column.
# Drawback is that multiple columns defined for the same sType may cause a return of 0 result
# if words from multi columns are used in the search. This is in line with partial_op = 'and' for
# db not supporting full text search
partial_op = 'or'
# in keyword_tree mode where there could be multi column
# keywords is kept as a string to maintain OR full-text search
value = value.replace(",", " ")
# find self parent tree value
project = Project.get()
sql = project.get_sql()
stmts = []
value_idx = []
impl = project.get_database_impl()
op = self.values.get("cte_op")
if not op:
op = "keyword"
# this is the hardcoded filter exact match of name
# or partial match with alias column
if op == 'child':
tbl = "p1"
elif op == 'both':
tbl = "p1"
filter_expr1 = [["begin"],["%s.name"%tbl,"%s"%value],["%s.alias"%tbl,"like","%%%s%%"%value],["or"]]
tbl = "p2"
filter_expr2 = [["begin"],["%s.name"%tbl,"%s"%value],["%s.alias"%tbl,"like","%%%s%%"%value],["or"]]
else:
tbl = "p2"
filter_expr = [["begin"],["%s.name"%tbl,"%s"%value],["%s.alias"%tbl,"like","%%%s%%"%value],["or"]]
if op == 'parent':
stmts.append(impl.get_parent_cte(filter_expr))
value_idx.append(1)
elif op == 'child':
stmts.append(impl.get_child_cte(filter_expr))
value_idx.append(3)
elif op == 'both':
stmts.append(impl.get_parent_cte(filter_expr2))
value_idx.append(1)
stmts.append(impl.get_child_cte(filter_expr1))
value_idx.append(3)
elif op == 'keyword':
pass
original_search = Search(self.keyword_search_type)
original_search.add_op('begin')
original_search.add_filter('name', value)
original_search.add_regex_filter('alias', value, op='EQI')
original_search.add_op('or')
original = original_search.get_sobject()
if original:
value = original.get('name')
# include the original value
keywords_list = [value]
for idx, stmt in enumerate(stmts):
results = sql.do_query(stmt)
for res in results:
if res[value_idx[idx]] not in keywords_list:
keywords_list.append(res[value_idx[idx]])
keywords = keywords_list
for column in self.columns:
if self.cross_db:
search2 = None
sub_search = None
search_type_obj = overall_search.get_search_type_obj()
table = search_type_obj.get_table()
column_type = None
search = Search(overall_search.get_search_type())
local_table = True
if column.find(".") != -1:
parts = column.split(".")
search_types = parts[:-1]
column = parts[-1]
local_table = False
"""
if self.cross_db:
search_types.reverse()
top_search_type = search_types[0]
search_type_str = '.'.join(search_types[1:])
if search_type_str:
expr = '''@SEARCH(%s)'''%(search_type_str)
sub_search = Search.eval(expr)
search2 = Search(top_search_type)
table = SearchType.get(top_search_type).get_table()
column_types = SearchType.get_column_types(top_search_type)
column_type = column_types.get(column)
else:
"""
prev_stype = search_type
for next_stype in search_types:
path = None
# support for path
if ':' in next_stype:
path, next_stype = next_stype.split(':')
search.add_join(next_stype, prev_stype, path=path)
prev_stype = next_stype
table = SearchType.get(next_stype).get_table()
column_types = SearchType.get_column_types(next_stype)
column_type = column_types.get(column)
if self.cross_db:
search2.add_keyword_filter(column, keywords_list, table=table, column_type=column_type, op=partial_op)
# sub_search is not present if it only traverses thru 1 sType
if sub_search:
sub_search.add_relationship_search_filter(search2, op="in")
else:
sub_search = search2
else:
if local_table:
overall_search.add_keyword_filter(column, keywords_list, table=table, column_type=column_type, op=partial_op)
else:
search.add_keyword_filter(column, keywords_list, table=table, column_type=column_type, op=partial_op)
overall_search.add_relationship_search_filter(search, op="in")
if self.cross_db:
sub_search_list.append(sub_search)
if self.cross_db:
rtn_history = False
overall_search.add_op('begin')
for sub_search in sub_search_list:
rtn = overall_search.add_relationship_search_filter(sub_search, op="in", delay_null=True)
if rtn_history == False:
rtn_history = rtn
# if all the sub_search return false, set null filter
if not rtn_history:
overall_search.set_null_filter()
overall_search.add_op('or')
else:
overall_search.add_op('or')
elif self.mode == 'keyword':
if self.cross_db:
sub_search_list = []
else:
overall_search.add_op('begin')
#search.add_op('begin')
# single col does not matter
# we should just use the AND logic for single_col or multi keywords column.
# Drawback is that multiple columns defined for the same sType may cause a return of 0 result
# if words from multi columns are used in the search. This is in line with partial_op = 'and' for
# db not supporting full text search
single_col = len(self.columns) == 1
partial_op = 'and'
# in keyword mode where there could be multi column
# keywords is kept as a string to maintain OR full-text search
value = value.replace(",", " ")
value = re.sub(' +', ' ', value)
keywords = value.strip()
if not keywords:
return
# keywords_list is used for add_keyword_filter()
keywords_list = keywords.split(" ")
single_keyword = len(keywords_list) == 1
if single_col:
if single_keyword:
multi_col_op = 'or' # this doesn't really matter
op = '|' # this doesn't really matter
else: # multi_keyword, single column
multi_col_op = 'or' # this doesn't really matter
op = '&'
else:
if single_keyword:
multi_col_op = 'or'
op = '|' # this doesn't really matter
else:
multi_col_op = 'or'
op = '&'
for column in self.columns:
if self.cross_db:
search2 = None
sub_search = None
# AND logic in full text search will be adopted if keywords
# is a list as opposed to string
if single_col:
keywords = keywords_list
if self.has_index and is_ascii:
search_type_obj = SearchType.get(search_type)
table = search_type_obj.get_table()
search = Search(overall_search.get_search_type())
local_table = True
if column.find(".") != -1:
parts = column.split(".")
search_types = parts[:-1]
column = parts[-1]
local_table = False
if self.cross_db:
search_types.reverse()
top_search_type = search_types[0]
search_type_str = '.'.join(search_types[1:])
if search_type_str:
expr = '''@SEARCH(%s)'''%(search_type_str)
sub_search = Search.eval(expr)
search2 = Search(top_search_type)
table = SearchType.get(top_search_type).get_table()
else:
prev_stype = search_type
for next_stype in search_types:
path = None
# support for path
if ':' in next_stype:
path, next_stype = next_stype.split(':')
search.add_join(next_stype, prev_stype, path=path)
prev_stype = next_stype
table = SearchType.get(next_stype).get_table()
if partial:
if self.cross_db:
search2.add_op("begin")
search2.add_text_search_filter(column, keywords, table=table, op=op)
search2.add_keyword_filter(column, keywords_list, table=table, op=partial_op)
search2.add_op("or")
if sub_search:
sub_search.add_relationship_search_filter(search2, op="in")
else:
sub_search = search2
else:
search.add_op("begin")
search.add_text_search_filter(column, keywords, table=table, op=op)
search.add_keyword_filter(column, keywords_list, table=table, op=partial_op)
search.add_op("or")
overall_search.add_relationship_search_filter(search, op="in")
else:
if self.cross_db:
if not search2:
raise TacticException('If cross_db is set to true, all the columns should be formatted in expression-like format with one or more sTypes: sthpw/task.description')
search2.add_text_search_filter(column, keywords, table=table, op=op)
if sub_search:
sub_search.add_relationship_search_filter(search2, op="in")
else:
sub_search = search2
else:
if local_table:
overall_search.add_text_search_filter(column, keywords, table=table, op=op)
else:
search.add_text_search_filter(column, keywords, table=table, op=op)
overall_search.add_relationship_search_filter(search, op="in")
else:
#value = value.replace(",", " ")
search_type_obj = overall_search.get_search_type_obj()
table = search_type_obj.get_table()
column_type = None
search = Search(overall_search.get_search_type())
local_table = True
if column.find(".") != -1:
parts = column.split(".")
search_types = parts[:-1]
column = parts[-1]
local_table = False
if self.cross_db:
search_types.reverse()
top_search_type = search_types[0]
search_type_str = '.'.join(search_types[1:])
if search_type_str:
expr = '''@SEARCH(%s)'''%(search_type_str)
sub_search = Search.eval(expr)
search2 = Search(top_search_type)
table = SearchType.get(top_search_type).get_table()
column_types = SearchType.get_column_types(top_search_type)
column_type = column_types.get(column)
else:
prev_stype = search_type
for next_stype in search_types:
path = None
# support for path
if ':' in next_stype:
path, next_stype = next_stype.split(':')
search.add_join(next_stype, prev_stype, path=path)
prev_stype = next_stype
table = SearchType.get(next_stype).get_table()
column_types = SearchType.get_column_types(next_stype)
column_type = column_types.get(column)
if self.cross_db:
search2.add_keyword_filter(column, keywords_list, table=table, column_type=column_type, op=partial_op)
# sub_search is not present if it only traverses thru 1 sType
if sub_search:
sub_search.add_relationship_search_filter(search2, op="in")
else:
sub_search = search2
else:
if local_table:
overall_search.add_keyword_filter(column, keywords_list, table=table, column_type=column_type, op=partial_op)
else:
search.add_keyword_filter(column, keywords_list, table=table, column_type=column_type, op=partial_op)
overall_search.add_relationship_search_filter(search, op="in")
if self.cross_db:
sub_search_list.append(sub_search)
#if not self.cross_db:
# search.add_op('or')
#self.search_type = search.get_search_type()
if self.cross_db:
rtn_history = False
overall_search.add_op('begin')
for sub_search in sub_search_list:
rtn = overall_search.add_relationship_search_filter(sub_search, op="in", delay_null=True)
if rtn_history == False:
rtn_history = rtn
# if all the sub_search return false, set null filter
if not rtn_history:
overall_search.set_null_filter()
overall_search.add_op(multi_col_op)
else:
overall_search.add_op(multi_col_op)
else:
            raise TacticException('Mode [%s] in keyword search is not supported' % self.mode)
def get_display(self):
# can predefine a filter_search_type for the look ahead search
self.filter_search_type = self.get_option("filter_search_type")
if not self.filter_search_type:
self.filter_search_type = self.overall_search_type
div = DivWdg()
div.add_style("position: relative")
if self.show_title:
name = self.get_name()
title = self.get_title()
if title == None:
title = name
title = Common.get_display_title(title)
if title:
div.add("<b>%s: </b>" % title )
if self.show_title:
title_div = DivWdg()
div.add(title_div)
title_div.add_style("float: left")
name = self.get_name()
title = self.get_title()
if not title:
title = name
title = Common.get_display_title(title)
title_div.add("%s:" % title )
title_div.add_style("width: 80px")
title_div.add_style("font-weight: bold")
title_div.add_style("margin-left: 15px")
title_div.add_style("padding-top: 2px")
column = None
hint_text = 'any keywords'
search_type = 'sthpw/sobject_list'
if not self.columns and self.mode in ['keyword','keyword_tree']:
name = self.get_name()
self.columns = [name]
# check if column exists
if self.filter_search_type:
exists = SearchType.column_exists(self.filter_search_type, name)
if not exists:
name = "name"
exists = SearchType.column_exists(self.filter_search_type, name)
if not exists:
name = "description"
exists = SearchType.column_exists(self.filter_search_type, name)
if not exists:
raise SetupException("Keyword Filter column [%s] does not exist"%name)
self.columns = [name]
if self.mode in ['keyword','keyword_tree']:
search_type = self.filter_search_type
# clean up the hint text and find the last search_type
hints = []
for column in self.columns:
if column.find(".") != -1:
parts = column.split(".")
hint = parts[-1]
#NOTE: no need to determine sType
#tmp_search_type = parts[-2]
else:
hint = column
hints.append(hint)
hint_text = ', '.join(hints)
if len(hint_text) > 30:
hint_text = '%s...'%hint_text[0:29]
if self.kwargs.get("hint_text"):
hint_text = self.kwargs.get("hint_text")
# search_type is a list matching the column for potential join
width = self.kwargs.get("width")
if not width:
width = "230px"
show_toggle = self.get_option("show_toggle")
if show_toggle in ['true', True]:
#icon = "BS_MENU_DOWN"
icon = "FA_FILTER"
icon_pos = "left"
else:
icon = ""
icon_pos = ""
# NOTE: This calls the refresh twice for some reason
"""
custom_cbk = {
'enter': '''
spt.dg_table.search_cbk( {}, {src_el: bvr.src_el} );
'''
}
"""
text = LookAheadTextInputWdg(
name="value",
do_search=self.do_search,
script_path=self.script_path,
#custom_cbk=custom_cbk,
filter_search_type=self.filter_search_type,
search_type=search_type,
column=self.look_ahead_columns,
relevant = self.relevant,
width = width,
height = "30px",
hint_text=hint_text,
case_sensitive = self.case_sensitive,
icon=icon,
icon_pos=icon_pos,
)
value = self.values.get("value")
if value:
text.set_value(value)
text.set_hidden_value(value)
text.add_behavior( {
'type': 'keyup',
'cbjs_action': '''
var key = evt.key;
if (key == 'enter') {
var top = bvr.src_el.getParent(".spt_input_text_top");
var hidden_el = top.getElement(".spt_text_value");
if (bvr.src_el.value) hidden_el.value = bvr.src_el.value;
spt.dg_table.search_cbk( {}, {src_el: bvr.src_el} );
}
''' } )
name = self.get_name()
text.add_behavior( {
'type': 'change',
'element_name': name,
'cbjs_action': self.get_set_js_action()
} )
div.add(text)
if self.mode == 'keyword_tree':
text.add_style('float','left')
op_select = SelectWdg("cte_op")
op_select.add_style("width: 100px")
# only support in or not in for multi stypes column
op_select.set_option("labels", "keyword|parent|child|both")
op_select.set_option("values", "keyword|parent|child|both")
value = self.values.get("cte_op")
if value:
op_select.set_value(value)
div.add(op_select)
op_select.add_style("float: left")
op_select.add_style("margin-left: 10px")
show_toggle = self.get_option("show_toggle")
if show_toggle in ['true', True]:
icon_wdg = text.get_icon_wdg()
if icon_wdg:
icon_wdg.add_class("spt_search_toggle")
icon_wdg.add_class("hand")
"""
from pyasm.widget import IconWdg
icon_div = DivWdg()
icon = IconWdg("toggle", "BS_CHEVRON_DOWN")
icon_div.add(icon)
icon_div.add_class("hand spt_search_toggle")
icon_div.add_style("position: absolute")
icon_div.add_style("top: 6px")
icon_div.add_style("right: 4px")
div.add(icon_div)
"""
show_partial = self.get_option("show_partial")
if show_partial in ['true', True]:
from pyasm.widget import IconWdg
icon_div = DivWdg()
icon = IconWdg("Match", IconWdg.ARROWHEAD_DARK_DOWN)
icon_div.add(icon)
icon_div.add_class("hand")
icon_div.add_style("position: absolute")
icon_div.add_style("top: 5px")
icon_div.add_style("right: 0")
from tactic.ui.container import DialogWdg
dialog = DialogWdg(show_title=False, show_pointer=True)
dialog.set_as_activator(icon_div, {'x': -150, 'y': 10})
div.add(dialog)
match_div = DivWdg()
match_div.add_style("width: 175px")
dialog.add(match_div)
checkbox = CheckboxWdg("partial")
match_div.add(checkbox)
checkbox.add_attr("title", "Use partial word match (slower)")
match_div.add_style("padding: 10px")
match_div.add_color("color", "color")
match_div.add_color("background", "background")
match_div.add(" Use partial word match")
if self.mode == 'keyword' and self.has_index:
div.add(icon_div)
elif self.mode =='global' and self.has_index:
div.add(icon_div)
else:
# partial is implied otherwise
hidden = HiddenWdg("partial")
div.add(hidden)
hidden.set_value("on")
return div
class DateFilterElementWdg(BaseFilterElementWdg):
'''This filter uses a subselect so that it can look for dates that
are on a table other than the main search'''
def alter_search(self, search):
expression = self.kwargs.get("column")
if not expression:
expression = self.get_name()
search_types = []
if expression.find(".") != -1:
#search_type, date_col = expression.split(".")
parts = expression.split(".")
search_types = parts[:-1]
if search_types:
search_type = '.'.join(search_types)
date_col = parts[-1]
else:
search_type = search.get_search_type()
date_col = expression
start_date = self.values.get("start_date")
end_date = self.values.get("end_date")
if not start_date and not end_date:
return
if isinstance(start_date, six.string_types) and start_date.startswith("$"):
start_date = parser.parse( Search.eval(start_date) )
if isinstance(end_date, six.string_types) and end_date.startswith("$"):
end_date = parser.parse( Search.eval(end_date) )
from pyasm.common import SPTDate
start_date = SPTDate.add_local_timezone(start_date)
start_date = SPTDate.convert(start_date)
end_date = SPTDate.add_local_timezone(end_date)
end_date = SPTDate.convert(end_date)
# use the expression only if 1 or more search_types defined in column
if search_types:
expr = "@SEARCH(%s)"%search_type
search2 = Search.eval(expr)
else:
search2 = Search(search_type)
search2.add_date_range_filter(date_col, start_date, end_date)
search.add_relationship_search_filter(search2)
def get_display(self):
div = DivWdg()
table = Table()
div.add(table)
table.add_row()
if self.show_title:
title_div = DivWdg()
table.add_cell(title_div)
name = self.get_name()
title = self.get_title()
if not title:
title = name
title = Common.get_display_title(title)
title_div.add("%s:" % title )
title_div.add_style("width: 80px")
title_div.add_style("font-weight: bold")
title_div.add_style("margin-left: 15px")
title_div.add_style("padding-top: 2px")
td = table.add_cell()
op = DivWdg("is")
td.add(op)
td = table.add_cell()
op = DivWdg(" between ")
op.add_style("margin-left: 5px")
td.add(op)
start_date = self.values.get("start_date")
end_date = self.values.get("end_date")
from tactic.ui.widget import CalendarInputWdg
td = table.add_cell()
cal1 = CalendarInputWdg("start_date")
if start_date:
cal1.set_value(start_date)
td.add(cal1)
td = table.add_cell()
spacing = DivWdg(" and ")
td.add(spacing)
td = table.add_cell()
cal2 = CalendarInputWdg("end_date")
if end_date:
cal2.set_value(end_date)
td.add(cal2)
return div
class DateRangeFilterElementWdg(BaseFilterElementWdg):
def alter_search(self, search):
start_col = self.kwargs.get("start_date_col")
end_col = self.kwargs.get("end_date_col")
name = self.get_name()
if not start_col:
start_col = "%s_start_date" % name
if not end_col:
end_col = "%s_end_date" % name
# find a relative search type if needed
parts = start_col.split(".")
search_types = parts[:-1]
start_col = parts[-1]
# find a relative search type if needed
parts = end_col.split(".")
search_types2 = parts[:-1]
end_col = parts[-1]
if search_types != search_types2:
raise SearchException('Search types for start and end column must match')
# just take the first one
if search_types:
#search_type = search_types[0]
search_type = '.'.join(search_types)
else:
search_type = search.get_search_type()
start_date = self.values.get("start_date")
end_date = self.values.get("end_date")
if not start_date and not end_date:
return
from pyasm.common import SPTDate
start_date = SPTDate.add_local_timezone(start_date)
start_date = SPTDate.convert(start_date)
end_date = SPTDate.add_local_timezone(end_date)
end_date = SPTDate.convert(end_date)
operator = self.get_option("op")
if operator != 'not in':
operator = 'in'
# use the expression only if 1 or more search_types defined in column
if search_types:
expr = "@SEARCH(%s)"%search_type
search2 = Search.eval(expr)
else:
search2 = Search(search_type)
search2.add_dates_overlap_filter(start_col, end_col, start_date, end_date, op="in")
search.add_relationship_search_filter(search2, op=operator)
return
"""
# Add in the ability to do something like ...
#select * from sequence where code not in (select distinct sequence_code from shot);
relationship = None
if relationship == 'search_type':
where = '''id not in (select distinct search_id from task where search_type like 'prod/asset?project=%')'''
else:
where = '''id not in (select distinct sequence_code from shot)'''
#search.add_empty_related_filter("sthpw/task", op='not in')
"""
def get_display(self):
div = DivWdg()
name = self.get_name()
title = Common.get_display_title(name)
operator = self.get_option("op")
if operator != 'not in':
operator = 'in'
table = Table()
div.add(table)
table.add_row()
table.add_color("color", "color")
name_div = DivWdg()
#name_div.add("%s " % title)
table.add_cell(name_div)
#div.add_style("border: solid blue 1px")
if operator == 'in':
op = DivWdg("overlap ")
else:
op = DivWdg("does not overlap ")
table.add_cell(op)
from tactic.ui.widget import CalendarInputWdg
cal1 = CalendarInputWdg("start_date")
table.add_cell(cal1)
start_date = self.values.get("start_date")
if start_date:
cal1.set_value(start_date)
table.add_cell(" and ")
cal2 = CalendarInputWdg("end_date")
table.add_cell(cal2)
end_date = self.values.get("end_date")
if end_date:
cal2.set_value(end_date)
return div
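# A hypothetical config sketch for this date-range element (the option names follow
# alter_search above; the element name, display class path, and column values are
# illustrative assumptions, not taken from a shipped config):
#
# <element name='schedule'>
#   <display class='tactic.ui.filter.DateRangeFilterElementWdg'>
#     <start_date_col>bid_start_date</start_date_col>
#     <end_date_col>bid_end_date</end_date_col>
#     <op>in</op>
#   </display>
# </element>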
class ExpressionFilterElementWdg(BaseFilterElementWdg):
def alter_search(self, search):
#prefix = self.values.get("prefix")
#column = self.values.get("%s_column" % prefix)
if not self.values.get('option'):
return
expr = self.get_option("expression")
# e.g. @SEARCH(vfx/asset['code','EQ','002'])
from pyasm.biz import ExpressionParser
parser = ExpressionParser()
expr_search = parser.eval(expr)
search.add_relationship_search_filter(expr_search)
def get_display(self):
title = self.get_option("title")
if not title:
title = ''
div = SpanWdg()
div.add("%s" % title)
checkbox = CheckboxWdg("option")
checkbox.set_attr("value", "expr_items")
checkbox.set_checked()
cbjs_action = self.get_option("cbjs_action")
if cbjs_action:
checkbox.add_behavior( {
'type': 'click_up',
'propogate_evt': 'true',
'cbjs_action': cbjs_action
})
div.add(checkbox)
return div
# --------------------------------------------------------------------------------------------------------------------
# ReplaceWithValueExpressionFilterElementWdg ...
#
# This is really just the ExpressionFilterElementWdg but with the ability to have a text field entry
# for a value to replace within your expression. Wherever you place $REPLACE in your expression string
# will be replaced with the value you enter in the text field entry. Below is some example configuration
# XML for this widget ...
#
# <element name='has_subtask_with_lead'>
# <display class='tactic.ui.filter.ReplaceWithValueExpressionFilterElementWdg'>
# <expression>@GET(MMS/subtask['lead_name','$REPLACE'].job_id)</expression>
# <column>id</column>
# <field_size>22</field_size>
# <field_label>matching:</field_label>
# </display>
# </element>
#
class ReplaceWithValueExpressionFilterElementWdg(BaseFilterElementWdg):
def alter_search(self, search):
expr = self.get_option("expression")
column = self.get_option("column")
field_value = self.values.get("field") or ""
from pyasm.biz import ExpressionParser
parser = ExpressionParser()
if field_value or field_value == 0:
results = parser.eval(expr, vars={
"REPLACE": field_value,
"VALUE": field_value
})
if results:
search.add_filters(column, results)
else:
search.set_null_filter()
def get_display(self):
# mode "text" uses a textbox to search, while "select" uses a select dropdown.
mode = self.get_option("mode")
if not mode:
mode = "text"
if mode not in ["select", "text"]:
class_name = self.__class__.__name__
raise TacticException("%s mode option can only be 'select' or 'text'." %class_name)
field_size = self.get_option("field_size")
#if not field_size:
# field_size = "32"
#else:
# field_size = "%s" % field_size
div = SpanWdg()
if mode == "text":
kwargs = {
"name": "field",
"search_type": self.get_option("search_type"),
"column": self.get_option("display_column"),
"value_column": self.get_option("value_column"),
}
if self.get_option("search_type"):
text = LookAheadTextInputWdg(**kwargs)
text.set_name("field")
else:
text = TextInputWdg(name="field")
if field_size:
text.add_attr("size", field_size)
div.add(text)
text.add_behavior( {
'type': 'keyup',
'cbjs_action': '''
var key = evt.key;
if (key == 'enter') {
spt.dg_table.search_cbk( {}, {src_el: bvr.src_el} );
}
''' } )
elif mode == "select":
values = self.get_option("select_values")
labels = self.get_option("select_labels")
select = SelectWdg("field")
select.add_empty_option("-- Select --")
select.set_option("values", values)
select.set_option("labels", labels)
div.add(select)
mod_value = self.get_option('last_modified_days')
if mod_value:
select = SelectWdg("last_modified", label='modified in: ')
#select.add_style("width: 10px")
if mod_value =='true':
mod_value = '10|20|30|60|90'
mod_values = mod_value.split('|')
mod_values = [x.strip() for x in mod_values]
mod_labels = ["last %s Days"%x for x in mod_values]
select.add_empty_option("-- Select --")
select.set_option("values", mod_values)
select.set_option("labels", mod_labels)
div.add(select)
return div
__all__.append("TaskConnectFilterElementWdg")
class TaskConnectFilterElementWdg(ReplaceWithValueExpressionFilterElementWdg):
def alter_search(self, search):
field_value = self.values.get("field")
last_mod_value = None
last_mod_option = self.get_option("last_modified_days")
if last_mod_option:
last_mod_value = self.values.get('last_modified')
search_type = self.get_option("search_type")
if search_type:
search_type = Project.get_full_search_type(search_type)
# This controls what to filter at the end other than id
filter_column = self.get_option("filter_column")
do_search = self.get_option("do_search")
if not field_value:
if do_search =='true':
pass
else:
return
column = self.get_option("column")
assert column
search2 = Search("sthpw/connection")
select = search2.get_select()
from_table = "connection"
to_table = "task"
from_col = "src_search_id"
to_col = "id"
select.add_join(from_table, to_table, from_col, to_col, join="INNER")
prefix = "src_"
prefix2 = "dst_"
search2.add_filter("%ssearch_type" % prefix, "sthpw/task")
# one can supply a search type for searching in the connection table
if not search_type:
search_type = search.get_search_type()
search2.add_filter("%ssearch_type" % prefix2, search_type)
search2.add_column("%ssearch_id" % prefix2)
# use field value
if field_value:
search2.add_filter(column, field_value, table="task")
filters = self.get_option("filters")
if filters:
search2.add_op_filters(filters)
if last_mod_value:
last_mod_value = int(last_mod_value)
today = datetime.datetime.today()
date = today + relativedelta(days=-last_mod_value)
setting = "%Y-%m-%d"
date_value = date.strftime(setting)
search2.add_where( "task.id in (SELECT search_id from status_log where timestamp > '%s')"%date_value )
statement = search2.get_statement()
sql = search2.get_sql()
values = sql.do_query(statement)
values = [x[0] for x in values]
if not filter_column:
filter_column = 'id'
search.add_filters(filter_column, values)
class LoginFilterElementWdg(BaseFilterElementWdg):
def alter_search(self, search):
prefix = self.values.get("prefix")
column = self.values.get("%s_column" % prefix)
option = self.values.get("option")
if option == 'my_items':
search.add_user_filter()
elif option == 'all_items':
pass
def get_display(self):
# FIXME
title = "Jobs"
div = SpanWdg()
div.add("My %s: " % title)
checkbox = CheckboxWdg("option")
checkbox.set_attr("value", "self_items")
checkbox.set_checked()
div.add(checkbox)
div.add(" ")
div.add("All User %s: " % title)
checkbox = CheckboxWdg("option")
checkbox.set_attr("value", "all_items")
div.add(checkbox)
return div
#
# FIXME: hardcoded for MMS
#
class MultiSelectFilterElementWdg(BaseFilterElementWdg):
def alter_search(self, search):
filters = ['MMS/job.job_number_prefix', 'MMS/job.job_number_year', 'MMS/job.job_number']
for filter in filters:
value = self.values.get(filter)
if value:
#search.add_relationship_filter(filter)
search_type, column = filter.split('.')
search.add_filter(column, value)
def get_display(self):
top = SpanWdg()
top.add(" matches ")
filters = ['MMS/job.job_number_prefix', 'MMS/job.job_number_year', 'MMS/job.job_number']
for filter in filters:
text = TextWdg(filter)
text.add_attr("size", "5")
top.add(text)
top.add(" - ")
return top
# This is a generalized version of the MultiSelectFilterElementWdg
#
class MultiFieldFilterElementWdg(BaseFilterElementWdg):
def alter_search(self, search):
field_list_option = self.get_option("field_list")
if not field_list_option:
return
stmt = 'field_info_list = %s' % field_list_option.replace("\n"," ")
try:
exec(stmt)
except:
return
for field_info in field_info_list:
field = field_info.get('field')
op = field_info.get('op')
if not op:
op = '='
if field:
value = self.values.get(field)
to = field_info.get('to')
if to:
if to.endswith('()'):
stmt = 'value = value.%s' % to
else:
# assume casting to other value besides string ...
stmt = 'value = %s(value)' % to
try:
exec(stmt)
except:
# TODO ... proper error message here?
continue
if value:
# print "field is [%s] ... op is [%s] ... to is [%s] ... value is [%s]" % (field, op, to, value)
search.add_filter(field, value, op)
def set_configuration_error(self, top_el, error_message):
top_el.add("Error in Configuration -- %s" % error_message)
top_el.add_styles("color: orange; font-style: italic; font-weight: bold;")
def get_display(self):
top = SpanWdg()
field_list_option = self.get_option("field_list")
if not field_list_option:
self.set_configuration_error( top, "No 'field_list' option provided" )
return top
top.add(" matches ")
stmt = 'field_info_list = %s' % field_list_option.replace("\n"," ")
try:
exec(stmt)
except:
self.set_configuration_error( top, "badly formed 'field_list' option" )
return top
# field_list option should look like this:
# [ {'field': 'job_number_prefix', 'size': '1', 'maxlength': '1', 'label': 'Prefix', 'to': 'upper()' },
# {'field': 'job_number_year', 'size': '2', 'maxlength': '2', 'label': 'Year (2 digits)', 'to': 'int' },
# {'field': 'job_number', 'size': '5', 'maxlength': '5', 'label': 'Number', 'to': 'int' },
# ]
#
# ... you can also specify a specific op in each of the above ... defaults to 'op': '='
default_size = 10
done_first = False
for field_info in field_info_list:
field = field_info.get('field')
if field:
text = TextWdg(field)
size = field_info.get('size')
if not size:
size = default_size
text.add_attr("size", "%s" % size)
maxlength = field_info.get('maxlength')
if maxlength:
text.add_attr("maxlength", "%s" % maxlength)
label = field_info.get('label')
if label:
if done_first:
top.add(" %s: " % label)
else:
top.add("%s: " % label)
top.add(text)
done_first = True
return top
# --------------------------------------------------------------------------------------------------------------------
# CompoundValueFilterElementWdg ...
#
# This is used to set up multiple text field entries to build a single value up to compare to a specific
# column ... example is for MMS job number. Below is some example configuration XML for this widget ...
#
# <display class='tactic.ui.filter.CompoundValueFilterElementWdg'>
# <field_list> [
# { 'field': 'job_number_prefix', 'label': 'Prefix', 'size': '1', 'maxlength': '1', 'to': 'upper()' },
# { 'field': 'job_number_year', 'label': 'Year (2 digit)', 'size': '2', 'maxlength': '2', 'to': 'zfill(2)' },
# { 'field': 'job_number', 'label': 'Number', 'size': '5', 'maxlength': '5', 'to': 'zfill(5)' }
# ] </field_list>
# <column>job_number_full</column>
# <compound_value_expr>{job_number_prefix}{job_number_year}-{job_number}</compound_value_expr>
# <op>=</op>
# </display>
#
class CompoundValueFilterElementWdg(BaseFilterElementWdg):
def alter_search(self, search):
field_list_option = self.get_option("field_list")
if not field_list_option:
return
stmt = 'field_info_list = %s' % field_list_option.replace("\n"," ")
try:
exec(stmt)
except:
return
column = self.get_option("column")
if not column:
print("\n")
print("\n")
print("*** ERROR: no column specified for CompoundValueFilterElementWdg")
print("\n")
print("\n")
return
field_map = {}
for field_info in field_info_list:
field = field_info.get('field')
if field:
value = self.values.get(field)
to = field_info.get('to')
if to:
if to.endswith(')'):
stmt = 'value = value.%s' % to
else:
# assume casting to other value besides string ...
stmt = 'value = %s(value)' % to
try:
exec(stmt)
except:
# TODO ... proper error message here?
continue
if value:
field_map[field] = value
compound_value = self.get_option("compound_value_expr")
if not compound_value:
return
for f, v in field_map.items():
compound_value = compound_value.replace( "{%s}" % f, v )
op = self.get_option("op")
if not op:
op = "="
search.add_filter( column, compound_value, op)
def set_configuration_error(self, top_el, error_message):
top_el.add("Error in Configuration -- %s" % error_message)
top_el.add_styles("color: orange; font-style: italic; font-weight: bold;")
def get_display(self):
top = SpanWdg()
field_list_option = self.get_option("field_list")
if not field_list_option:
self.set_configuration_error( top, "No 'field_list' option provided" )
return top
top.add(" matches")
stmt = 'field_info_list = %s' % field_list_option.replace("\n"," ")
try:
exec(stmt)
except:
self.set_configuration_error( top, "badly formed 'field_list' option" )
return top
# field_list option should look like this:
# [ {'field': 'job_number_prefix', 'size': '1', 'maxlength': '1', 'label': 'Prefix', 'to': 'upper()' },
# {'field': 'job_number_year', 'size': '2', 'maxlength': '2', 'label': 'Year (2 digits)', 'to': 'int' },
# {'field': 'job_number', 'size': '5', 'maxlength': '5', 'label': 'Number', 'to': 'int' },
# ]
default_size = 10
for field_info in field_info_list:
field = field_info.get('field')
if field:
text = TextWdg(field)
size = field_info.get('size')
if not size:
size = default_size
text.add_attr("size", "%s" % size)
maxlength = field_info.get('maxlength')
if maxlength:
text.add_attr("maxlength", "%s" % maxlength)
label = field_info.get('label')
if label:
top.add(" %s: " % label)
top.add(text)
return top
class CheckboxFilterElementWdg(BaseFilterElementWdg):
def alter_search(self, search):
keys = []
for key, value in self.values.items():
if not key.startswith("button"):
continue
if value != "on":
continue
keys.append(key)
#if not keys:
# search.set_null_filter()
search.add_op("begin")
for key in keys:
button = self.values.get("button_%s" % key)
option = key.replace("button_", "")
expression = self.get_option(option)
if expression:
sobjects = Search.eval(expression)
ids = [x.get_id() for x in sobjects]
search.add_filters("id", ids)
search.add_op("or")
def get_display(self):
from tactic.ui.widget import ActionButtonWdg
top = self.top
mode = 'checkbox'
titles = self.get_option("options")
actual_titles = []
if titles:
titles = titles.split("|")
actual_titles = self.get_option("titles")
if actual_titles:
actual_titles = actual_titles.split("|")
if len(actual_titles) != len(titles):
raise TacticException('titles count has to match the options count in the Checkbox Filter Element in Simple Search.')
else:
actual_titles = titles[:]
else:
titles = []
table = Table()
table.add_color("color", "color")
top.add(table)
table.add_row()
if mode == 'button':
text = HiddenWdg("button")
top.add(text)
text.add_class("spt_text")
for title in titles:
button = ActionButtonWdg(title=title)
table.add_cell(button)
button.add_behavior( {
'type': 'click_up',
'title': title,
'cbjs_action': self.get_set_js_action()
} )
else:
for i, title in enumerate(titles):
td = table.add_cell()
if i != 0:
div = DivWdg()
div.add_style("float: left")
td.add(div)
div.add_style("height: 30px")
#div.add(" ")
div.add_style("border-style: solid")
div.add_style("border-width: 0px 1px 0px 0px")
div.add_style("margin-right: 15px")
div.add_style("border-color: %s" % div.get_color("border"))
button_title = "button_%s" % title
checkbox = CheckboxWdg(button_title)
td.add(checkbox)
actual_title = actual_titles[i]
td.add(actual_title)
if self.values.get(button_title):
checkbox.set_checked()
return top
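# A hypothetical config sketch for this checkbox element (option and expression values
# are illustrative assumptions): each entry in "options" becomes a checkbox, and an
# option of the same name supplies the expression evaluated in alter_search above.
#
# <element name='status_filter'>
#   <display class='tactic.ui.filter.CheckboxFilterElementWdg'>
#     <options>in_progress|complete</options>
#     <titles>In Progress|Complete</titles>
#     <in_progress>@SOBJECT(sthpw/task['status','in_progress'])</in_progress>
#     <complete>@SOBJECT(sthpw/task['status','complete'])</complete>
#   </display>
# </element>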
class ButtonFilterElementWdg(CheckboxFilterElementWdg):
pass
| 33.705856 | 194 | 0.522472 | [
"EPL-1.0"
] | arunpillaii/TACTIC | src/tactic/ui/filter/filter_element_wdg.py | 74,827 | Python |
"""This module contains the general information for BiosVfExecuteDisableBit ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class BiosVfExecuteDisableBitConsts:
SUPPORTED_BY_DEFAULT_NO = "no"
SUPPORTED_BY_DEFAULT_YES = "yes"
VP_EXECUTE_DISABLE_BIT_DISABLED = "disabled"
VP_EXECUTE_DISABLE_BIT_ENABLED = "enabled"
VP_EXECUTE_DISABLE_BIT_PLATFORM_DEFAULT = "platform-default"
VP_EXECUTE_DISABLE_BIT_PLATFORM_RECOMMENDED = "platform-recommended"
class BiosVfExecuteDisableBit(ManagedObject):
"""This is BiosVfExecuteDisableBit class."""
consts = BiosVfExecuteDisableBitConsts()
naming_props = set([])
mo_meta = MoMeta("BiosVfExecuteDisableBit", "biosVfExecuteDisableBit", "Execute-Disable-Bit", VersionMeta.Version111j, "InputOutput", 0x3f, [], ["admin", "ls-compute", "ls-config", "ls-server", "ls-server-policy", "pn-policy"], [u'biosSettings', u'biosVProfile'], [], ["Get", "Set"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111j, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"prop_acl": MoPropertyMeta("prop_acl", "propAcl", "ulong", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111j, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"supported_by_default": MoPropertyMeta("supported_by_default", "supportedByDefault", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, None, ["no", "yes"], []),
"vp_execute_disable_bit": MoPropertyMeta("vp_execute_disable_bit", "vpExecuteDisableBit", "string", VersionMeta.Version111j, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["disabled", "enabled", "platform-default", "platform-recommended"], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"propAcl": "prop_acl",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"supportedByDefault": "supported_by_default",
"vpExecuteDisableBit": "vp_execute_disable_bit",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.prop_acl = None
self.sacl = None
self.status = None
self.supported_by_default = None
self.vp_execute_disable_bit = None
ManagedObject.__init__(self, "BiosVfExecuteDisableBit", parent_mo_or_dn, **kwargs)
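# A minimal usage sketch, assuming a reachable UCS Manager and a parent BIOS policy dn
# (the address, credentials, and dn below are hypothetical, not taken from this module):
#
# from ucsmsdk.ucshandle import UcsHandle
# handle = UcsHandle("192.0.2.10", "admin", "password")
# handle.login()
# mo = BiosVfExecuteDisableBit("org-root/bios-prof-example",
#                              vp_execute_disable_bit="enabled")
# handle.add_mo(mo, modify_present=True)
# handle.commit()
# handle.logout()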
| 57.789474 | 287 | 0.687007 | [
"Apache-2.0"
] | Curlyfingers/ucsmsdk | ucsmsdk/mometa/bios/BiosVfExecuteDisableBit.py | 3,294 | Python |
"""
MIT License
Copyright (c) 2021 TheHamkerCat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from pyrogram import filters
from wbb import app
from wbb.core.decorators.errors import capture_err
__MODULE__ = "WebSS"
__HELP__ = "/webss | .webss [URL] - Take A Screenshot Of A Webpage"
@app.on_message(filters.command("webss"))
@capture_err
async def take_ss(_, message):
try:
if len(message.command) != 2:
return await message.reply_text(
"Give A Url To Fetch Screenshot."
)
url = message.text.split(None, 1)[1]
m = await message.reply_text("**Taking Screenshot**")
await m.edit("**Uploading**")
try:
await app.send_photo(
message.chat.id,
photo=f"https://webshot.amanoteam.com/print?q={url}",
)
except TypeError:
return await m.edit("No Such Website.")
await m.delete()
except Exception as e:
await message.reply_text(str(e))
| 36.888889 | 78 | 0.705823 | [
"MIT"
] | TAMILVIP007/WilliamButcherBot | wbb/modules/webss.py | 1,992 | Python |
import tweepy
from .config import Config
def update_twitter_banner(api: tweepy.API) -> None:
"""Update the twitter banner of the current profile using the image specified in config."""
api.update_profile_banner(Config.IMAGE_PATH)
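# A minimal usage sketch (the credential names below are placeholder assumptions; only
# Config.IMAGE_PATH is actually read by this helper):
#
# auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
# auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
# update_twitter_banner(tweepy.API(auth))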
| 26.777778 | 95 | 0.767635 | [
"MIT"
] | janaSunrise/Spotify-Twitter-Banner | app/twitter.py | 241 | Python |
from torch.optim.lr_scheduler import LambdaLR
from transformers import get_linear_schedule_with_warmup
from exp import ex
def get_no_scheduler(optimizer, num_warmup_steps, num_training_steps):
def lr_lambda(current_step):
return 1
return LambdaLR(optimizer, lr_lambda)
sched_dict = {
'linear': get_linear_schedule_with_warmup,
'none': get_no_scheduler
}
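# A minimal sketch of how this table is meant to be used (optimizer and step counts are
# placeholders; in this project the captured get_scheduler below is the real entry point):
#
# scheduler = sched_dict["linear"](optimizer, num_warmup_steps=100, num_training_steps=1000)
# for _ in range(1000):
#     optimizer.step()
#     scheduler.step()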
@ex.capture()
def get_scheduler(optimizer, t_total, warmup, scheduler_name, grad_acc_steps):
warmup_steps = int(t_total * warmup)
scheduler = sched_dict[scheduler_name](optimizer, warmup_steps, t_total)
scheduler.accumulated = 0
scheduler.grad_acc_steps = grad_acc_steps
return scheduler | 27.615385 | 79 | 0.747911 | [
"MIT"
] | HS-YN/PanoAVQA | code/optimizer/schedulers.py | 718 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['PrivateEndpointConnectionArgs', 'PrivateEndpointConnection']
@pulumi.input_type
class PrivateEndpointConnectionArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
group_id: Optional[pulumi.Input[str]] = None,
private_endpoint: Optional[pulumi.Input['PrivateEndpointPropertyArgs']] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']] = None,
provisioning_state: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a PrivateEndpointConnection resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] group_id: Group id of the private endpoint.
:param pulumi.Input['PrivateEndpointPropertyArgs'] private_endpoint: Private endpoint which the connection belongs to.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.
:param pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs'] private_link_service_connection_state: Connection State of the Private Endpoint Connection.
:param pulumi.Input[str] provisioning_state: Provisioning state of the private endpoint.
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if group_id is not None:
pulumi.set(__self__, "group_id", group_id)
if private_endpoint is not None:
pulumi.set(__self__, "private_endpoint", private_endpoint)
if private_endpoint_connection_name is not None:
pulumi.set(__self__, "private_endpoint_connection_name", private_endpoint_connection_name)
if private_link_service_connection_state is not None:
pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
"""
Cosmos DB database account name.
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> Optional[pulumi.Input[str]]:
"""
Group id of the private endpoint.
"""
return pulumi.get(self, "group_id")
@group_id.setter
def group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_id", value)
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> Optional[pulumi.Input['PrivateEndpointPropertyArgs']]:
"""
Private endpoint which the connection belongs to.
"""
return pulumi.get(self, "private_endpoint")
@private_endpoint.setter
def private_endpoint(self, value: Optional[pulumi.Input['PrivateEndpointPropertyArgs']]):
pulumi.set(self, "private_endpoint", value)
@property
@pulumi.getter(name="privateEndpointConnectionName")
def private_endpoint_connection_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the private endpoint connection.
"""
return pulumi.get(self, "private_endpoint_connection_name")
@private_endpoint_connection_name.setter
def private_endpoint_connection_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_endpoint_connection_name", value)
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']]:
"""
Connection State of the Private Endpoint Connection.
"""
return pulumi.get(self, "private_link_service_connection_state")
@private_link_service_connection_state.setter
def private_link_service_connection_state(self, value: Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']]):
pulumi.set(self, "private_link_service_connection_state", value)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[pulumi.Input[str]]:
"""
Provisioning state of the private endpoint.
"""
return pulumi.get(self, "provisioning_state")
@provisioning_state.setter
def provisioning_state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "provisioning_state", value)
class PrivateEndpointConnection(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
A private endpoint connection
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[str] group_id: Group id of the private endpoint.
:param pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']] private_endpoint: Private endpoint which the connection belongs to.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.
:param pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']] private_link_service_connection_state: Connection State of the Private Endpoint Connection.
:param pulumi.Input[str] provisioning_state: Provisioning state of the private endpoint.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PrivateEndpointConnectionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A private endpoint connection
:param str resource_name: The name of the resource.
:param PrivateEndpointConnectionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PrivateEndpointConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__.__dict__["account_name"] = account_name
__props__.__dict__["group_id"] = group_id
__props__.__dict__["private_endpoint"] = private_endpoint
__props__.__dict__["private_endpoint_connection_name"] = private_endpoint_connection_name
__props__.__dict__["private_link_service_connection_state"] = private_link_service_connection_state
__props__.__dict__["provisioning_state"] = provisioning_state
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:documentdb/v20210615:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20190801preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20190801preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210115:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210115:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210301preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210301preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210315:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210315:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210401preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210401preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210415:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210415:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210515:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210515:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210701preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210701preview:PrivateEndpointConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PrivateEndpointConnection, __self__).__init__(
'azure-native:documentdb/v20210615:PrivateEndpointConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateEndpointConnection':
"""
Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)
__props__.__dict__["group_id"] = None
__props__.__dict__["name"] = None
__props__.__dict__["private_endpoint"] = None
__props__.__dict__["private_link_service_connection_state"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
return PrivateEndpointConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> pulumi.Output[Optional[str]]:
"""
Group id of the private endpoint.
"""
return pulumi.get(self, "group_id")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> pulumi.Output[Optional['outputs.PrivateEndpointPropertyResponse']]:
"""
Private endpoint which the connection belongs to.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> pulumi.Output[Optional['outputs.PrivateLinkServiceConnectionStatePropertyResponse']]:
"""
Connection State of the Private Endpoint Connection.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
Provisioning state of the private endpoint.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
| 53.00678 | 1,676 | 0.703716 | [
"Apache-2.0"
] | polivbr/pulumi-azure-native | sdk/python/pulumi_azure_native/documentdb/v20210615/private_endpoint_connection.py | 15,637 | Python |
# Copyright (c) 2017 Rocky Bernstein
"""
Parsing for a trepan2/trepan3k debugger
"breakpoint', "list", or "disasm" command arguments
This is a debugger location along with:
- an optional condition parsing for breakpoints commands
- a range or count for "list" commands
"""
from __future__ import print_function
import sys
from spark_parser.ast import AST
from gdbloc.scanner import LocationScanner, ScannerError
from spark_parser import GenericASTBuilder, DEFAULT_DEBUG
class LocationError(Exception):
def __init__(self, text, text_cursor):
self.text = text
self.text_cursor = text_cursor
def __str__(self):
return self.text + "\n" + self.text_cursor
class LocationParser(GenericASTBuilder):
"""Location parsing as used in trepan2 and trepan3k
for list, breakpoint, and assembly commands
Note: function parse() comes from GenericASTBuilder
"""
def __init__(self, start_nt, text, debug=DEFAULT_DEBUG):
super(LocationParser, self).__init__(AST, start_nt, debug=debug)
self.debug = debug
self.text = text
def error(self, tokens, index):
token = tokens[index]
if self.debug.get('local_print', False):
print(self.text)
print(' ' * (token.offset + len(str(token.value))) + '^')
print("Syntax error at or near token '%s'" % token.value)
if 'context' in self.debug and self.debug['context']:
super(LocationParser, self).error(tokens, index)
raise LocationError(self.text,
' ' * (token.offset + len(str(token.value))) + '^')
def nonterminal(self, nt, args):
has_len = hasattr(args, '__len__')
collect = ('tokens',)
if nt in collect:
#
# Collect iterated thingies together.
#
rv = args[0]
for arg in args[1:]:
rv.append(arg)
if (has_len and len(args) == 1 and
hasattr(args[0], '__len__') and len(args[0]) == 1):
# Remove singleton derivations
rv = GenericASTBuilder.nonterminal(self, nt, args[0])
del args[0] # save memory
else:
rv = GenericASTBuilder.nonterminal(self, nt, args)
return rv
##########################################################
# Expression grammar rules. Grammar rule functions
# start with the name p_ and are collected automatically
##########################################################
def p_bp_location(self, args):
'''
bp_start ::= opt_space location_if opt_space
'''
# "disasm" command range which might refer to locations, ranges, and addresses
def p_asm_range(self, args):
'''
arange_start ::= opt_space arange
arange ::= range
arange ::= addr_location opt_space COMMA opt_space NUMBER
arange ::= addr_location opt_space COMMA opt_space OFFSET
arange ::= addr_location opt_space COMMA opt_space ADDRESS
arange ::= location opt_space COMMA opt_space ADDRESS
arange ::= addr_location opt_space COMMA
arange ::= addr_location
# Unlike ranges, We don't allow ending at an address
# arange ::= COMMA opt_space addr_location
addr_location ::= location
addr_location ::= ADDRESS
'''
# "list" command range which may refer to locations
def p_list_range(self, args):
'''
range_start ::= opt_space range
range ::= location opt_space COMMA opt_space NUMBER
range ::= location opt_space COMMA opt_space OFFSET
range ::= COMMA opt_space location
range ::= location opt_space COMMA
range ::= location
range ::= DIRECTION
'''
# location that is used in breakpoints, list commands, and disassembly
def p_location(self, args):
'''
opt_space ::= SPACE?
location_if ::= location
location_if ::= location SPACE IF tokens
# Note no space is allowed between FILENAME and NUMBER
location ::= FILENAME COLON NUMBER
location ::= FUNCNAME
# If just a number is given, then the filename is implied
location ::= NUMBER
location ::= METHOD
location ::= OFFSET
# For tokens we accept anything. We're really just
# going to use the underlying string from the part
# after "if". So below we list all of the possible tokens
tokens ::= token+
token ::= COLON
token ::= COMMA
token ::= DIRECTION
token ::= FILENAME
token ::= FUNCNAME
token ::= NUMBER
token ::= OFFSET
token ::= SPACE
'''
def parse_location(start_symbol, text, out=sys.stdout,
show_tokens=False, parser_debug=DEFAULT_DEBUG):
assert isinstance(text, str)
tokens = LocationScanner().tokenize(text)
if show_tokens:
for t in tokens:
print(t)
# For heavy grammar debugging
# parser_debug = {'rules': True, 'transition': True, 'reduce': True,
# 'errorstack': True, 'dups': True}
# parser_debug = {'rules': False, 'transition': False, 'reduce': True,
# 'errorstack': 'full', 'dups': False}
parser = LocationParser(start_symbol, text, parser_debug)
parser.check_grammar(frozenset(('bp_start', 'range_start', 'arange_start')))
return parser.parse(tokens)
def parse_bp_location(*args, **kwargs):
return parse_location('bp_start', *args, **kwargs)
def parse_range(*args, **kwargs):
return parse_location('range_start', *args, **kwargs)
def parse_arange(*args, **kwargs):
return parse_location('arange_start', *args, **kwargs)
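# A quick sketch of calling these helpers directly (the inputs mirror the samples
# exercised in __main__ below):
#
# ast = parse_bp_location("foo.py:5 if x > 1")
# ast = parse_arange("2, 10")
# parse_range() follows the same pattern for "list" command ranges.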
if __name__ == '__main__':
def doit(fn, line):
try:
ast = fn(line, show_tokens=True)
print(ast)
except ScannerError as e:
print("Scanner error")
print(e.text)
print(e.text_cursor)
except LocationError as e:
print("Parser error at or near")
print(e.text)
print(e.text_cursor)
# FIXME: we should make sure all of the below is in a unit test.
lines = """
/tmp/foo.py:12
12
../foo.py:5
gcd()
foo.py:5 if x > 1
""".splitlines()
for line in lines:
if not line.strip():
continue
print("=" * 30)
print(line)
print("+" * 30)
doit(parse_bp_location, line)
# bad_lines = """
# /tmp/foo.py
# '''/tmp/foo.py'''
# /tmp/foo.py 12
# gcd()
# foo.py if x > 1
# """.splitlines()
# for line in bad_lines:
# if not line.strip():
# continue
# print("=" * 30)
# print(line)
# print("+" * 30)
# doit(parse_bp_location, line)
# lines = """
# 1
# 2,
# ,3
# 4,10
# """.splitlines()
# for line in lines:
# if not line.strip():
# continue
# print("=" * 30)
# print(line)
# print("+" * 30)
# doit(parse_range, line)
# print(ast)
lines = (
"*0",
"*1 ,",
"2 , *10",
"2, 10",
"*3, 10",
"sys.exit() , *20"
)
for line in lines:
line = line.strip()
if not line:
continue
print("=" * 30)
print(line)
print("+" * 30)
doit(parse_arange, line)
| 29.98008 | 82 | 0.562791 | [
"MIT"
] | rocky/python-spark | example/gdb-loc/gdbloc/parser.py | 7,525 | Python |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.google.cloud.hooks.cloud_build`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.google.cloud.hooks.cloud_build import CloudBuildHook # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.google.cloud.hooks.cloud_build`.",
DeprecationWarning, stacklevel=2
)
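# For example, an import from this deprecated path such as
#     from airflow.contrib.hooks.gcp_cloud_build_hook import CloudBuildHook
# should be replaced with the provider package import named in the warning above:
#     from airflow.providers.google.cloud.hooks.cloud_build import CloudBuildHook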
| 40.310345 | 96 | 0.775021 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 312day/airflow | airflow/contrib/hooks/gcp_cloud_build_hook.py | 1,169 | Python |
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
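# A minimal sketch of how these auth helpers plug into requests (URL and tokens are
# placeholders):
#
# requests.get("https://example.com/api/info", auth=JWTAuth("<jwt token>"))
# requests.get("https://example.com/api/info", auth=APITokenAuth("<api token>"))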
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
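# A minimal sketch of Patch in use (the JSON document below is illustrative):
#
# p = Patch(json='{"name": {"@op": "SwapValue", "@before": "foo", "@after": "bar"}}')
# p.before    # -> {"name": "foo"}
# p.update    # -> {"name": "bar"}
# p.to_json() # round-trips the cleaned content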
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client connected.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminiusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Default to "main".
ref: str, None
Ref setting for the client. Default to None.
repo: str
Repo identifier of the database that this client is connected to. Default to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
        else:
            return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, default to be "admin"
db: optional, str
Name of the database connected
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, default to be "root"
user: optional, str
Name of the user, default to be "admin"
use_token: bool
            Use token to connect. If both `jwt_token` and `api_token` are not provided (None), the environment variable TERMINUSDB_ACCESS_TOKEN will be used as the API token
jwt_token: optional, str
The Bearer JWT token to connect. Default to be None.
        api_token: optional, str
The API token to connect. Default to be None.
branch: optional, str
Branch to be connected, default to be "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, default to be "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
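    # Illustrative sketch (comment only, not executed): a typical token-based connection.
    # Team, database and URL below are placeholders; with use_token=True and no explicit
    # token, the TERMINUSDB_ACCESS_TOKEN environment variable is used (see _auth()).
    #
    #     client = WOQLClient("https://cloud.terminusdb.example/my_team")
    #     client.connect(team="my_team", db="my_db", use_token=True)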
def close(self) -> None:
"""Undo connect and close the connection.
        The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
        Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the following format:
{"commit_id":
{"author": "commit_author",
"message": "commit_message",
"timestamp: <datetime object of the timestamp>" }
}
Parameters
----------
max_history: int, optional
            maximum number of commits that will be returned, counting backwards from your current commit. Default is set to 500. It needs to be non-negative; if the input is 0 it will still give the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.da
tetime(2020, 9, 3, 15, 29, 34)}, {'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': '[email protected]', 'm
essage': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': 'rciy1rfu5foj67ch00ow6f6n
njjxe3i', 'author': '[email protected]', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15,
29, 33)}, {'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': '[email protected]', 'message': 'Add mike', '
timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gav
[email protected]', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)
}, {'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': '[email protected]', 'message': 'Adding bank account
object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': '9egc4h0m36l5rbq1alr1fki6jbfu
kuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15,
29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
            Since TerminusDB currently does not support open transactions, this method is not applicable to its usage. To reset the commit head, use WOQLClient.reset
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
            Database identifier to set in the config.
team : str
            Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
        force: bool
            whether to force the deletion of the database (default False)
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
        ### TODO: make triples work again
        raise InterfaceError("get_triples is temporarily not available in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
        ### TODO: make triples work again
        raise InterfaceError(
            "update_triples is temporarily not available in this version"
        )
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
        ### TODO: make triples work again
        raise InterfaceError(
            "insert_triples is temporarily not available in this version"
        )
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
            Template for the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
as_list: bool
If the result returned as list rather than an iterator.
get_data_version: bool
            If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
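    # Illustrative sketch (comment only): query_document matches documents against a
    # partial template. The type and field names below are placeholders for your schema.
    #
    #     matches = client.query_document({"@type": "Person", "age": 42}, as_list=True)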
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
            IRI id of the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
            If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
            Additional boolean flags for retrieving. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
            Specific type of the documents that are being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
            The starting position of the returned results, default to be 0
count: int or None
            The maximum number of returned results; if None (default) it will return all of the available results.
as_list: bool
If the result returned as list rather than an iterator.
get_data_version: bool
            If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
            Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
            The starting position of the returned results, default to be 0
count: int or None
            The maximum number of returned results; if None (default) it will return all of the available results.
as_list: bool
If the result returned as list rather than an iterator.
get_data_version: bool
            If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
            Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
        full_replace : bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never` the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
            list of ids of the inserted documents
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
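    # Illustrative sketch (comment only): inserting plain dicts with a commit message.
    # The type and fields are placeholders; the returned ids can be fed to get_document().
    #
    #     ids = client.insert_document([{"@type": "Person", "name": "Jane"}],
    #                                  commit_msg="Add Jane")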
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never` the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never` the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str or list of str
            Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
        Returns
        -------
        bool
            True if the document exists
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
        Returns
-------
dict
Dictionary containing information
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
        commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
            If the data version of the query result(s) should be obtained. If True, the method returns the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
        >>> WOQLClient("http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
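    # Illustrative sketch (comment only): running a WOQL query built with WOQLQuery and
    # reading the variable bindings from the result dict. Variable names are placeholders.
    #
    #     result = client.query(WOQLQuery().triple("v:subj", "v:pred", "v:obj"))
    #     for row in result["bindings"]:
    #         print(row["subj"], row["pred"], row["obj"])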
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
            remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
            option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
            author = self._author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
            remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
            option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
        >>> WOQLClient("http://localhost:6363").push(remote="origin", remote_branch="main", author="admin", message="commit message")
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
            the branch for the rebase
        commit : str, optional
            the commit id for the rebase (cannot be specified together with branch)
rebase_source : str, optional
the source branch for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
            Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when you need to go back after a soft reset).
soft: bool
            Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to a previous commit in the backend and wiping newer commits.
use_path : bool
            Whether the commit given is an id or a path. Default is id (use_path is False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id to be reset
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
        >>> result.to_json()
        '{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
                if document_id.startswith("terminusdb:///data"):
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
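    # Illustrative sketch (comment only): `before`/`after` may also be data versions
    # (strings), e.g. obtained from get_document(..., get_data_version=True); in that
    # case `document_id` narrows the diff to a single document. `new_version` below is
    # a placeholder for a later data version.
    #
    #     doc, version = client.get_document("Person/Jane", get_data_version=True)
    #     patch = client.diff(version, new_version,
    #                         document_id="terminusdb:///data/Person/Jane")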
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
Do not connect when using public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
        >>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
        description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[Dict]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
return base
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
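    # Illustrative sketch (comment only): for team "admin", db "mydb", repo "local" and
    # branch "main", _documents_url() resolves to
    #     <server_url>/api/document/admin/mydb/local/branch/main
    # while a soft reset (self._ref set) swaps the trailing part for /commit/<ref>.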
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
| 33.241262 | 313 | 0.552408 | [
"Apache-2.0"
] | terminusdb/woql-client-p | terminusdb_client/woqlclient/woqlClient.py | 77,984 | Python |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from pyspark import since, keyword_only
from pyspark.ml.param.shared import *
from pyspark.ml.tree import _DecisionTreeModel, _DecisionTreeParams, \
_TreeEnsembleModel, _TreeEnsembleParams, _RandomForestParams, _GBTParams, \
_HasVarianceImpurity, _TreeRegressorParams
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams, \
JavaPredictor, JavaPredictionModel, _JavaPredictorParams, JavaWrapper
from pyspark.ml.common import inherit_doc
from pyspark.sql import DataFrame
__all__ = ['AFTSurvivalRegression', 'AFTSurvivalRegressionModel',
'DecisionTreeRegressor', 'DecisionTreeRegressionModel',
'GBTRegressor', 'GBTRegressionModel',
'GeneralizedLinearRegression', 'GeneralizedLinearRegressionModel',
'GeneralizedLinearRegressionSummary', 'GeneralizedLinearRegressionTrainingSummary',
'IsotonicRegression', 'IsotonicRegressionModel',
'LinearRegression', 'LinearRegressionModel',
'LinearRegressionSummary', 'LinearRegressionTrainingSummary',
'RandomForestRegressor', 'RandomForestRegressionModel',
'FMRegressor', 'FMRegressionModel']
class _LinearRegressionParams(_JavaPredictorParams, HasRegParam, HasElasticNetParam, HasMaxIter,
HasTol, HasFitIntercept, HasStandardization, HasWeightCol, HasSolver,
HasAggregationDepth, HasLoss):
"""
Params for :py:class:`LinearRegression` and :py:class:`LinearRegressionModel`.
.. versionadded:: 3.0.0
"""
solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
"options: auto, normal, l-bfgs.", typeConverter=TypeConverters.toString)
loss = Param(Params._dummy(), "loss", "The loss function to be optimized. Supported " +
"options: squaredError, huber.", typeConverter=TypeConverters.toString)
epsilon = Param(Params._dummy(), "epsilon", "The shape parameter to control the amount of " +
"robustness. Must be > 1.0. Only valid when loss is huber",
typeConverter=TypeConverters.toFloat)
@since("2.3.0")
def getEpsilon(self):
"""
Gets the value of epsilon or its default value.
"""
return self.getOrDefault(self.epsilon)
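# Editorial note (illustrative only, not part of the Spark API): with loss="huber" a
# residual r is penalised roughly as
#     0.5 * r**2                                        if |r| <= epsilon * sigma
#     epsilon * sigma * (|r| - 0.5 * epsilon * sigma)   otherwise
# where sigma is a scale parameter estimated from the data; the default epsilon=1.35 is
# the classical choice that stays efficient when the noise is approximately Gaussian.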
@inherit_doc
class LinearRegression(JavaPredictor, _LinearRegressionParams, JavaMLWritable, JavaMLReadable):
"""
Linear regression.
The learning objective is to minimize the specified loss function, with regularization.
This supports two kinds of loss:
* squaredError (a.k.a squared loss)
* huber (a hybrid of squared error for relatively small errors and absolute error for \
relatively large ones, and we estimate the scale parameter from training data)
This supports multiple types of regularization:
* none (a.k.a. ordinary least squares)
* L2 (ridge regression)
* L1 (Lasso)
* L2 + L1 (elastic net)
Note: Fitting with huber loss only supports none and L2 regularization.
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, 2.0, Vectors.dense(1.0)),
... (0.0, 2.0, Vectors.sparse(1, [], []))], ["label", "weight", "features"])
>>> lr = LinearRegression(regParam=0.0, solver="normal", weightCol="weight")
>>> lr.setMaxIter(5)
LinearRegression...
>>> lr.getMaxIter()
5
>>> lr.setRegParam(0.1)
LinearRegression...
>>> lr.getRegParam()
0.1
>>> lr.setRegParam(0.0)
LinearRegression...
>>> model = lr.fit(df)
>>> model.setFeaturesCol("features")
LinearRegressionModel...
>>> model.setPredictionCol("newPrediction")
LinearRegressionModel...
>>> model.getMaxIter()
5
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> abs(model.predict(test0.head().features) - (-1.0)) < 0.001
True
>>> abs(model.transform(test0).head().newPrediction - (-1.0)) < 0.001
True
>>> abs(model.coefficients[0] - 1.0) < 0.001
True
>>> abs(model.intercept - 0.0) < 0.001
True
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> abs(model.transform(test1).head().newPrediction - 1.0) < 0.001
True
>>> lr.setParams("vector")
Traceback (most recent call last):
...
TypeError: Method setParams forces keyword arguments.
>>> lr_path = temp_path + "/lr"
>>> lr.save(lr_path)
>>> lr2 = LinearRegression.load(lr_path)
>>> lr2.getMaxIter()
5
>>> model_path = temp_path + "/lr_model"
>>> model.save(model_path)
>>> model2 = LinearRegressionModel.load(model_path)
>>> model.coefficients[0] == model2.coefficients[0]
True
>>> model.intercept == model2.intercept
True
>>> model.numFeatures
1
>>> model.write().format("pmml").save(model_path + "_2")
.. versionadded:: 1.4.0
"""
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
standardization=True, solver="auto", weightCol=None, aggregationDepth=2,
loss="squaredError", epsilon=1.35):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
standardization=True, solver="auto", weightCol=None, aggregationDepth=2, \
loss="squaredError", epsilon=1.35)
"""
super(LinearRegression, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.regression.LinearRegression", self.uid)
self._setDefault(maxIter=100, regParam=0.0, tol=1e-6, loss="squaredError", epsilon=1.35)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
standardization=True, solver="auto", weightCol=None, aggregationDepth=2,
loss="squaredError", epsilon=1.35):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
standardization=True, solver="auto", weightCol=None, aggregationDepth=2, \
loss="squaredError", epsilon=1.35)
Sets params for linear regression.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return LinearRegressionModel(java_model)
@since("2.3.0")
def setEpsilon(self, value):
"""
Sets the value of :py:attr:`epsilon`.
"""
return self._set(epsilon=value)
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
def setRegParam(self, value):
"""
Sets the value of :py:attr:`regParam`.
"""
return self._set(regParam=value)
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
def setElasticNetParam(self, value):
"""
Sets the value of :py:attr:`elasticNetParam`.
"""
return self._set(elasticNetParam=value)
def setFitIntercept(self, value):
"""
Sets the value of :py:attr:`fitIntercept`.
"""
return self._set(fitIntercept=value)
def setStandardization(self, value):
"""
Sets the value of :py:attr:`standardization`.
"""
return self._set(standardization=value)
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
def setSolver(self, value):
"""
Sets the value of :py:attr:`solver`.
"""
return self._set(solver=value)
def setAggregationDepth(self, value):
"""
Sets the value of :py:attr:`aggregationDepth`.
"""
return self._set(aggregationDepth=value)
def setLoss(self, value):
"""
Sets the value of :py:attr:`loss`.
"""
        return self._set(loss=value)
class LinearRegressionModel(JavaPredictionModel, _LinearRegressionParams, GeneralJavaMLWritable,
JavaMLReadable, HasTrainingSummary):
"""
Model fitted by :class:`LinearRegression`.
.. versionadded:: 1.4.0
"""
@property
@since("2.0.0")
def coefficients(self):
"""
Model coefficients.
"""
return self._call_java("coefficients")
@property
@since("1.4.0")
def intercept(self):
"""
Model intercept.
"""
return self._call_java("intercept")
@property
@since("2.3.0")
def scale(self):
r"""
The value by which :math:`\|y - X'w\|` is scaled down when loss is "huber", otherwise 1.0.
"""
return self._call_java("scale")
@property
@since("2.0.0")
def summary(self):
"""
        Gets summary (e.g. residuals, mse, r-squared) of model on
training set. An exception is thrown if
`trainingSummary is None`.
"""
if self.hasSummary:
return LinearRegressionTrainingSummary(super(LinearRegressionModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@since("2.0.0")
def evaluate(self, dataset):
"""
Evaluates the model on a test dataset.
:param dataset:
Test dataset to evaluate model on, where dataset is an
instance of :py:class:`pyspark.sql.DataFrame`
"""
if not isinstance(dataset, DataFrame):
raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
java_lr_summary = self._call_java("evaluate", dataset)
return LinearRegressionSummary(java_lr_summary)
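# Usage sketch (illustrative; assumes a held-out DataFrame named `test_df`):
#   summary = model.evaluate(test_df)
#   print(summary.rootMeanSquaredError, summary.r2)
# `evaluate` returns a LinearRegressionSummary computed on the given dataset.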
class LinearRegressionSummary(JavaWrapper):
"""
Linear regression results evaluated on a dataset.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def predictions(self):
"""
        DataFrame output by the model's `transform` method.
"""
return self._call_java("predictions")
@property
@since("2.0.0")
def predictionCol(self):
"""
Field in "predictions" which gives the predicted value of
the label at each instance.
"""
return self._call_java("predictionCol")
@property
@since("2.0.0")
def labelCol(self):
"""
Field in "predictions" which gives the true label of each
instance.
"""
return self._call_java("labelCol")
@property
@since("2.0.0")
def featuresCol(self):
"""
Field in "predictions" which gives the features of each instance
as a vector.
"""
return self._call_java("featuresCol")
@property
@since("2.0.0")
def explainedVariance(self):
r"""
Returns the explained variance regression score.
explainedVariance = :math:`1 - \frac{variance(y - \hat{y})}{variance(y)}`
.. seealso:: `Wikipedia explain variation
<http://en.wikipedia.org/wiki/Explained_variation>`_
.. note:: This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("explainedVariance")
@property
@since("2.0.0")
def meanAbsoluteError(self):
"""
Returns the mean absolute error, which is a risk function
corresponding to the expected value of the absolute error
loss or l1-norm loss.
.. note:: This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("meanAbsoluteError")
@property
@since("2.0.0")
def meanSquaredError(self):
"""
Returns the mean squared error, which is a risk function
corresponding to the expected value of the squared error
loss or quadratic loss.
.. note:: This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("meanSquaredError")
@property
@since("2.0.0")
def rootMeanSquaredError(self):
"""
Returns the root mean squared error, which is defined as the
square root of the mean squared error.
.. note:: This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("rootMeanSquaredError")
@property
@since("2.0.0")
def r2(self):
"""
Returns R^2, the coefficient of determination.
.. seealso:: `Wikipedia coefficient of determination
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
.. note:: This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("r2")
@property
@since("2.4.0")
def r2adj(self):
"""
Returns Adjusted R^2, the adjusted coefficient of determination.
.. seealso:: `Wikipedia coefficient of determination, Adjusted R^2
<https://en.wikipedia.org/wiki/Coefficient_of_determination#Adjusted_R2>`_
.. note:: This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This will change in later Spark versions.
"""
return self._call_java("r2adj")
@property
@since("2.0.0")
def residuals(self):
"""
Residuals (label - predicted value)
"""
return self._call_java("residuals")
@property
@since("2.0.0")
def numInstances(self):
"""
Number of instances in DataFrame predictions
"""
return self._call_java("numInstances")
@property
@since("2.2.0")
def degreesOfFreedom(self):
"""
Degrees of freedom.
"""
return self._call_java("degreesOfFreedom")
@property
@since("2.0.0")
def devianceResiduals(self):
"""
The weighted residuals, the usual residuals rescaled by the
square root of the instance weights.
"""
return self._call_java("devianceResiduals")
@property
@since("2.0.0")
def coefficientStandardErrors(self):
"""
Standard error of estimated coefficients and intercept.
This value is only available when using the "normal" solver.
If :py:attr:`LinearRegression.fitIntercept` is set to True,
then the last element returned corresponds to the intercept.
.. seealso:: :py:attr:`LinearRegression.solver`
"""
return self._call_java("coefficientStandardErrors")
@property
@since("2.0.0")
def tValues(self):
"""
T-statistic of estimated coefficients and intercept.
This value is only available when using the "normal" solver.
If :py:attr:`LinearRegression.fitIntercept` is set to True,
then the last element returned corresponds to the intercept.
.. seealso:: :py:attr:`LinearRegression.solver`
"""
return self._call_java("tValues")
@property
@since("2.0.0")
def pValues(self):
"""
Two-sided p-value of estimated coefficients and intercept.
This value is only available when using the "normal" solver.
If :py:attr:`LinearRegression.fitIntercept` is set to True,
then the last element returned corresponds to the intercept.
.. seealso:: :py:attr:`LinearRegression.solver`
"""
return self._call_java("pValues")
@inherit_doc
class LinearRegressionTrainingSummary(LinearRegressionSummary):
"""
Linear regression training results. Currently, the training summary ignores the
training weights except for the objective trace.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def objectiveHistory(self):
"""
Objective function (scaled loss + regularization) at each
iteration.
This value is only available when using the "l-bfgs" solver.
.. seealso:: :py:attr:`LinearRegression.solver`
"""
return self._call_java("objectiveHistory")
@property
@since("2.0.0")
def totalIterations(self):
"""
Number of training iterations until termination.
This value is only available when using the "l-bfgs" solver.
.. seealso:: :py:attr:`LinearRegression.solver`
"""
return self._call_java("totalIterations")
class _IsotonicRegressionParams(HasFeaturesCol, HasLabelCol, HasPredictionCol, HasWeightCol):
"""
Params for :py:class:`IsotonicRegression` and :py:class:`IsotonicRegressionModel`.
.. versionadded:: 3.0.0
"""
isotonic = Param(
Params._dummy(), "isotonic",
"whether the output sequence should be isotonic/increasing (true) or" +
"antitonic/decreasing (false).", typeConverter=TypeConverters.toBoolean)
featureIndex = Param(
Params._dummy(), "featureIndex",
"The index of the feature if featuresCol is a vector column, no effect otherwise.",
typeConverter=TypeConverters.toInt)
def getIsotonic(self):
"""
Gets the value of isotonic or its default value.
"""
return self.getOrDefault(self.isotonic)
def getFeatureIndex(self):
"""
Gets the value of featureIndex or its default value.
"""
return self.getOrDefault(self.featureIndex)
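# Worked example (sketch): an increasing isotonic fit of labels [1.0, 3.0, 2.0]
# (equal weights, features 0, 1, 2) pools the violating pair (3.0, 2.0) to their
# mean, yielding predictions [1.0, 2.5, 2.5] -- the pool adjacent violators step.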
@inherit_doc
class IsotonicRegression(JavaEstimator, _IsotonicRegressionParams, HasWeightCol,
JavaMLWritable, JavaMLReadable):
"""
Currently implemented using parallelized pool adjacent violators algorithm.
Only univariate (single feature) algorithm supported.
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> ir = IsotonicRegression()
>>> model = ir.fit(df)
>>> model.setFeaturesCol("features")
IsotonicRegressionModel...
>>> model.numFeatures()
1
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.transform(test0).head().prediction
0.0
>>> model.predict(test0.head().features[model.getFeatureIndex()])
0.0
>>> model.boundaries
DenseVector([0.0, 1.0])
>>> ir_path = temp_path + "/ir"
>>> ir.save(ir_path)
>>> ir2 = IsotonicRegression.load(ir_path)
>>> ir2.getIsotonic()
True
>>> model_path = temp_path + "/ir_model"
>>> model.save(model_path)
>>> model2 = IsotonicRegressionModel.load(model_path)
>>> model.boundaries == model2.boundaries
True
>>> model.predictions == model2.predictions
True
.. versionadded:: 1.6.0
"""
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
weightCol=None, isotonic=True, featureIndex=0):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
weightCol=None, isotonic=True, featureIndex=0):
"""
super(IsotonicRegression, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.regression.IsotonicRegression", self.uid)
self._setDefault(isotonic=True, featureIndex=0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
weightCol=None, isotonic=True, featureIndex=0):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
weightCol=None, isotonic=True, featureIndex=0):
Set the params for IsotonicRegression.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return IsotonicRegressionModel(java_model)
def setIsotonic(self, value):
"""
Sets the value of :py:attr:`isotonic`.
"""
return self._set(isotonic=value)
def setFeatureIndex(self, value):
"""
Sets the value of :py:attr:`featureIndex`.
"""
return self._set(featureIndex=value)
@since("1.6.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("1.6.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("1.6.0")
def setLabelCol(self, value):
"""
Sets the value of :py:attr:`labelCol`.
"""
return self._set(labelCol=value)
@since("1.6.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
class IsotonicRegressionModel(JavaModel, _IsotonicRegressionParams, JavaMLWritable,
JavaMLReadable):
"""
Model fitted by :class:`IsotonicRegression`.
.. versionadded:: 1.6.0
"""
@since("3.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
def setFeatureIndex(self, value):
"""
Sets the value of :py:attr:`featureIndex`.
"""
return self._set(featureIndex=value)
@property
@since("1.6.0")
def boundaries(self):
"""
Boundaries in increasing order for which predictions are known.
"""
return self._call_java("boundaries")
@property
@since("1.6.0")
def predictions(self):
"""
Predictions associated with the boundaries at the same index, monotone because of isotonic
regression.
"""
return self._call_java("predictions")
@since("3.0.0")
def numFeatures(self):
"""
Returns the number of features the model was trained on. If unknown, returns -1
"""
return self._call_java("numFeatures")
@since("3.0.0")
def predict(self, value):
"""
Predict label for the given features.
"""
return self._call_java("predict", value)
class _DecisionTreeRegressorParams(_DecisionTreeParams, _TreeRegressorParams, HasVarianceCol):
"""
Params for :py:class:`DecisionTreeRegressor` and :py:class:`DecisionTreeRegressionModel`.
.. versionadded:: 3.0.0
"""
pass
@inherit_doc
class DecisionTreeRegressor(JavaPredictor, _DecisionTreeRegressorParams, JavaMLWritable,
JavaMLReadable):
"""
`Decision tree <http://en.wikipedia.org/wiki/Decision_tree_learning>`_
learning algorithm for regression.
It supports both continuous and categorical features.
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> dt = DecisionTreeRegressor(maxDepth=2)
>>> dt.setVarianceCol("variance")
DecisionTreeRegressor...
>>> model = dt.fit(df)
>>> model.getVarianceCol()
'variance'
>>> model.setLeafCol("leafId")
DecisionTreeRegressionModel...
>>> model.depth
1
>>> model.numNodes
3
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> model.numFeatures
1
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.predict(test0.head().features)
0.0
>>> result = model.transform(test0).head()
>>> result.prediction
0.0
>>> model.predictLeaf(test0.head().features)
0.0
>>> result.leafId
0.0
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> dtr_path = temp_path + "/dtr"
>>> dt.save(dtr_path)
>>> dt2 = DecisionTreeRegressor.load(dtr_path)
>>> dt2.getMaxDepth()
2
>>> model_path = temp_path + "/dtr_model"
>>> model.save(model_path)
>>> model2 = DecisionTreeRegressionModel.load(model_path)
>>> model.numNodes == model2.numNodes
True
>>> model.depth == model2.depth
True
>>> model.transform(test1).head().variance
0.0
>>> df3 = spark.createDataFrame([
... (1.0, 0.2, Vectors.dense(1.0)),
... (1.0, 0.8, Vectors.dense(1.0)),
... (0.0, 1.0, Vectors.sparse(1, [], []))], ["label", "weight", "features"])
>>> dt3 = DecisionTreeRegressor(maxDepth=2, weightCol="weight", varianceCol="variance")
>>> model3 = dt3.fit(df3)
>>> print(model3.toDebugString)
DecisionTreeRegressionModel...depth=1, numNodes=3...
.. versionadded:: 1.4.0
"""
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance",
seed=None, varianceCol=None, weightCol=None, leafCol="",
minWeightFractionPerNode=0.0):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
impurity="variance", seed=None, varianceCol=None, weightCol=None, \
leafCol="", minWeightFractionPerNode=0.0)
"""
super(DecisionTreeRegressor, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.regression.DecisionTreeRegressor", self.uid)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="variance", leafCol="", minWeightFractionPerNode=0.0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="variance", seed=None, varianceCol=None, weightCol=None,
leafCol="", minWeightFractionPerNode=0.0):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
impurity="variance", seed=None, varianceCol=None, weightCol=None, \
leafCol="", minWeightFractionPerNode=0.0)
Sets params for the DecisionTreeRegressor.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return DecisionTreeRegressionModel(java_model)
@since("1.4.0")
def setMaxDepth(self, value):
"""
Sets the value of :py:attr:`maxDepth`.
"""
return self._set(maxDepth=value)
@since("1.4.0")
def setMaxBins(self, value):
"""
Sets the value of :py:attr:`maxBins`.
"""
return self._set(maxBins=value)
@since("1.4.0")
def setMinInstancesPerNode(self, value):
"""
Sets the value of :py:attr:`minInstancesPerNode`.
"""
return self._set(minInstancesPerNode=value)
@since("3.0.0")
def setMinWeightFractionPerNode(self, value):
"""
Sets the value of :py:attr:`minWeightFractionPerNode`.
"""
return self._set(minWeightFractionPerNode=value)
@since("1.4.0")
def setMinInfoGain(self, value):
"""
Sets the value of :py:attr:`minInfoGain`.
"""
return self._set(minInfoGain=value)
@since("1.4.0")
def setMaxMemoryInMB(self, value):
"""
Sets the value of :py:attr:`maxMemoryInMB`.
"""
return self._set(maxMemoryInMB=value)
@since("1.4.0")
def setCacheNodeIds(self, value):
"""
Sets the value of :py:attr:`cacheNodeIds`.
"""
return self._set(cacheNodeIds=value)
@since("1.4.0")
def setImpurity(self, value):
"""
Sets the value of :py:attr:`impurity`.
"""
return self._set(impurity=value)
@since("1.4.0")
def setCheckpointInterval(self, value):
"""
Sets the value of :py:attr:`checkpointInterval`.
"""
return self._set(checkpointInterval=value)
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("3.0.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@since("2.0.0")
def setVarianceCol(self, value):
"""
Sets the value of :py:attr:`varianceCol`.
"""
return self._set(varianceCol=value)
@inherit_doc
class DecisionTreeRegressionModel(_DecisionTreeModel, _DecisionTreeRegressorParams,
JavaMLWritable, JavaMLReadable):
"""
Model fitted by :class:`DecisionTreeRegressor`.
.. versionadded:: 1.4.0
"""
@since("3.0.0")
def setVarianceCol(self, value):
"""
Sets the value of :py:attr:`varianceCol`.
"""
return self._set(varianceCol=value)
@property
@since("2.0.0")
def featureImportances(self):
"""
Estimate of the importance of each feature.
This generalizes the idea of "Gini" importance to other losses,
following the explanation of Gini importance from "Random Forests" documentation
by Leo Breiman and Adele Cutler, and following the implementation from scikit-learn.
This feature importance is calculated as follows:
- importance(feature j) = sum (over nodes which split on feature j) of the gain,
          where gain is scaled by the number of instances passing through the node
- Normalize importances for tree to sum to 1.
.. note:: Feature importance for single decision trees can have high variance due to
correlated predictor variables. Consider using a :py:class:`RandomForestRegressor`
to determine feature importance instead.
"""
return self._call_java("featureImportances")
class _RandomForestRegressorParams(_RandomForestParams, _TreeRegressorParams):
"""
Params for :py:class:`RandomForestRegressor` and :py:class:`RandomForestRegressionModel`.
.. versionadded:: 3.0.0
"""
pass
@inherit_doc
class RandomForestRegressor(JavaPredictor, _RandomForestRegressorParams, JavaMLWritable,
JavaMLReadable):
"""
`Random Forest <http://en.wikipedia.org/wiki/Random_forest>`_
learning algorithm for regression.
It supports both continuous and categorical features.
>>> from numpy import allclose
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> rf = RandomForestRegressor(numTrees=2, maxDepth=2)
>>> rf.getMinWeightFractionPerNode()
0.0
>>> rf.setSeed(42)
RandomForestRegressor...
>>> model = rf.fit(df)
>>> model.getSeed()
42
>>> model.setLeafCol("leafId")
RandomForestRegressionModel...
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> allclose(model.treeWeights, [1.0, 1.0])
True
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.predict(test0.head().features)
0.0
>>> model.predictLeaf(test0.head().features)
DenseVector([0.0, 0.0])
>>> result = model.transform(test0).head()
>>> result.prediction
0.0
>>> result.leafId
DenseVector([0.0, 0.0])
>>> model.numFeatures
1
>>> model.trees
[DecisionTreeRegressionModel...depth=..., DecisionTreeRegressionModel...]
>>> model.getNumTrees
2
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
0.5
>>> rfr_path = temp_path + "/rfr"
>>> rf.save(rfr_path)
>>> rf2 = RandomForestRegressor.load(rfr_path)
>>> rf2.getNumTrees()
2
>>> model_path = temp_path + "/rfr_model"
>>> model.save(model_path)
>>> model2 = RandomForestRegressionModel.load(model_path)
>>> model.featureImportances == model2.featureImportances
True
.. versionadded:: 1.4.0
"""
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20,
featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0,
weightCol=None):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, \
featureSubsetStrategy="auto", leafCol=", minWeightFractionPerNode=0.0", \
weightCol=None)
"""
super(RandomForestRegressor, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.regression.RandomForestRegressor", self.uid)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="variance", subsamplingRate=1.0, numTrees=20,
featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20,
featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0,
weightCol=None):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, \
featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0, \
weightCol=None)
        Sets params for the RandomForestRegressor.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return RandomForestRegressionModel(java_model)
def setMaxDepth(self, value):
"""
Sets the value of :py:attr:`maxDepth`.
"""
return self._set(maxDepth=value)
def setMaxBins(self, value):
"""
Sets the value of :py:attr:`maxBins`.
"""
return self._set(maxBins=value)
def setMinInstancesPerNode(self, value):
"""
Sets the value of :py:attr:`minInstancesPerNode`.
"""
return self._set(minInstancesPerNode=value)
def setMinInfoGain(self, value):
"""
Sets the value of :py:attr:`minInfoGain`.
"""
return self._set(minInfoGain=value)
def setMaxMemoryInMB(self, value):
"""
Sets the value of :py:attr:`maxMemoryInMB`.
"""
return self._set(maxMemoryInMB=value)
def setCacheNodeIds(self, value):
"""
Sets the value of :py:attr:`cacheNodeIds`.
"""
return self._set(cacheNodeIds=value)
@since("1.4.0")
def setImpurity(self, value):
"""
Sets the value of :py:attr:`impurity`.
"""
return self._set(impurity=value)
@since("1.4.0")
def setNumTrees(self, value):
"""
Sets the value of :py:attr:`numTrees`.
"""
return self._set(numTrees=value)
@since("1.4.0")
def setSubsamplingRate(self, value):
"""
Sets the value of :py:attr:`subsamplingRate`.
"""
return self._set(subsamplingRate=value)
@since("2.4.0")
def setFeatureSubsetStrategy(self, value):
"""
Sets the value of :py:attr:`featureSubsetStrategy`.
"""
return self._set(featureSubsetStrategy=value)
def setCheckpointInterval(self, value):
"""
Sets the value of :py:attr:`checkpointInterval`.
"""
return self._set(checkpointInterval=value)
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("3.0.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@since("3.0.0")
def setMinWeightFractionPerNode(self, value):
"""
Sets the value of :py:attr:`minWeightFractionPerNode`.
"""
return self._set(minWeightFractionPerNode=value)
class RandomForestRegressionModel(_TreeEnsembleModel, _RandomForestRegressorParams,
JavaMLWritable, JavaMLReadable):
"""
Model fitted by :class:`RandomForestRegressor`.
.. versionadded:: 1.4.0
"""
@property
@since("2.0.0")
def trees(self):
"""Trees in this ensemble. Warning: These have null parent Estimators."""
return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))]
@property
@since("2.0.0")
def featureImportances(self):
"""
Estimate of the importance of each feature.
        Each feature's importance is the average of its importance across all trees in the ensemble.
The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
(Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
and follows the implementation from scikit-learn.
.. seealso:: :py:attr:`DecisionTreeRegressionModel.featureImportances`
"""
return self._call_java("featureImportances")
class _GBTRegressorParams(_GBTParams, _TreeRegressorParams):
"""
    Params for :py:class:`GBTRegressor` and :py:class:`GBTRegressionModel`.
.. versionadded:: 3.0.0
"""
supportedLossTypes = ["squared", "absolute"]
lossType = Param(Params._dummy(), "lossType",
"Loss function which GBT tries to minimize (case-insensitive). " +
"Supported options: " + ", ".join(supportedLossTypes),
typeConverter=TypeConverters.toString)
@since("1.4.0")
def getLossType(self):
"""
Gets the value of lossType or its default value.
"""
return self.getOrDefault(self.lossType)
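# Usage sketch: the GBT loss can be switched to absolute error for robustness to
# outliers, e.g.
#   gbt = GBTRegressor(lossType="absolute", maxIter=20, stepSize=0.1)
# "squared" is the default of the two supported options listed above.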
@inherit_doc
class GBTRegressor(JavaPredictor, _GBTRegressorParams, JavaMLWritable, JavaMLReadable):
"""
`Gradient-Boosted Trees (GBTs) <http://en.wikipedia.org/wiki/Gradient_boosting>`_
learning algorithm for regression.
It supports both continuous and categorical features.
>>> from numpy import allclose
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> gbt = GBTRegressor(maxDepth=2, seed=42, leafCol="leafId")
>>> gbt.setMaxIter(5)
GBTRegressor...
>>> gbt.setMinWeightFractionPerNode(0.049)
GBTRegressor...
>>> gbt.getMaxIter()
5
>>> print(gbt.getImpurity())
variance
>>> print(gbt.getFeatureSubsetStrategy())
all
>>> model = gbt.fit(df)
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> model.numFeatures
1
>>> allclose(model.treeWeights, [1.0, 0.1, 0.1, 0.1, 0.1])
True
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.predict(test0.head().features)
0.0
>>> model.predictLeaf(test0.head().features)
DenseVector([0.0, 0.0, 0.0, 0.0, 0.0])
>>> result = model.transform(test0).head()
>>> result.prediction
0.0
>>> result.leafId
DenseVector([0.0, 0.0, 0.0, 0.0, 0.0])
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> gbtr_path = temp_path + "gbtr"
>>> gbt.save(gbtr_path)
>>> gbt2 = GBTRegressor.load(gbtr_path)
>>> gbt2.getMaxDepth()
2
>>> model_path = temp_path + "gbtr_model"
>>> model.save(model_path)
>>> model2 = GBTRegressionModel.load(model_path)
>>> model.featureImportances == model2.featureImportances
True
>>> model.treeWeights == model2.treeWeights
True
>>> model.trees
[DecisionTreeRegressionModel...depth=..., DecisionTreeRegressionModel...]
>>> validation = spark.createDataFrame([(0.0, Vectors.dense(-1.0))],
... ["label", "features"])
>>> model.evaluateEachIteration(validation, "squared")
[0.0, 0.0, 0.0, 0.0, 0.0]
>>> gbt = gbt.setValidationIndicatorCol("validationIndicator")
>>> gbt.getValidationIndicatorCol()
'validationIndicator'
>>> gbt.getValidationTol()
0.01
.. versionadded:: 1.4.0
"""
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None,
impurity="variance", featureSubsetStrategy="all", validationTol=0.01,
validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0,
weightCol=None):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, \
checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, \
impurity="variance", featureSubsetStrategy="all", validationTol=0.01, \
validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0,
weightCol=None)
"""
super(GBTRegressor, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.regression.GBTRegressor", self.uid)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1,
impurity="variance", featureSubsetStrategy="all", validationTol=0.01,
leafCol="", minWeightFractionPerNode=0.0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None,
impuriy="variance", featureSubsetStrategy="all", validationTol=0.01,
validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0,
weightCol=None):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, \
checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, \
impurity="variance", featureSubsetStrategy="all", validationTol=0.01, \
validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0, \
weightCol=None)
Sets params for Gradient Boosted Tree Regression.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return GBTRegressionModel(java_model)
@since("1.4.0")
def setMaxDepth(self, value):
"""
Sets the value of :py:attr:`maxDepth`.
"""
return self._set(maxDepth=value)
@since("1.4.0")
def setMaxBins(self, value):
"""
Sets the value of :py:attr:`maxBins`.
"""
return self._set(maxBins=value)
@since("1.4.0")
def setMinInstancesPerNode(self, value):
"""
Sets the value of :py:attr:`minInstancesPerNode`.
"""
return self._set(minInstancesPerNode=value)
@since("1.4.0")
def setMinInfoGain(self, value):
"""
Sets the value of :py:attr:`minInfoGain`.
"""
return self._set(minInfoGain=value)
@since("1.4.0")
def setMaxMemoryInMB(self, value):
"""
Sets the value of :py:attr:`maxMemoryInMB`.
"""
return self._set(maxMemoryInMB=value)
@since("1.4.0")
def setCacheNodeIds(self, value):
"""
Sets the value of :py:attr:`cacheNodeIds`.
"""
return self._set(cacheNodeIds=value)
@since("1.4.0")
def setImpurity(self, value):
"""
Sets the value of :py:attr:`impurity`.
"""
return self._set(impurity=value)
@since("1.4.0")
def setLossType(self, value):
"""
Sets the value of :py:attr:`lossType`.
"""
return self._set(lossType=value)
@since("1.4.0")
def setSubsamplingRate(self, value):
"""
Sets the value of :py:attr:`subsamplingRate`.
"""
return self._set(subsamplingRate=value)
@since("2.4.0")
def setFeatureSubsetStrategy(self, value):
"""
Sets the value of :py:attr:`featureSubsetStrategy`.
"""
return self._set(featureSubsetStrategy=value)
@since("3.0.0")
def setValidationIndicatorCol(self, value):
"""
Sets the value of :py:attr:`validationIndicatorCol`.
"""
return self._set(validationIndicatorCol=value)
@since("1.4.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("1.4.0")
def setCheckpointInterval(self, value):
"""
Sets the value of :py:attr:`checkpointInterval`.
"""
return self._set(checkpointInterval=value)
@since("1.4.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("1.4.0")
def setStepSize(self, value):
"""
Sets the value of :py:attr:`stepSize`.
"""
return self._set(stepSize=value)
@since("3.0.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@since("3.0.0")
def setMinWeightFractionPerNode(self, value):
"""
Sets the value of :py:attr:`minWeightFractionPerNode`.
"""
return self._set(minWeightFractionPerNode=value)
class GBTRegressionModel(_TreeEnsembleModel, _GBTRegressorParams, JavaMLWritable, JavaMLReadable):
"""
Model fitted by :class:`GBTRegressor`.
.. versionadded:: 1.4.0
"""
@property
@since("2.0.0")
def featureImportances(self):
"""
Estimate of the importance of each feature.
        Each feature's importance is the average of its importance across all trees in the ensemble.
The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
(Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
and follows the implementation from scikit-learn.
.. seealso:: :py:attr:`DecisionTreeRegressionModel.featureImportances`
"""
return self._call_java("featureImportances")
@property
@since("2.0.0")
def trees(self):
"""Trees in this ensemble. Warning: These have null parent Estimators."""
return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))]
@since("2.4.0")
def evaluateEachIteration(self, dataset, loss):
"""
Method to compute error or loss for every iteration of gradient boosting.
:param dataset:
Test dataset to evaluate model on, where dataset is an
instance of :py:class:`pyspark.sql.DataFrame`
:param loss:
The loss function used to compute error.
Supported options: squared, absolute
"""
return self._call_java("evaluateEachIteration", dataset, loss)
class _AFTSurvivalRegressionParams(_JavaPredictorParams, HasMaxIter, HasTol, HasFitIntercept,
HasAggregationDepth):
"""
Params for :py:class:`AFTSurvivalRegression` and :py:class:`AFTSurvivalRegressionModel`.
.. versionadded:: 3.0.0
"""
censorCol = Param(
Params._dummy(), "censorCol",
"censor column name. The value of this column could be 0 or 1. " +
"If the value is 1, it means the event has occurred i.e. " +
"uncensored; otherwise censored.", typeConverter=TypeConverters.toString)
quantileProbabilities = Param(
Params._dummy(), "quantileProbabilities",
"quantile probabilities array. Values of the quantile probabilities array " +
"should be in the range (0, 1) and the array should be non-empty.",
typeConverter=TypeConverters.toListFloat)
quantilesCol = Param(
Params._dummy(), "quantilesCol",
"quantiles column name. This column will output quantiles of " +
"corresponding quantileProbabilities if it is set.",
typeConverter=TypeConverters.toString)
@since("1.6.0")
def getCensorCol(self):
"""
Gets the value of censorCol or its default value.
"""
return self.getOrDefault(self.censorCol)
@since("1.6.0")
def getQuantileProbabilities(self):
"""
Gets the value of quantileProbabilities or its default value.
"""
return self.getOrDefault(self.quantileProbabilities)
@since("1.6.0")
def getQuantilesCol(self):
"""
Gets the value of quantilesCol or its default value.
"""
return self.getOrDefault(self.quantilesCol)
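# Usage sketch: to also output survival-time quantiles per row, set quantilesCol
# together with quantileProbabilities, e.g.
#   aft = AFTSurvivalRegression(quantilesCol="quantiles",
#                               quantileProbabilities=[0.25, 0.5, 0.75])
# The censor column must contain 1.0 for observed events and 0.0 for censored ones.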
@inherit_doc
class AFTSurvivalRegression(JavaPredictor, _AFTSurvivalRegressionParams,
JavaMLWritable, JavaMLReadable):
"""
Accelerated Failure Time (AFT) Model Survival Regression
Fit a parametric AFT survival regression model based on the Weibull distribution
of the survival time.
.. seealso:: `AFT Model <https://en.wikipedia.org/wiki/Accelerated_failure_time_model>`_
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0), 1.0),
... (1e-40, Vectors.sparse(1, [], []), 0.0)], ["label", "features", "censor"])
>>> aftsr = AFTSurvivalRegression()
>>> aftsr.setMaxIter(10)
AFTSurvivalRegression...
>>> aftsr.getMaxIter()
10
>>> aftsr.clear(aftsr.maxIter)
>>> model = aftsr.fit(df)
>>> model.setFeaturesCol("features")
AFTSurvivalRegressionModel...
>>> model.predict(Vectors.dense(6.3))
1.0
>>> model.predictQuantiles(Vectors.dense(6.3))
DenseVector([0.0101, 0.0513, 0.1054, 0.2877, 0.6931, 1.3863, 2.3026, 2.9957, 4.6052])
>>> model.transform(df).show()
+-------+---------+------+----------+
| label| features|censor|prediction|
+-------+---------+------+----------+
| 1.0| [1.0]| 1.0| 1.0|
|1.0E-40|(1,[],[])| 0.0| 1.0|
+-------+---------+------+----------+
...
>>> aftsr_path = temp_path + "/aftsr"
>>> aftsr.save(aftsr_path)
>>> aftsr2 = AFTSurvivalRegression.load(aftsr_path)
>>> aftsr2.getMaxIter()
100
>>> model_path = temp_path + "/aftsr_model"
>>> model.save(model_path)
>>> model2 = AFTSurvivalRegressionModel.load(model_path)
>>> model.coefficients == model2.coefficients
True
>>> model.intercept == model2.intercept
True
>>> model.scale == model2.scale
True
.. versionadded:: 1.6.0
"""
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor",
quantileProbabilities=list([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99]),
quantilesCol=None, aggregationDepth=2):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", \
quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], \
quantilesCol=None, aggregationDepth=2)
"""
super(AFTSurvivalRegression, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.regression.AFTSurvivalRegression", self.uid)
self._setDefault(censorCol="censor",
quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99],
maxIter=100, tol=1E-6)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor",
quantileProbabilities=list([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99]),
quantilesCol=None, aggregationDepth=2):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", \
quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], \
quantilesCol=None, aggregationDepth=2):
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return AFTSurvivalRegressionModel(java_model)
@since("1.6.0")
def setCensorCol(self, value):
"""
Sets the value of :py:attr:`censorCol`.
"""
return self._set(censorCol=value)
@since("1.6.0")
def setQuantileProbabilities(self, value):
"""
Sets the value of :py:attr:`quantileProbabilities`.
"""
return self._set(quantileProbabilities=value)
@since("1.6.0")
def setQuantilesCol(self, value):
"""
Sets the value of :py:attr:`quantilesCol`.
"""
return self._set(quantilesCol=value)
@since("1.6.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("1.6.0")
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
@since("1.6.0")
def setFitIntercept(self, value):
"""
Sets the value of :py:attr:`fitIntercept`.
"""
return self._set(fitIntercept=value)
@since("2.1.0")
def setAggregationDepth(self, value):
"""
Sets the value of :py:attr:`aggregationDepth`.
"""
return self._set(aggregationDepth=value)
class AFTSurvivalRegressionModel(JavaPredictionModel, _AFTSurvivalRegressionParams,
JavaMLWritable, JavaMLReadable):
"""
Model fitted by :class:`AFTSurvivalRegression`.
.. versionadded:: 1.6.0
"""
@since("3.0.0")
def setQuantileProbabilities(self, value):
"""
Sets the value of :py:attr:`quantileProbabilities`.
"""
return self._set(quantileProbabilities=value)
@since("3.0.0")
def setQuantilesCol(self, value):
"""
Sets the value of :py:attr:`quantilesCol`.
"""
return self._set(quantilesCol=value)
@property
@since("2.0.0")
def coefficients(self):
"""
Model coefficients.
"""
return self._call_java("coefficients")
@property
@since("1.6.0")
def intercept(self):
"""
Model intercept.
"""
return self._call_java("intercept")
@property
@since("1.6.0")
def scale(self):
"""
Model scale parameter.
"""
return self._call_java("scale")
@since("2.0.0")
def predictQuantiles(self, features):
"""
Predicted Quantiles
"""
return self._call_java("predictQuantiles", features)
class _GeneralizedLinearRegressionParams(_JavaPredictorParams, HasFitIntercept, HasMaxIter,
HasTol, HasRegParam, HasWeightCol, HasSolver,
HasAggregationDepth):
"""
Params for :py:class:`GeneralizedLinearRegression` and
:py:class:`GeneralizedLinearRegressionModel`.
.. versionadded:: 3.0.0
"""
family = Param(Params._dummy(), "family", "The name of family which is a description of " +
"the error distribution to be used in the model. Supported options: " +
"gaussian (default), binomial, poisson, gamma and tweedie.",
typeConverter=TypeConverters.toString)
link = Param(Params._dummy(), "link", "The name of link function which provides the " +
"relationship between the linear predictor and the mean of the distribution " +
"function. Supported options: identity, log, inverse, logit, probit, cloglog " +
"and sqrt.", typeConverter=TypeConverters.toString)
linkPredictionCol = Param(Params._dummy(), "linkPredictionCol", "link prediction (linear " +
"predictor) column name", typeConverter=TypeConverters.toString)
variancePower = Param(Params._dummy(), "variancePower", "The power in the variance function " +
"of the Tweedie distribution which characterizes the relationship " +
"between the variance and mean of the distribution. Only applicable " +
"for the Tweedie family. Supported values: 0 and [1, Inf).",
typeConverter=TypeConverters.toFloat)
linkPower = Param(Params._dummy(), "linkPower", "The index in the power link function. " +
"Only applicable to the Tweedie family.",
typeConverter=TypeConverters.toFloat)
solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
"options: irls.", typeConverter=TypeConverters.toString)
offsetCol = Param(Params._dummy(), "offsetCol", "The offset column name. If this is not set " +
"or empty, we treat all instance offsets as 0.0",
typeConverter=TypeConverters.toString)
@since("2.0.0")
def getFamily(self):
"""
Gets the value of family or its default value.
"""
return self.getOrDefault(self.family)
@since("2.0.0")
def getLinkPredictionCol(self):
"""
Gets the value of linkPredictionCol or its default value.
"""
return self.getOrDefault(self.linkPredictionCol)
@since("2.0.0")
def getLink(self):
"""
Gets the value of link or its default value.
"""
return self.getOrDefault(self.link)
@since("2.2.0")
def getVariancePower(self):
"""
Gets the value of variancePower or its default value.
"""
return self.getOrDefault(self.variancePower)
@since("2.2.0")
def getLinkPower(self):
"""
Gets the value of linkPower or its default value.
"""
return self.getOrDefault(self.linkPower)
@since("2.3.0")
def getOffsetCol(self):
"""
Gets the value of offsetCol or its default value.
"""
return self.getOrDefault(self.offsetCol)
@inherit_doc
class GeneralizedLinearRegression(JavaPredictor, _GeneralizedLinearRegressionParams,
JavaMLWritable, JavaMLReadable):
"""
Generalized Linear Regression.
Fit a Generalized Linear Model specified by giving a symbolic description of the linear
predictor (link function) and a description of the error distribution (family). It supports
"gaussian", "binomial", "poisson", "gamma" and "tweedie" as family. Valid link functions for
each family is listed below. The first link function of each family is the default one.
* "gaussian" -> "identity", "log", "inverse"
* "binomial" -> "logit", "probit", "cloglog"
* "poisson" -> "log", "identity", "sqrt"
* "gamma" -> "inverse", "identity", "log"
* "tweedie" -> power link function specified through "linkPower". \
The default link power in the tweedie family is 1 - variancePower.
.. seealso:: `GLM <https://en.wikipedia.org/wiki/Generalized_linear_model>`_
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(0.0, 0.0)),
... (1.0, Vectors.dense(1.0, 2.0)),
... (2.0, Vectors.dense(0.0, 0.0)),
... (2.0, Vectors.dense(1.0, 1.0)),], ["label", "features"])
>>> glr = GeneralizedLinearRegression(family="gaussian", link="identity", linkPredictionCol="p")
>>> glr.setRegParam(0.1)
GeneralizedLinearRegression...
>>> glr.getRegParam()
0.1
>>> glr.clear(glr.regParam)
>>> glr.setMaxIter(10)
GeneralizedLinearRegression...
>>> glr.getMaxIter()
10
>>> glr.clear(glr.maxIter)
>>> model = glr.fit(df)
>>> model.setFeaturesCol("features")
GeneralizedLinearRegressionModel...
>>> model.getMaxIter()
25
>>> model.getAggregationDepth()
2
>>> transformed = model.transform(df)
>>> abs(transformed.head().prediction - 1.5) < 0.001
True
>>> abs(transformed.head().p - 1.5) < 0.001
True
>>> model.coefficients
DenseVector([1.5..., -1.0...])
>>> model.numFeatures
2
>>> abs(model.intercept - 1.5) < 0.001
True
>>> glr_path = temp_path + "/glr"
>>> glr.save(glr_path)
>>> glr2 = GeneralizedLinearRegression.load(glr_path)
>>> glr.getFamily() == glr2.getFamily()
True
>>> model_path = temp_path + "/glr_model"
>>> model.save(model_path)
>>> model2 = GeneralizedLinearRegressionModel.load(model_path)
>>> model.intercept == model2.intercept
True
>>> model.coefficients[0] == model2.coefficients[0]
True
.. versionadded:: 2.0.0
"""
@keyword_only
def __init__(self, labelCol="label", featuresCol="features", predictionCol="prediction",
family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6,
regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None,
variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2):
"""
__init__(self, labelCol="label", featuresCol="features", predictionCol="prediction", \
family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, \
regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, \
variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2)
"""
super(GeneralizedLinearRegression, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.regression.GeneralizedLinearRegression", self.uid)
self._setDefault(family="gaussian", maxIter=25, tol=1e-6, regParam=0.0, solver="irls",
variancePower=0.0, aggregationDepth=2)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, labelCol="label", featuresCol="features", predictionCol="prediction",
family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6,
regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None,
variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2):
"""
setParams(self, labelCol="label", featuresCol="features", predictionCol="prediction", \
family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, \
regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, \
variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2)
Sets params for generalized linear regression.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return GeneralizedLinearRegressionModel(java_model)
@since("2.0.0")
def setFamily(self, value):
"""
Sets the value of :py:attr:`family`.
"""
return self._set(family=value)
@since("2.0.0")
def setLinkPredictionCol(self, value):
"""
Sets the value of :py:attr:`linkPredictionCol`.
"""
return self._set(linkPredictionCol=value)
@since("2.0.0")
def setLink(self, value):
"""
Sets the value of :py:attr:`link`.
"""
return self._set(link=value)
@since("2.2.0")
def setVariancePower(self, value):
"""
Sets the value of :py:attr:`variancePower`.
"""
return self._set(variancePower=value)
@since("2.2.0")
def setLinkPower(self, value):
"""
Sets the value of :py:attr:`linkPower`.
"""
return self._set(linkPower=value)
@since("2.3.0")
def setOffsetCol(self, value):
"""
Sets the value of :py:attr:`offsetCol`.
"""
return self._set(offsetCol=value)
@since("2.0.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("2.0.0")
def setRegParam(self, value):
"""
Sets the value of :py:attr:`regParam`.
"""
return self._set(regParam=value)
@since("2.0.0")
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
@since("2.2.0")
def setFitIntercept(self, value):
"""
Sets the value of :py:attr:`fitIntercept`.
"""
return self._set(fitIntercept=value)
@since("2.0.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@since("2.0.0")
def setSolver(self, value):
"""
Sets the value of :py:attr:`solver`.
"""
return self._set(solver=value)
@since("3.0.0")
def setAggregationDepth(self, value):
"""
Sets the value of :py:attr:`aggregationDepth`.
"""
return self._set(aggregationDepth=value)
class GeneralizedLinearRegressionModel(JavaPredictionModel, _GeneralizedLinearRegressionParams,
JavaMLWritable, JavaMLReadable, HasTrainingSummary):
"""
Model fitted by :class:`GeneralizedLinearRegression`.
.. versionadded:: 2.0.0
"""
@since("3.0.0")
def setLinkPredictionCol(self, value):
"""
Sets the value of :py:attr:`linkPredictionCol`.
"""
return self._set(linkPredictionCol=value)
@property
@since("2.0.0")
def coefficients(self):
"""
Model coefficients.
"""
return self._call_java("coefficients")
@property
@since("2.0.0")
def intercept(self):
"""
Model intercept.
"""
return self._call_java("intercept")
@property
@since("2.0.0")
def summary(self):
"""
Gets summary (e.g. residuals, deviance, pValues) of model on
training set. An exception is thrown if
`trainingSummary is None`.
"""
if self.hasSummary:
return GeneralizedLinearRegressionTrainingSummary(
super(GeneralizedLinearRegressionModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@since("2.0.0")
def evaluate(self, dataset):
"""
Evaluates the model on a test dataset.
:param dataset:
Test dataset to evaluate model on, where dataset is an
instance of :py:class:`pyspark.sql.DataFrame`
"""
if not isinstance(dataset, DataFrame):
raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
java_glr_summary = self._call_java("evaluate", dataset)
return GeneralizedLinearRegressionSummary(java_glr_summary)
class GeneralizedLinearRegressionSummary(JavaWrapper):
"""
Generalized linear regression results evaluated on a dataset.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def predictions(self):
"""
Predictions output by the model's `transform` method.
"""
return self._call_java("predictions")
@property
@since("2.0.0")
def predictionCol(self):
"""
Field in :py:attr:`predictions` which gives the predicted value of each instance.
This is set to a new column name if the original model's `predictionCol` is not set.
"""
return self._call_java("predictionCol")
@property
@since("2.2.0")
def numInstances(self):
"""
Number of instances in DataFrame predictions.
"""
return self._call_java("numInstances")
@property
@since("2.0.0")
def rank(self):
"""
The numeric rank of the fitted linear model.
"""
return self._call_java("rank")
@property
@since("2.0.0")
def degreesOfFreedom(self):
"""
Degrees of freedom.
"""
return self._call_java("degreesOfFreedom")
@property
@since("2.0.0")
def residualDegreeOfFreedom(self):
"""
The residual degrees of freedom.
"""
return self._call_java("residualDegreeOfFreedom")
@property
@since("2.0.0")
def residualDegreeOfFreedomNull(self):
"""
The residual degrees of freedom for the null model.
"""
return self._call_java("residualDegreeOfFreedomNull")
@since("2.0.0")
def residuals(self, residualsType="deviance"):
"""
Get the residuals of the fitted model by type.
:param residualsType: The type of residuals which should be returned.
Supported options: deviance (default), pearson, working, and response.
"""
return self._call_java("residuals", residualsType)
@property
@since("2.0.0")
def nullDeviance(self):
"""
The deviance for the null model.
"""
return self._call_java("nullDeviance")
@property
@since("2.0.0")
def deviance(self):
"""
The deviance for the fitted model.
"""
return self._call_java("deviance")
@property
@since("2.0.0")
def dispersion(self):
"""
The dispersion of the fitted model.
It is taken as 1.0 for the "binomial" and "poisson" families, and otherwise
estimated by the residual Pearson's Chi-Squared statistic (which is defined as
sum of the squares of the Pearson residuals) divided by the residual degrees of freedom.
"""
return self._call_java("dispersion")
@property
@since("2.0.0")
def aic(self):
"""
        Akaike's "An Information Criterion" (AIC) for the fitted model.
"""
return self._call_java("aic")
@inherit_doc
class GeneralizedLinearRegressionTrainingSummary(GeneralizedLinearRegressionSummary):
"""
Generalized linear regression training results.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def numIterations(self):
"""
Number of training iterations.
"""
return self._call_java("numIterations")
@property
@since("2.0.0")
def solver(self):
"""
The numeric solver used for training.
"""
return self._call_java("solver")
@property
@since("2.0.0")
def coefficientStandardErrors(self):
"""
Standard error of estimated coefficients and intercept.
If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
then the last element returned corresponds to the intercept.
"""
return self._call_java("coefficientStandardErrors")
@property
@since("2.0.0")
def tValues(self):
"""
T-statistic of estimated coefficients and intercept.
If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
then the last element returned corresponds to the intercept.
"""
return self._call_java("tValues")
@property
@since("2.0.0")
def pValues(self):
"""
Two-sided p-value of estimated coefficients and intercept.
If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
then the last element returned corresponds to the intercept.
"""
return self._call_java("pValues")
def __repr__(self):
return self._call_java("toString")
class _FactorizationMachinesParams(_JavaPredictorParams, HasMaxIter, HasStepSize, HasTol,
HasSolver, HasSeed, HasFitIntercept, HasRegParam):
"""
Params for :py:class:`FMRegressor`, :py:class:`FMRegressionModel`, :py:class:`FMClassifier`
and :py:class:`FMClassifierModel`.
.. versionadded:: 3.0.0
"""
factorSize = Param(Params._dummy(), "factorSize", "Dimensionality of the factor vectors, " +
"which are used to get pairwise interactions between variables",
typeConverter=TypeConverters.toInt)
fitLinear = Param(Params._dummy(), "fitLinear", "whether to fit linear term (aka 1-way term)",
typeConverter=TypeConverters.toBoolean)
miniBatchFraction = Param(Params._dummy(), "miniBatchFraction", "fraction of the input data " +
"set that should be used for one iteration of gradient descent",
typeConverter=TypeConverters.toFloat)
initStd = Param(Params._dummy(), "initStd", "standard deviation of initial coefficients",
typeConverter=TypeConverters.toFloat)
solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
"options: gd, adamW. (Default adamW)", typeConverter=TypeConverters.toString)
@since("3.0.0")
def getFactorSize(self):
"""
Gets the value of factorSize or its default value.
"""
return self.getOrDefault(self.factorSize)
@since("3.0.0")
def getFitLinear(self):
"""
Gets the value of fitLinear or its default value.
"""
return self.getOrDefault(self.fitLinear)
@since("3.0.0")
def getMiniBatchFraction(self):
"""
Gets the value of miniBatchFraction or its default value.
"""
return self.getOrDefault(self.miniBatchFraction)
@since("3.0.0")
def getInitStd(self):
"""
Gets the value of initStd or its default value.
"""
return self.getOrDefault(self.initStd)
@inherit_doc
class FMRegressor(JavaPredictor, _FactorizationMachinesParams, JavaMLWritable, JavaMLReadable):
"""
Factorization Machines learning algorithm for regression.
    Supported options for the solver:
* gd (normal mini-batch gradient descent)
* adamW (default)
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.regression import FMRegressor
>>> df = spark.createDataFrame([
... (2.0, Vectors.dense(2.0)),
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>>
>>> fm = FMRegressor(factorSize=2)
>>> fm.setSeed(16)
FMRegressor...
>>> model = fm.fit(df)
>>> model.getMaxIter()
100
>>> test0 = spark.createDataFrame([
... (Vectors.dense(-2.0),),
... (Vectors.dense(0.5),),
... (Vectors.dense(1.0),),
... (Vectors.dense(4.0),)], ["features"])
>>> model.transform(test0).show(10, False)
+--------+-------------------+
|features|prediction |
+--------+-------------------+
|[-2.0] |-1.9989237712341565|
|[0.5] |0.4956682219523814 |
|[1.0] |0.994586620589689 |
|[4.0] |3.9880970124135344 |
+--------+-------------------+
...
>>> model.intercept
-0.0032501766849261557
>>> model.linear
DenseVector([0.9978])
>>> model.factors
DenseMatrix(1, 2, [0.0173, 0.0021], 1)
.. versionadded:: 3.0.0
"""
factorSize = Param(Params._dummy(), "factorSize", "Dimensionality of the factor vectors, " +
"which are used to get pairwise interactions between variables",
typeConverter=TypeConverters.toInt)
fitLinear = Param(Params._dummy(), "fitLinear", "whether to fit linear term (aka 1-way term)",
typeConverter=TypeConverters.toBoolean)
miniBatchFraction = Param(Params._dummy(), "miniBatchFraction", "fraction of the input data " +
"set that should be used for one iteration of gradient descent",
typeConverter=TypeConverters.toFloat)
initStd = Param(Params._dummy(), "initStd", "standard deviation of initial coefficients",
typeConverter=TypeConverters.toFloat)
solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
"options: gd, adamW. (Default adamW)", typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0,
miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0,
tol=1e-6, solver="adamW", seed=None):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, \
miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, \
tol=1e-6, solver="adamW", seed=None)
"""
super(FMRegressor, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.regression.FMRegressor", self.uid)
self._setDefault(factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0,
miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0,
tol=1e-6, solver="adamW")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("3.0.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0,
miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0,
tol=1e-6, solver="adamW", seed=None):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, \
miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, \
tol=1e-6, solver="adamW", seed=None)
Sets Params for FMRegressor.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return FMRegressionModel(java_model)
@since("3.0.0")
def setFactorSize(self, value):
"""
Sets the value of :py:attr:`factorSize`.
"""
return self._set(factorSize=value)
@since("3.0.0")
def setFitLinear(self, value):
"""
Sets the value of :py:attr:`fitLinear`.
"""
return self._set(fitLinear=value)
@since("3.0.0")
def setMiniBatchFraction(self, value):
"""
Sets the value of :py:attr:`miniBatchFraction`.
"""
return self._set(miniBatchFraction=value)
@since("3.0.0")
def setInitStd(self, value):
"""
Sets the value of :py:attr:`initStd`.
"""
return self._set(initStd=value)
@since("3.0.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("3.0.0")
def setStepSize(self, value):
"""
Sets the value of :py:attr:`stepSize`.
"""
return self._set(stepSize=value)
@since("3.0.0")
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
@since("3.0.0")
def setSolver(self, value):
"""
Sets the value of :py:attr:`solver`.
"""
return self._set(solver=value)
@since("3.0.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("3.0.0")
def setFitIntercept(self, value):
"""
Sets the value of :py:attr:`fitIntercept`.
"""
return self._set(fitIntercept=value)
@since("3.0.0")
def setRegParam(self, value):
"""
Sets the value of :py:attr:`regParam`.
"""
return self._set(regParam=value)
class FMRegressionModel(JavaPredictionModel, _FactorizationMachinesParams, JavaMLWritable,
JavaMLReadable):
"""
Model fitted by :class:`FMRegressor`.
.. versionadded:: 3.0.0
"""
@property
@since("3.0.0")
def intercept(self):
"""
Model intercept.
"""
return self._call_java("intercept")
@property
@since("3.0.0")
def linear(self):
"""
Model linear term.
"""
return self._call_java("linear")
@property
@since("3.0.0")
def factors(self):
"""
Model factor term.
"""
return self._call_java("factors")
if __name__ == "__main__":
import doctest
import pyspark.ml.regression
from pyspark.sql import SparkSession
globs = pyspark.ml.regression.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.regression tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
sys.exit(-1)
| 33.820216 | 100 | 0.605371 | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | AjithShetty2489/spark | python/pyspark/ml/regression.py | 87,662 | Python |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from glanceclient.tests import utils
from glanceclient.v2 import image_tags
IMAGE = '3a4560a1-e585-443e-9b39-553b46ec92d1'
TAG = 'tag01'
data_fixtures = {
'/v2/images/{image}/tags/{tag_value}'.format(image=IMAGE, tag_value=TAG): {
'DELETE': (
{},
None,
),
'PUT': (
{},
{
'image_id': IMAGE,
'tag_value': TAG
}
),
}
}
schema_fixtures = {
'tag': {
'GET': (
{},
{'name': 'image', 'properties': {'image_id': {}, 'tags': {}}}
)
}
}
class TestController(testtools.TestCase):
def setUp(self):
super(TestController, self).setUp()
self.api = utils.FakeAPI(data_fixtures)
self.schema_api = utils.FakeSchemaAPI(schema_fixtures)
self.controller = image_tags.Controller(self.api, self.schema_api)
def test_update_image_tag(self):
image_id = IMAGE
tag_value = TAG
self.controller.update(image_id, tag_value)
expect = [
('PUT',
'/v2/images/{image}/tags/{tag_value}'.format(image=IMAGE,
tag_value=TAG),
{},
None)]
self.assertEqual(expect, self.api.calls)
def test_delete_image_tag(self):
image_id = IMAGE
tag_value = TAG
self.controller.delete(image_id, tag_value)
expect = [
('DELETE',
'/v2/images/{image}/tags/{tag_value}'.format(image=IMAGE,
tag_value=TAG),
{},
None)]
self.assertEqual(expect, self.api.calls)
| 29.036585 | 79 | 0.563209 | [
"MIT"
] | sjsucohort6/openstack | python/venv/lib/python2.7/site-packages/glanceclient/tests/unit/v2/test_tags.py | 2,381 | Python |
from .impl.cloud.rest_api import RestApi
from .impl.decorators import with_project
from a2ml.api.utils.decorators import error_handler, authenticated
from .impl.model import Model
from .credentials import Credentials
class AugerModel(object):
def __init__(self, ctx):
self.ctx = ctx
self.credentials = Credentials(ctx).load()
self.ctx.rest_api = RestApi(
self.credentials.api_url, self.credentials.token)
@error_handler
@authenticated
@with_project(autocreate=False)
def deploy(self, project, model_id, locally, review, name, algorithm, score, data_path, metadata=None):
model_id = Model(self.ctx, project).deploy(model_id, locally, review, name, algorithm, score, data_path, metadata)
return {'model_id': model_id}
@error_handler
@authenticated
#@with_project(autocreate=False)
def predict(self, filename, model_id, threshold, locally, data, columns, predicted_at, output,
no_features_in_result, score, score_true_data):
if locally:
self.deploy(model_id, locally, review=False, name=None, algorithm=None, score=None, data_path=None)
predicted = Model(self.ctx, project=None).predict(
filename, model_id, threshold, locally, data, columns, predicted_at, output,
no_features_in_result, score, score_true_data)
if filename:
self.ctx.log('Predictions stored in %s' % predicted)
if isinstance(predicted, dict) and 'predicted' in predicted:
return predicted
return {'predicted': predicted}
@error_handler
@authenticated
@with_project(autocreate=False)
def actuals(self, project, model_id, filename=None, data=None, columns=None, actuals_at=None, actual_date_column=None, locally=False):
return Model(self.ctx, project).actuals(model_id, filename, data, columns, actuals_at, actual_date_column, locally)
@error_handler
@authenticated
@with_project(autocreate=False)
def delete_actuals(self, project, model_id, with_predictions=False, begin_date=None, end_date=None, locally=False):
return Model(self.ctx, project).delete_actuals(model_id, with_predictions, begin_date, end_date, locally)
@error_handler
@authenticated
@with_project(autocreate=False)
def review_alert(self, project, model_id, parameters, name):
return Model(self.ctx, project).review_alert(model_id, parameters, name)
@error_handler
@authenticated
@with_project(autocreate=False)
def build_review_data(self, project, model_id, locally, output):
return Model(self.ctx, project).build_review_data(model_id, locally, output)
@error_handler
@authenticated
@with_project(autocreate=False)
def review(self, project, model_id):
return Model(self.ctx, project).review(model_id)
@error_handler
@authenticated
@with_project(autocreate=False)
def undeploy(self, project, model_id, locally):
Model(self.ctx, project).undeploy(model_id, locally)
return {'model_id': model_id}
@error_handler
@authenticated
#@with_project(autocreate=False)
def get_info(self, model_id, locally):
return Model(self.ctx, project=None).get_info(model_id, locally)
@error_handler
@authenticated
#@with_project(autocreate=False)
def update(self, model_id, metadata, locally):
return Model(self.ctx, project=None).update(model_id, metadata, locally)
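# A rough usage sketch (illustrative only; `ctx` stands for an already configured
# a2ml context carrying Auger credentials, and the model id below is hypothetical):
#
#   model_api = AugerModel(ctx)
#   deployed = model_api.deploy(model_id="A1B2C3", locally=False, review=False,
#                               name=None, algorithm=None, score=None, data_path=None)
#   info = model_api.get_info(model_id=deployed['model_id'], locally=False)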
| 38.32967 | 138 | 0.711869 | [
"Apache-2.0"
] | augerai/a2ml | a2ml/api/auger/model.py | 3,488 | Python |
# Generated by Django 2.0.2 on 2018-03-25 23:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course', '0004_course_tag'),
]
operations = [
migrations.AddField(
model_name='video',
name='url',
            field=models.CharField(default='', max_length=200, verbose_name='访问地址'),  # '访问地址' = 'access URL'
),
]
| 21.315789 | 84 | 0.592593 | [
"Apache-2.0"
] | LinXueyuanStdio/TimeCat-Backend-Python | timecat/apps/course/migrations/0005_video_url.py | 413 | Python |
import itertools
import math
import string
import sys
from bisect import bisect_left as bi_l
from bisect import bisect_right as bi_r
from collections import Counter, defaultdict, deque
from heapq import heappop, heappush
from operator import or_, xor
inf = float("inf")
from functools import lru_cache, reduce
sys.setrecursionlimit(10**6)
MOD = 10**9 + 7
# MOD = 998244353
global using_numpy
using_numpy = False
import networkx as nx
import numpy as np
from numba import jit
from scipy import optimize
from scipy.ndimage import distance_transform_cdt
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import (
csgraph_to_dense,
maximum_flow,
minimum_spanning_tree,
shortest_path,
)
from scipy.spatial import ConvexHull
from scipy.special import comb
class Algebra:
class Mint(int):
def __init__(self, n, mod=MOD):
self.value = n
self.mod = mod
def __str__(self):
return f"{self.value}"
def __add__(self, x):
return self.__class__((self.value + x.value) % self.mod)
def __sub__(self, x):
return self.__class__((self.value - x.value) % self.mod)
def __mul__(self, x):
return self.__class__((self.value * x.value) % self.mod)
def __pow__(self, x):
return self.__class__(pow(self.value, x.value, self.mod))
def __lt__(self, x):
return self.value < x.value
def __le__(self, x):
return self.value <= x.value
def __eq__(self, x):
return self.value == x.value
def __ne__(self, x):
return self.value != x.value
def __gt__(self, x):
return self.value > x.value
def __ge__(self, x):
return self.value >= x.value
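    # A minimal sketch of how Mint is meant to be used (results of the arithmetic
    # operators are reduced modulo the module-level MOD by default):
    #
    #   a, b = Algebra.Mint(10**9), Algebra.Mint(123)
    #   print(a * b)                 # (10**9 * 123) % (10**9 + 7)
    #   print(a ** Algebra.Mint(2))  # pow(10**9, 2, 10**9 + 7)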
class SemiGroup:
pass
class Monoid:
pass
class Group:
pass
class SemiRing:
pass
class Ring:
pass
@staticmethod
def identity(n):
if using_numpy:
return np.identity(n, dtype=np.int64)
else:
a = [[0] * n for _ in range(n)]
for i in range(n):
a[i][i] = 1
return a
@staticmethod
def dot(a, b):
if using_numpy:
return np.dot(a, b)
else:
assert len(a[0]) == len(b)
c = [[0] * len(b[0]) for _ in range(len(a))]
for i in range(len(a)):
for j in range(len(b[0])):
for k in range(len(b)):
c[i][j] += a[i][k] * b[k][j]
return c
@classmethod
def matrix_pow(cls, a, n, mod=10**9 + 7):
m = len(a)
b = cls.identity(m)
while n:
if n & 1:
b = cls.dot(b, a)
n >>= 1
a = cls.dot(a, a)
if using_numpy:
a %= mod
b %= mod
else:
for i in range(m):
for j in range(m):
a[i][j] %= mod
b[i][j] %= mod
return b
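    # Example (a sketch): fast Fibonacci numbers via matrix exponentiation. With
    # [[1, 1], [1, 0]] raised to the n-th power, the off-diagonal entry is the
    # n-th Fibonacci number.
    #
    #   fib = Algebra.matrix_pow([[1, 1], [1, 0]], 10)
    #   print(fib[0][1])  # 55 (computed modulo 10**9 + 7)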
@staticmethod
def bitwise_dot(a, b):
if using_numpy:
return np.bitwise_xor.reduce(
a[:, None, :] & b.T[None, :, :], axis=-1
)
else:
assert len(a[0]) == len(b)
c = [[0] * len(b[0]) for _ in range(len(a))]
for i in range(len(a)):
for j in range(len(b[0])):
for k in range(len(b)):
c[i][j] ^= a[i][k] & b[k][j]
return c
@classmethod
def bitwise_mat_pow(cls, a, n):
if n == 0:
return np.eye(len(a), dtype=np.uint32) * ((1 << 32) - 1)
res = cls.bitwise_mat_pow(a, n // 2)
res = cls.bitwise_dot(res, res)
return cls.bitwise_dot(res, a) if n & 1 else res
class NumberTheory:
def __init__(self, n=2 * 10**6):
self.n = n
self.is_prime_number, self.prime_numbers = self.sieve_of_eratosthenes(
n
)
def sieve_of_eratosthenes(self, n):
if using_numpy:
sieve = np.ones(n + 1, dtype=np.int32)
sieve[:2] = 0
for i in range(2, int(n**0.5) + 1):
if sieve[i]:
sieve[i * 2 :: i] = 0
prime_numbers = np.flatnonzero(sieve)
else:
sieve = [1] * (n + 1)
sieve[0] = sieve[1] = 0
for i in range(2, int(n**0.5) + 1):
if not sieve[i]:
continue
for j in range(i * 2, n + 1, i):
sieve[j] = 0
prime_numbers = [i for i in range(2, n + 1) if sieve[i]]
return sieve, prime_numbers
def prime_factorize(self, n):
res = dict()
if n < 2:
return res
border = int(n**0.5)
for p in self.prime_numbers:
if p > border:
break
while n % p == 0:
res[p] = res.get(p, 0) + 1
n //= p
if n == 1:
return res
res[n] = 1
return res
def prime_factorize_factorial(self, n):
res = dict()
for i in range(2, n + 1):
for p, c in self.prime_factorize(i).items():
res[p] = res.get(p, 0) + c
return res
@classmethod
@lru_cache(maxsize=None)
def gcd(cls, a, b):
return cls.gcd(b, a % b) if b else abs(a)
@classmethod
def lcm(cls, a, b):
return abs(a // cls.gcd(a, b) * b)
@staticmethod
def find_divisors(n):
divisors = []
for i in range(1, int(n**0.5) + 1):
if n % i:
continue
divisors.append(i)
j = n // i
if j != i:
divisors.append(j)
return sorted(divisors)
@staticmethod
def base_convert(n, b):
if not n:
return [0]
res = []
while n:
n, r = divmod(n, b)
if r < 0:
n += 1
r -= b
res.append(r)
return res
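# A small usage sketch for NumberTheory (the sieve bound is passed explicitly here
# just to keep the example cheap):
#
#   nt = NumberTheory(n=100)
#   print(nt.prime_factorize(84))     # {2: 2, 3: 1, 7: 1}
#   print(nt.find_divisors(36))       # [1, 2, 3, 4, 6, 9, 12, 18, 36]
#   print(NumberTheory.gcd(12, 18))   # 6
#   print(NumberTheory.lcm(4, 6))     # 12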
mint = Algebra.Mint
class Combinatorics:
def __init__(self, N=10**9, n=10**6, mod=10**9 + 7):
self.mod = mod
self.make_mod_tables(N, n)
@classmethod
@lru_cache(maxsize=None)
def choose(cls, n, r, mod=None): # no mod, or mod ≠ prime
if r > n or r < 0:
return 0
if r == 0:
return 1
res = cls.choose(n - 1, r, mod) + cls.choose(n - 1, r - 1, mod)
if mod:
res %= mod
return res
    def cumprod(self, a):
        # cumulative product of `a` modulo p, blocked into sqrt-sized chunks so
        # that numpy can vectorize the work
p = self.mod
l = len(a)
sql = int(np.sqrt(l) + 1)
a = np.resize(a, sql**2).reshape(sql, sql)
for i in range(sql - 1):
a[:, i + 1] *= a[:, i]
a[:, i + 1] %= p
for i in range(sql - 1):
a[i + 1] *= a[i, -1]
a[i + 1] %= p
return np.ravel(a)[:l]
def make_mod_tables(self, N, n):
p = self.mod
if using_numpy:
fac = np.arange(n + 1)
fac[0] = 1
fac = self.cumprod(fac)
ifac = np.arange(n + 1, 0, -1)
ifac[0] = pow(int(fac[-1]), p - 2, p)
ifac = self.cumprod(ifac)[n::-1]
n_choose = np.arange(N + 1, N - n, -1)
n_choose[0] = 1
n_choose[1:] = self.cumprod(n_choose[1:]) * ifac[1 : n + 1] % p
else:
fac = [None] * (n + 1)
fac[0] = 1
for i in range(n):
fac[i + 1] = fac[i] * (i + 1) % p
ifac = [None] * (n + 1)
ifac[n] = pow(fac[n], p - 2, p)
for i in range(n, 0, -1):
ifac[i - 1] = ifac[i] * i % p
n_choose = [None] * (n + 1)
n_choose[0] = 1
for i in range(n):
n_choose[i + 1] = n_choose[i] * (N - i) % p
for i in range(n + 1):
n_choose[i] = n_choose[i] * ifac[i] % p
self.fac, self.ifac, self.mod_n_choose = fac, ifac, n_choose
def mod_choose(self, n, r):
p = self.mod
return self.fac[n] * self.ifac[r] % p * self.ifac[n - r] % p
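    # A short sketch of the precomputed-table workflow (n bounds the largest
    # factorial table; N only matters for the mod_n_choose table):
    #
    #   comb = Combinatorics(N=10**9, n=100)
    #   print(comb.mod_choose(10, 3))        # 120
    #   print(Combinatorics.choose(10, 3))   # 120 (plain recursive version)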
@classmethod
def permutations(cls, a, r=None, i=0):
a = list(a)
n = len(a)
if r is None:
r = n
res = []
if r > n or i > r:
return res
if i == r:
return [tuple(a[:r])]
for j in range(i, n):
a[i], a[j] = a[j], a[i]
res += cls.permutations(a, r, i + 1)
return res
@staticmethod
def combinations(a, r):
a = tuple(a)
n = len(a)
if r > n:
return
indices = list(range(r))
yield a[:r]
while True:
for i in range(r - 1, -1, -1):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i + 1, r):
indices[j] = indices[j - 1] + 1
yield tuple(a[i] for i in indices)
class String:
@staticmethod
def z_algorithm(s):
n = len(s)
a = [0] * n
a[0] = n
l = r = -1
for i in range(1, n):
if r >= i:
a[i] = min(a[i - l], r - i)
while i + a[i] < n and s[i + a[i]] == s[a[i]]:
a[i] += 1
if i + a[i] >= r:
l, r = i, i + a[i]
return a
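# A quick check of the Z-algorithm (a[i] is the length of the longest common
# prefix of s and s[i:]):
#
#   print(String.z_algorithm("aabaa"))  # [5, 1, 0, 2, 1]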
class GeometryTopology:
class Graph:
        def __init__(self, nodes=None, edges=None):
            # use fresh dicts per instance (mutable default arguments would be
            # shared across every Graph created in the same process)
            self.nodes = nodes if nodes is not None else {}
            self.edges = edges if edges is not None else {}
def add_node(self, v, **info):
if not v in self.edges:
self.edges[v] = {}
if v in self.nodes:
return
self.nodes[v] = info
def add_edge(self, u, v, **info):
self.add_node(u)
self.add_node(v)
self.edges[u][v] = info
def get_size(self):
return len(self.nodes)
def dinic(self, src, sink):
def bfs():
lv = {src: 0}
q = deque([src])
while q:
u = q.popleft()
for v, e in self.edges[u].items():
if e["capacity"] == 0 or v in lv:
continue
lv[v] = lv[u] + 1
q.append(v)
return lv
def flow_to_sink(u, flow_in):
if u == sink:
return flow_in
flow = 0
for v, e in self.edges[u].items():
cap = e["capacity"]
if cap == 0 or lv[v] <= lv[u]:
continue
f = flow_to_sink(v, min(flow_in, cap))
if not f:
continue
self.edges[u][v]["capacity"] -= f
if v in self.edges and u in self.edges[v]:
self.edges[v][u]["capacity"] += f
else:
self.add_edge(v, u, capacity=f)
flow_in -= f
flow += f
return flow
flow = 0
while True:
lv = bfs()
if not sink in lv:
return flow
flow += flow_to_sink(src, inf)
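        # A tiny max-flow sketch (edges here are directed and need a
        # "capacity" attribute):
        #
        #   g = GeometryTopology.Graph()
        #   g.add_edge(0, 1, capacity=3)
        #   g.add_edge(0, 2, capacity=2)
        #   g.add_edge(1, 3, capacity=2)
        #   g.add_edge(2, 3, capacity=3)
        #   print(g.dinic(0, 3))  # 4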
def ford_fulkerson(self):
pass
def push_relabel(self):
pass
def floyd_warshall(self):
d = {u: {v: inf for v in self.nodes} for u in self.nodes}
for v in self.nodes:
d[v][v] = 0
for u in self.edges:
for v in self.edges[u]:
d[u][v] = self.edges[u][v]["weight"]
for w in self.nodes:
for u in self.nodes:
for v in self.nodes:
d[u][v] = min(d[u][v], d[u][w] + d[w][v])
return d
def dijkstra(self, src, paths_cnt=False, mod=None):
dist = {v: inf for v in self.nodes}
dist[src] = 0
visited = set()
paths = {v: 0 for v in self.nodes}
paths[src] = 1
q = [(0, src)]
while q:
d, u = heappop(q)
if u in visited:
continue
visited.add(u)
for v, e in self.edges[u].items():
dv = d + e["weight"]
if dv > dist[v]:
continue
elif dv == dist[v]:
paths[v] += paths[u]
if mod:
paths[v] %= mod
continue
paths[v] = paths[u]
dist[v] = dv
heappush(q, (dv, v))
if paths_cnt:
return dist, paths
else:
return dist
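        # A minimal single-source shortest path sketch (edges are directed and
        # need a "weight" attribute):
        #
        #   g = GeometryTopology.Graph()
        #   g.add_edge(0, 1, weight=4)
        #   g.add_edge(0, 2, weight=1)
        #   g.add_edge(2, 1, weight=2)
        #   print(g.dijkstra(0))  # {0: 0, 1: 3, 2: 1}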
def astar(self, src, tgt, heuristic_func):
cost = {v: inf for v in self.nodes}
q = [(heuristic_func(src, tgt), 0, src)]
while q:
s, c, u = heappop(q)
if u == tgt:
return c
if cost[u] != inf:
continue
cost[u] = c
for v, e in self.edges[u].items():
if cost[v] != inf:
continue
h = heuristic_func(v, tgt)
nc = c + e["weight"]
heappush(q, (h + nc, nc, v))
return inf
def init_tree(self, root=0):
self.depth = {root: 0}
self.dist = {root: 0}
self.ancestors = [{root: root}]
stack = [root]
while stack:
u = stack.pop()
for v, e in self.edges[u].items():
if v == self.ancestors[0][u]:
continue
self.dist[v] = self.dist[u] + e["weight"]
self.depth[v] = self.depth[u] + 1
self.ancestors[0][v] = u
stack.append(v)
# tree doubling
for _ in range(max(self.depth).bit_length()):
ancestor = self.ancestors[-1]
nxt_ancestor = {v: ancestor[ancestor[v]] for v in self.nodes}
self.ancestors.append(nxt_ancestor)
def find_dist(self, u, v):
return (
self.dist[u]
+ self.dist[v]
- 2 * self.dist[self.find_lca(u, v)]
)
def find_lca(self, u, v):
du, dv = self.depth[u], self.depth[v]
if du > dv:
u, v = v, u
du, dv = dv, du
d = dv - du
for i in range((d).bit_length()): # up-stream
if d >> i & 1:
v = self.ancestors[i][v]
if v == u:
return v
for i in range(
du.bit_length() - 1, -1, -1
): # find direct child of LCA.
nu, nv = self.ancestors[i][u], self.ancestors[i][v]
if nu == nv:
continue
u, v = nu, nv
return self.ancestors[0][u]
@staticmethod
def triangle_area(p0, p1, p2, signed=False):
x1, y1, x2, y2 = (
p1[0] - p0[0],
p1[1] - p0[1],
p2[0] - p0[0],
p2[1] - p0[1],
)
return (
(x1 * y2 - x2 * y1) / 2 if signed else abs(x1 * y2 - x2 * y1) / 2
)
@classmethod
def intersect(cls, seg1, seg2):
(p1, p2), (p3, p4) = seg1, seg2
t1 = cls.triangle_area(p1, p2, p3, signed=True)
t2 = cls.triangle_area(p1, p2, p4, signed=True)
t3 = cls.triangle_area(p3, p4, p1, signed=True)
t4 = cls.triangle_area(p3, p4, p2, signed=True)
return (t1 * t2 < 0) & (t3 * t4 < 0)
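    # Example (a sketch): the two diagonals of a square cross, so
    #
    #   GeometryTopology.intersect(((0, 0), (2, 2)), ((0, 2), (2, 0)))  # True
    #
    # (segments that merely touch at an endpoint are reported as False here).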
class UnionFind:
def __init__(self, n=10**6):
self.root = list(range(n))
self.height = [0] * n
self.size = [1] * n
def find_root(self, u):
if self.root[u] == u:
return u
self.root[u] = self.find_root(self.root[u])
return self.root[u]
def unite(self, u, v):
ru = self.find_root(u)
rv = self.find_root(v)
if ru == rv:
return
hu = self.height[ru]
hv = self.height[rv]
if hu >= hv:
self.root[rv] = ru
self.size[ru] += self.size[rv]
self.height[ru] = max(hu, hv + 1)
else:
self.root[ru] = rv
self.size[rv] += self.size[ru]
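# A small union-find sketch:
#
#   uf = GeometryTopology.UnionFind(n=5)
#   uf.unite(0, 1)
#   uf.unite(3, 4)
#   print(uf.find_root(1) == uf.find_root(0))   # True
#   print(uf.size[uf.find_root(0)])             # 2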
def cumxor(a):
return reduce(xor, a, 0)
def cumor(a):
return reduce(or_, a, 0)
def bit_count(n):
cnt = 0
while n:
cnt += n & 1
n >>= 1
return cnt
class AtCoder:
class ABC001:
@staticmethod
def a():
h1, h2 = map(int, sys.stdin.read().split())
print(h1 - h2)
@staticmethod
def d():
def to_minuites(x):
q, r = divmod(x, 100)
return 60 * q + r
def to_hmform(x):
q, r = divmod(x, 60)
return 100 * q + r
n = int(sys.stdin.readline().rstrip())
term = [0] * 2001
for _ in range(n):
s, e = map(
to_minuites,
map(int, sys.stdin.readline().rstrip().split("-")),
)
s = s // 5 * 5
e = (e + 4) // 5 * 5
term[s] += 1
term[e + 1] -= 1
for i in range(2000):
term[i + 1] += term[i]
res = []
raining = False
for i in range(2001):
if term[i]:
if not raining:
s = i
raining = True
elif raining:
res.append((s, i - 1))
raining = False
for s, e in res:
print(f"{to_hmform(s):04}-{to_hmform(e):04}")
class ABC002:
@staticmethod
def a():
print(max(map(int, sys.stdin.readline().split())))
@staticmethod
def b():
vowels = set("aeiou")
print(
"".join(
[
c
for c in sys.stdin.readline().rstrip()
if c not in vowels
]
)
)
@staticmethod
def c():
print(
GeometryTopology.triangle_area(
*map(int, sys.stdin.readline().split())
)
)
@staticmethod
def d():
n, m = map(int, sys.stdin.readline().split())
edges = set(
(x - 1, y - 1)
for x, y in zip(*[map(int, sys.stdin.read().split())] * 2)
)
print(
max(
len(s)
for i in range(1, 1 << n)
for s in [[j for j in range(n) if i >> j & 1]]
if all(
(x, y) in edges
for x, y in itertools.combinations(s, 2)
)
)
)
@staticmethod
def d_2():
n, m = map(int, sys.stdin.readline().split())
relations = [1 << i for i in range(n)]
for x, y in zip(*[map(int, sys.stdin.read().split())] * 2):
x -= 1
y -= 1
relations[x] |= 1 << y
relations[y] |= 1 << x
res = 0
for i in range(1 << n):
cnt = 0
s = 0
t = (1 << n) - 1
for j in range(n):
if i >> j & 1:
s |= 1 << j
t &= relations[j]
cnt += 1
if t & s == s:
res = max(res, cnt)
print(res)
class ABC003:
@staticmethod
def a():
print((int(sys.stdin.readline().rstrip()) + 1) * 5000)
@staticmethod
def b():
atcoder = set("atcoder")
s, t = sys.stdin.read().split()
print(
all(
s[i] == t[i]
or s[i] == "@"
and t[i] in atcoder
or t[i] == "@"
and s[i] in atcoder
for i in range(len(s))
)
and "You can win"
or "You will lose"
)
@staticmethod
def c():
n, k, *r = map(int, sys.stdin.read().split())
print(reduce(lambda x, y: (x + y) / 2, sorted(r)[-k:], 0))
class ABC004:
@staticmethod
def a():
print(int(sys.stdin.readline().rstrip()) * 2)
@staticmethod
def b():
for l in [sys.stdin.readline().rstrip() for _ in range(4)][::-1]:
print(l[::-1])
@staticmethod
def c():
n = int(sys.stdin.readline().rstrip()) % 30
res = list(range(1, 7))
for i in range(n):
i %= 5
res[i], res[i + 1] = res[i + 1], res[i]
print(*res, sep="")
class ABC005:
@staticmethod
def a():
x, y = map(int, sys.stdin.readline().split())
print(y // x)
@staticmethod
def b():
n, *t = map(int, sys.stdin.read().split())
print(min(t))
@staticmethod
def c():
t = int(sys.stdin.readline().rstrip())
n = int(sys.stdin.readline().rstrip())
a = [int(x) for x in sys.stdin.readline().split()]
m = int(sys.stdin.readline().rstrip())
b = [int(x) for x in sys.stdin.readline().split()]
i = 0
for p in b:
if i == n:
print("no")
return
while p - a[i] > t:
i += 1
if i == n:
print("no")
return
if a[i] > p:
print("no")
return
i += 1
print("yes")
@staticmethod
def d():
n = int(sys.stdin.readline().rstrip())
d = np.array(
[sys.stdin.readline().split() for _ in range(n)], np.int64
)
s = d.cumsum(axis=0).cumsum(axis=1)
s = np.pad(s, 1)
max_del = np.zeros((n + 1, n + 1), dtype=np.int64)
for y in range(1, n + 1):
for x in range(1, n + 1):
max_del[y, x] = np.amax(
s[y : n + 1, x : n + 1]
- s[0 : n - y + 1, x : n + 1]
- s[y : n + 1, 0 : n - x + 1]
+ s[0 : n - y + 1, 0 : n - x + 1]
)
res = np.arange(n**2 + 1)[:, None]
i = np.arange(1, n + 1)
res = max_del[i, np.minimum(res // i, n)].max(axis=1)
q = int(sys.stdin.readline().rstrip())
p = np.array(sys.stdin.read().split(), dtype=np.int64)
print(*res[p], sep="\n")
class ABC006:
@staticmethod
def a():
n = sys.stdin.readline().rstrip()
if "3" in n:
print("YES")
elif int(n) % 3 == 0:
print("YES")
else:
print("NO")
@staticmethod
def b():
mod = 10007
a = np.eye(N=3, k=-1, dtype=np.int64)
a[0] = 1
n = int(sys.stdin.readline().rstrip())
a = Algebra.matrix_pow(a, n - 1, mod)
print(a[2][0])
@staticmethod
def c():
n, m = map(int, sys.stdin.readline().split())
cnt = [0, 0, 0]
if m == 1:
cnt = [-1, -1, -1]
else:
if m & 1:
m -= 3
cnt[1] += 1
n -= 1
cnt[2] = m // 2 - n
cnt[0] = n - cnt[2]
if cnt[0] < 0 or cnt[1] < 0 or cnt[2] < 0:
print(-1, -1, -1)
else:
print(*cnt, sep=" ")
@staticmethod
def d():
n, *c = map(int, sys.stdin.read().split())
lis = [inf] * n
for x in c:
lis[bi_l(lis, x)] = x
print(n - bi_l(lis, inf))
class ABC007:
@staticmethod
def a():
n = int(sys.stdin.readline().rstrip())
print(n - 1)
@staticmethod
def b():
s = sys.stdin.readline().rstrip()
if s == "a":
print(-1)
else:
print("a")
@staticmethod
def c():
r, c = map(int, sys.stdin.readline().split())
sy, sx = map(int, sys.stdin.readline().split())
gy, gx = map(int, sys.stdin.readline().split())
sy -= 1
sx -= 1
gy -= 1
gx -= 1
maze = [sys.stdin.readline().rstrip() for _ in range(r)]
queue = deque([(sy, sx)])
dist = np.full((r, c), np.inf)
dist[sy, sx] = 0
while queue:
y, x = queue.popleft()
for i, j in [(-1, 0), (1, 0), (0, -1), (0, 1)]:
i += y
j += x
if maze[i][j] == "#" or dist[i, j] != np.inf:
continue
dist[i, j] = dist[y, x] + 1
queue.append((i, j))
print(int(dist[gy, gx]))
@staticmethod
def d():
ng = set([4, 9])
def count(d):
return d if d <= 4 else d - 1
def f(n):
x = [int(d) for d in str(n)]
flg = True
dp = 0
for d in x:
dp = dp * 8 + flg * count(d)
if d in ng:
flg = False
return n - (dp + flg)
a, b = map(int, sys.stdin.readline().split())
print(f(b) - f(a - 1))
class ABC008:
@staticmethod
def a():
s, t = map(int, sys.stdin.readline().split())
print(t - s + 1)
@staticmethod
def b():
n, *s = sys.stdin.read().split()
res = defaultdict(int)
for name in s:
res[name] += 1
print(sorted(res.items(), key=lambda x: x[1])[-1][0])
@staticmethod
def c():
n, *a = map(int, sys.stdin.read().split())
a = np.array(a)
c = n - np.count_nonzero(a[:, None] % a, axis=1)
print(np.sum((c + 1) // 2 / c))
@staticmethod
def d():
w, h, n, *xy = map(int, sys.stdin.read().split())
(*xy,) = zip(*([iter(xy)] * 2))
@lru_cache(maxsize=None)
def count(x1, y1, x2, y2):
res = 0
for x, y in xy:
if not (x1 <= x <= x2 and y1 <= y <= y2):
continue
cnt = (x2 - x1) + (y2 - y1) + 1
cnt += count(x1, y1, x - 1, y - 1)
cnt += count(x1, y + 1, x - 1, y2)
cnt += count(x + 1, y1, x2, y - 1)
cnt += count(x + 1, y + 1, x2, y2)
res = max(res, cnt)
return res
print(count(1, 1, w, h))
class ABC009:
@staticmethod
def a():
n = int(sys.stdin.readline().rstrip())
print((n + 1) // 2)
@staticmethod
def b():
n, *a = map(int, sys.stdin.read().split())
print(sorted(set(a))[-2])
@staticmethod
def c():
n, k = map(int, sys.stdin.readline().split())
s = list(sys.stdin.readline().rstrip())
cost = [1] * n
r = k
for i in range(n - 1):
q = []
for j in range(i + 1, n):
if s[j] < s[i] and cost[i] + cost[j] <= r:
heappush(q, (s[j], cost[i] + cost[j], -j))
if not q:
continue
_, c, j = heappop(q)
j = -j
s[i], s[j] = s[j], s[i]
r -= c
cost[i] = cost[j] = 0
print("".join(s))
@staticmethod
def d():
k, m = map(int, sys.stdin.readline().split())
a = np.array([int(x) for x in sys.stdin.readline().split()])
c = np.array([int(x) for x in sys.stdin.readline().split()])
mask = (1 << 32) - 1
d = np.eye(k, k, -1, dtype=np.uint32) * mask
d[0] = c
if m <= k:
print(a[m - 1])
return
# print(Algebra.bitwise_mat_pow(d, m-k))
# print(Algebra.bitwise_dot(Algebra.bitwise_mat_pow(d, m-k), a[::-1].reshape(-1, 1))[0].item())
print(
Algebra.bitwise_dot(
Algebra.bitwise_mat_pow(d, m - k), a[::-1].reshape(-1, 1)
)[0][0]
)
class ABC010:
@staticmethod
def a():
print(sys.stdin.readline().rstrip() + "pp")
@staticmethod
def b():
n, *a = map(int, sys.stdin.read().split())
tot = 0
for x in a:
c = 0
while x % 2 == 0 or x % 3 == 2:
x -= 1
c += 1
tot += c
print(tot)
@staticmethod
def c():
sx, sy, gx, gy, t, v, n, *xy = map(int, sys.stdin.read().split())
x, y = np.array(xy).reshape(-1, 2).T
def dist(x1, y1, x2, y2):
return np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
ans = (
"YES"
if (dist(sx, sy, x, y) + dist(x, y, gx, gy) <= v * t).any()
else "NO"
)
print(ans)
@staticmethod
def d():
n, g, e = map(int, sys.stdin.readline().split())
p = [int(x) for x in sys.stdin.readline().split()]
x, y = [], []
for _ in range(e):
a, b = map(int, sys.stdin.readline().split())
x.append(a)
y.append(b)
x.append(b)
y.append(a)
for a in p:
x.append(a)
y.append(n)
if not x:
print(0)
return
c = [1] * len(x)
min_cut = maximum_flow(
csr_matrix((c, (x, y)), (n + 1, n + 1)), source=0, sink=n
).flow_value
print(min_cut)
@staticmethod
def d_2():
n, g, e = map(int, sys.stdin.readline().split())
graph = nx.DiGraph()
graph.add_nodes_from(range(n + 1))
for p in [int(x) for x in sys.stdin.readline().split()]:
graph.add_edge(p, n, capacity=1)
for _ in range(e):
a, b = map(int, sys.stdin.readline().split())
graph.add_edge(a, b, capacity=1)
graph.add_edge(b, a, capacity=1)
print(nx.minimum_cut_value(graph, 0, n))
@staticmethod
def d_3():
n, g, e = map(int, sys.stdin.readline().split())
graph = GeometryTopology.Graph()
for i in range(n + 1):
graph.add_node(i)
for p in [int(x) for x in sys.stdin.readline().split()]:
graph.add_edge(p, n, capacity=1)
for a, b in zip(*[map(int, sys.stdin.read().split())] * 2):
graph.add_edge(a, b, capacity=1)
graph.add_edge(b, a, capacity=1)
print(graph.dinic(0, n))
class ABC011:
@staticmethod
def a():
n = int(sys.stdin.readline().rstrip())
print(n % 12 + 1)
@staticmethod
def b():
s = sys.stdin.readline().rstrip()
print(s[0].upper() + s[1:].lower())
@staticmethod
def c():
n, *ng = map(int, sys.stdin.read().split())
ng = set(ng)
if n in ng:
print("NO")
else:
r = 100
while n > 0:
if r == 0:
print("NO")
return
for i in range(3, 0, -1):
if (n - i) in ng:
continue
n -= i
r -= 1
break
else:
print("NO")
return
print("YES")
@staticmethod
def d():
n, d, x, y = map(int, sys.stdin.read().split())
x, y = abs(x), abs(y)
if x % d or y % d:
print(0)
return
x, y = x // d, y // d
r = n - (x + y)
if r < 0 or r & 1:
print(0)
return
res = 0
half_p = pow(1 / 2, n)
for d in range(r // 2 + 1): # 0 <= d <= r//2, south
south, north = d, y + d
west = (r - 2 * d) // 2
res += (
half_p
* comb(n, south, exact=True)
* comb(n - south, north, exact=True)
* comb(n - south - north, west, exact=True)
* half_p
)
print(res)
class ABC012:
@staticmethod
def a():
a, b = map(int, sys.stdin.readline().split())
print(b, a)
@staticmethod
def b():
n = int(sys.stdin.readline().rstrip())
h, n = divmod(n, 3600)
m, s = divmod(n, 60)
print(f"{h:02}:{m:02}:{s:02}")
@staticmethod
def c():
n = 2025 - int(sys.stdin.readline().rstrip())
res = []
for i in range(1, 10):
if n % i != 0 or n // i > 9:
continue
res.append(f"{i} x {n//i}")
print(*sorted(res), sep="\n")
@staticmethod
def d():
n, m, *abt = map(int, sys.stdin.read().split())
a, b, t = np.array(abt).reshape(m, 3).T
res = shortest_path(
csr_matrix((t, (a - 1, b - 1)), (n, n)),
method="FW",
directed=False,
)
print(res.max(axis=-1).min().astype(np.int64))
@staticmethod
def d_2():
n, m, *abt = map(int, sys.stdin.read().split())
graph = GeometryTopology.Graph()
for a, b, t in zip(*[iter(abt)] * 3):
a -= 1
b -= 1
graph.add_edge(a, b, weight=t)
graph.add_edge(b, a, weight=t)
dist = graph.floyd_warshall()
res = min([max(tmp.values()) for tmp in dist.values()])
print(res)
class ABC013:
@staticmethod
def a():
print(ord(sys.stdin.readline().rstrip()) - ord("A") + 1)
@staticmethod
def b():
a, b = map(int, sys.stdin.read().split())
d = abs(a - b)
print(min(d, 10 - d))
@staticmethod
def c():
n, h, a, b, c, d, e = map(int, sys.stdin.read().split())
y = np.arange(n + 1)
x = (n * e - h - (d + e) * y) // (b + e) + 1
np.maximum(x, 0, out=x)
np.minimum(x, n - y, out=x)
print(np.amin(a * x + c * y))
@staticmethod
def d():
n, m, d, *a = map(int, sys.stdin.read().split())
res = list(range(n))
def swap(i, j):
res[i], res[j] = res[j], res[i]
for i in a[::-1]:
swap(i - 1, i)
group = [None] * n
root = [None] * n
index_in_group = [None] * n
for i in range(n):
if root[i] is not None:
continue
group[i] = []
j = i
for cnt in range(1, n + 1):
index_in_group[j] = cnt - 1
group[i].append(j)
j = res[j]
root[j] = i
if j == i:
break
for i in range(n):
g = group[root[i]]
print(g[(index_in_group[i] + d) % len(g)] + 1)
class ABC014:
@staticmethod
def a():
a, b = map(int, sys.stdin.read().split())
print((a + b - 1) // b * b - a)
@staticmethod
def b():
n, x, *a = map(int, sys.stdin.read().split())
print(sum(a[i] for i in range(n) if x >> i & 1))
@staticmethod
def c():
n, *ab = map(int, sys.stdin.read().split())
a, b = np.array(ab).reshape(n, 2).T
res = np.zeros(10**6 + 2, dtype=np.int64)
np.add.at(res, a, 1)
np.subtract.at(res, b + 1, 1)
np.cumsum(res, out=res)
print(res.max())
@staticmethod
def d():
n = int(sys.stdin.readline().rstrip())
# edges = [[] for _ in range(n)]
g = GeometryTopology.Graph()
for _ in range(n - 1):
x, y = map(int, sys.stdin.readline().split())
x -= 1
y -= 1
g.add_edge(x, y, weight=1)
g.add_edge(y, x, weight=1)
g.init_tree()
# tree = GeometryTopology.TreeGraph(n, edges, 0)
q, *ab = map(int, sys.stdin.read().split())
for a, b in zip(*[iter(ab)] * 2):
a -= 1
b -= 1
print(g.find_dist(a, b) + 1)
class ABC015:
@staticmethod
def a():
a, b = sys.stdin.read().split()
print(a if len(a) > len(b) else b)
@staticmethod
def b():
n, *a = map(int, sys.stdin.read().split())
a = np.array(a)
print(
np.ceil(
a[np.nonzero(a)[0]].sum() / np.count_nonzero(a)
).astype(np.int8)
)
@staticmethod
def c():
n, k, *t = map(int, sys.stdin.read().split())
t = np.array(t).reshape(n, k)
x = np.zeros((1, 1), dtype=np.int8)
for i in range(n):
x = x.reshape(-1, 1) ^ t[i]
print("Found" if np.count_nonzero(x == 0) > 0 else "Nothing")
@staticmethod
def d():
w, n, k, *ab = map(int, sys.stdin.read().split())
dp = np.zeros((k + 1, w + 1), dtype=np.int32)
for a, b in zip(*[iter(ab)] * 2):
prev = dp.copy()
np.maximum(dp[1:, a:], prev[:-1, :-a] + b, out=dp[1:, a:])
print(dp[k][w])
class ABC016:
@staticmethod
def a():
m, d = map(int, sys.stdin.readline().split())
print("YES" if m % d == 0 else "NO")
@staticmethod
def b():
a, b, c = map(int, sys.stdin.readline().split())
f1, f2 = a + b == c, a - b == c
if f1 & f2:
print("?")
elif f1 & (~f2):
print("+")
elif (~f1) & f2:
print("-")
else:
print("!")
@staticmethod
def c():
n, _, *ab = map(int, sys.stdin.read().split())
friends = [0] * n
for a, b in zip(*[iter(ab)] * 2):
a -= 1
b -= 1
friends[a] |= 1 << b
friends[b] |= 1 << a
res = [
bit_count(
cumor(friends[j] for j in range(n) if friends[i] >> j & 1)
& ~(friends[i] | 1 << i)
)
for i in range(n)
]
print(*res, sep="\n")
@staticmethod
def d():
sx, sy, gx, gy = map(int, sys.stdin.readline().split())
seg1 = ((sx, sy), (gx, gy))
n = int(sys.stdin.readline().rstrip())
p1 = (
np.array(sys.stdin.read().split(), dtype=np.int64)
.reshape(n, 2)
.T
)
p2 = np.hstack((p1[:, 1:], p1[:, :1]))
seg2 = (p1, p2)
print(
np.count_nonzero(GeometryTopology.intersect(seg1, seg2)) // 2
+ 1
)
class ABC017:
@staticmethod
def a():
s, e = (
np.array(sys.stdin.read().split(), dtype=np.int16)
.reshape(3, 2)
.T
)
print((s // 10 * e).sum())
@staticmethod
def b():
choku_tail = set("ch, o, k, u".split(", "))
def is_choku(s):
if s == "":
return True
if len(s) >= 1 and (s[-1] in choku_tail) and is_choku(s[:-1]):
return True
if len(s) >= 2 and (s[-2:] in choku_tail) and is_choku(s[:-2]):
return True
return False
print("YES" if is_choku(sys.stdin.readline().rstrip()) else "NO")
@staticmethod
def c():
n, m, *lrs = map(int, sys.stdin.read().split())
l, r, s = np.array(lrs).reshape(n, 3).T
score = np.zeros((m + 1,), dtype=np.int32)
np.add.at(score, l - 1, s)
np.subtract.at(score, r, s)
np.cumsum(score, out=score)
print(s.sum() - score[:m].min())
@staticmethod
def d():
n, m, *f = map(int, sys.stdin.read().split())
prev = [0] * (n + 1)
tmp = defaultdict(int)
for i in range(n):
prev[i + 1] = tmp[f[i]]
tmp[f[i]] = i + 1
dp = [0] * (n + 1)
dp[0] = 1
l, s = 0, dp[0]
for i in range(1, n + 1):
while l < prev[i]:
s = (s - dp[l]) % MOD
l += 1
dp[i] = s
s = (s + dp[i]) % MOD
print(dp[n])
class ABC018:
@staticmethod
def a():
(*a,) = map(int, sys.stdin.read().split())
a = sorted(enumerate(a), key=lambda x: -x[1])
res = [None] * 3
for i in range(3):
res[a[i][0]] = i + 1
print(*res, sep="\n")
@staticmethod
def b():
s = sys.stdin.readline().rstrip()
n, *lr = map(int, sys.stdin.read().split())
for l, r in zip(*[iter(lr)] * 2):
l -= 1
r -= 1
s = s[:l] + s[l : r + 1][::-1] + s[r + 1 :]
print(s)
@staticmethod
def c():
r, c, k = map(int, sys.stdin.readline().split())
s = np.array([list(s) for s in sys.stdin.read().split()])
s = np.pad(s, 1, constant_values="x")
a = np.zeros_like(s, dtype=np.float64)
a[s == "o"] = np.inf
for i in range(1, r + 1):
np.minimum(a[i - 1, :] + 1, a[i, :], out=a[i, :])
for i in range(r, 0, -1):
np.minimum(a[i + 1, :] + 1, a[i, :], out=a[i, :])
for j in range(1, c + 1):
np.minimum(a[:, j - 1] + 1, a[:, j], out=a[:, j])
for j in range(c, 0, -1):
np.minimum(a[:, j + 1] + 1, a[:, j], out=a[:, j])
print(np.count_nonzero(a >= k))
@staticmethod
def c_2():
r, c, k = map(int, sys.stdin.readline().split())
s = np.array([list(s) for s in sys.stdin.read().split()])
s = np.pad(s, 1, constant_values="x")
a = (s == "o").astype(np.int16)
a = distance_transform_cdt(a, metric="taxicab")
print(np.count_nonzero(a >= k))
@staticmethod
def d():
n, m, p, q, r, *xyz = map(int, sys.stdin.read().split())
x, y, z = np.array(xyz).reshape(r, 3).T
h = np.zeros((n, m), dtype=np.int32)
h[x - 1, y - 1] = z
g = np.array([*itertools.combinations(range(n), p)])
print(np.sort(h[g].sum(axis=1), axis=1)[:, -q:].sum(axis=1).max())
class ABC019:
@staticmethod
def a():
(*a,) = map(int, sys.stdin.readline().split())
print(sorted(a)[1])
@staticmethod
def b():
s = sys.stdin.readline().rstrip() + "$"
cnt = 0
prev = "$"
t = ""
for c in s:
if c == prev:
cnt += 1
continue
t += prev + str(cnt)
prev = c
cnt = 1
print(t[2:])
@staticmethod
def c():
n, *a = map(int, sys.stdin.read().split())
res = set()
for x in a:
while not x & 1:
x >>= 1
res.add(x)
print(len(res))
@staticmethod
def d():
def inquire(u, v):
                print(f"? {u} {v}", flush=True)
return int(sys.stdin.readline().rstrip())
n = int(sys.stdin.readline().rstrip())
u = sorted([(inquire(1, v), v) for v in range(2, n + 1)])[-1][1]
d = max((inquire(u, v)) for v in range(1, n + 1) if u != v)
print(f"! {d}")
class ABC020:
@staticmethod
def a():
print(
"ABC"
if int(sys.stdin.readline().rstrip()) == 1
else "chokudai"
)
@staticmethod
def b():
a, b = sys.stdin.readline().split()
print(int(a + b) * 2)
@staticmethod
def c():
h, w, t = map(int, sys.stdin.readline().split())
s = [list(s) for s in sys.stdin.read().split()]
for i in range(h):
for j in range(w):
if s[i][j] == "S":
sy, sx = i, j
if s[i][j] == "G":
gy, gx = i, j
s[sy][sx] = s[gy][gx] = "."
source, target = (sy, sx), (gy, gx)
def heuristic_function(u, v=target):
                # Manhattan distance between grid cells u and v
                return abs(v[0] - u[0]) + abs(v[1] - u[1])
def min_time(x):
"""my lib"""
graph = GeometryTopology.Graph()
for i in range(h):
for j in range(w):
graph.add_node((i, j))
for i in range(h):
for j in range(w):
if i > 0:
graph.add_edge(
(i, j),
(i - 1, j),
weight=(1 if s[i - 1][j] == "." else x),
)
if i < h - 1:
graph.add_edge(
(i, j),
(i + 1, j),
weight=(1 if s[i + 1][j] == "." else x),
)
if j > 0:
graph.add_edge(
(i, j),
(i, j - 1),
weight=(1 if s[i][j - 1] == "." else x),
)
if j < w - 1:
graph.add_edge(
(i, j),
(i, j + 1),
weight=(1 if s[i][j + 1] == "." else x),
)
return graph.dijkstra(source)[target]
# return graph.astar(source, target, heuristic_function)
"""networkx"""
graph = nx.DiGraph()
for i in range(h):
for j in range(w):
if i > 0:
graph.add_edge(
(i, j),
(i - 1, j),
weight=(1 if s[i - 1][j] == "." else x),
)
if i < h - 1:
graph.add_edge(
(i, j),
(i + 1, j),
weight=(1 if s[i + 1][j] == "." else x),
)
if j > 0:
graph.add_edge(
(i, j),
(i, j - 1),
weight=(1 if s[i][j - 1] == "." else x),
)
if j < w - 1:
graph.add_edge(
(i, j),
(i, j + 1),
weight=(1 if s[i][j + 1] == "." else x),
)
return nx.dijkstra_path_length(graph, source, target)
return nx.astar_path_length(
graph, source, target, heuristic_function
)
def binary_search():
lo, hi = 1, t + 1
while lo + 1 < hi:
x = (lo + hi) // 2
if min_time(x) > t:
hi = x
else:
lo = x
return lo
print(binary_search())
@staticmethod
def d():
n, k = map(int, sys.stdin.readline().split())
div = sorted(NumberTheory.find_divisors(k))
l = len(div)
s = [0] * l
for i, d in enumerate(div):
s[i] = (1 + n // d) * (n // d) // 2 * d % MOD
for i in range(l - 1, -1, -1):
for j in range(i + 1, l):
if div[j] % div[i]:
continue
s[i] = (s[i] - s[j]) % MOD
print(
sum(s[i] * k // div[i] % MOD for i in range(l)) % MOD
) # ans is LCM.
class ABC021:
@staticmethod
def a():
n = int(sys.stdin.readline().rstrip())
s = [1 << i for i in range(5) if n >> i & 1]
print(len(s), *s, sep="\n")
@staticmethod
def b():
n, a, b, k, *p = map(int, sys.stdin.read().split())
print("YES" if len(set(p) | set([a, b])) == k + 2 else "NO")
@staticmethod
def c():
n, a, b, m, *xy = map(int, sys.stdin.read().split())
x, y = np.array(xy).reshape(m, 2).T - 1
a -= 1
b -= 1
g = csgraph_to_dense(
csr_matrix((np.ones(m), (x, y)), (n, n), dtype=np.int8)
)
g = np.logical_or(g, g.T)
paths = np.zeros(n, dtype=np.int64).reshape(-1, 1)
paths[a, 0] = 1
while not paths[b, 0]:
paths = np.dot(g, paths) % MOD
print(paths[b, 0])
@staticmethod
def c_2():
n, a, b, m, *xy = map(int, sys.stdin.read().split())
a -= 1
b -= 1
g = GeometryTopology.Graph()
for x, y in zip(*[iter(xy)] * 2):
x -= 1
y -= 1
g.add_edge(x, y, weight=1)
g.add_edge(y, x, weight=1)
dist, paths = g.dijkstra(a, paths_cnt=True, mod=MOD)
print(paths[b])
@staticmethod
def d():
n, k = map(int, sys.stdin.read().split())
combinatorics = Combinatorics()
print(combinatorics.mod_choose(n + k - 1, k))
class ABC022:
@staticmethod
def a():
n, s, t, *a = map(int, sys.stdin.read().split())
a = np.array(a)
np.cumsum(a, out=a)
print(((s <= a) & (a <= t)).sum())
@staticmethod
def b():
n, *a = map(int, sys.stdin.read().split())
c = Counter(a)
print(sum(c.values()) - len(c))
@staticmethod
def c():
n, m, *uvl = map(int, sys.stdin.read().split())
u, v, l = np.array(uvl).reshape(m, 3).T
u -= 1
v -= 1
g = csgraph_to_dense(csr_matrix((l, (u, v)), (n, n)))
g += g.T
g[g == 0] = np.inf
dist0 = g[0].copy()
g[0] = 0
g[:, 0] = 0
dist = shortest_path(g, method="FW", directed=False)
u, v = np.array([*itertools.combinations(range(1, n), 2)]).T
res = (dist0[u] + dist[u, v] + dist0[v]).min()
print(-1 if res == np.inf else int(res))
@staticmethod
def d():
n, *ab = map(int, sys.stdin.read().split())
c = np.array(ab).reshape(2, n, 2)
g = c.mean(axis=1)
d = np.sqrt(((c - g[:, None, :]) ** 2).sum(axis=-1)).sum(axis=1)
print(d[1] / d[0])
class ABC023:
@staticmethod
def a():
print(sum(divmod(int(sys.stdin.readline().rstrip()), 10)))
@staticmethod
def b():
n, s = sys.stdin.read().split()
n = int(n)
t = "b"
for i in range(n // 2):
if i % 3 == 0:
t = "a" + t + "c"
elif i % 3 == 1:
t = "c" + t + "a"
else:
t = "b" + t + "b"
print(n // 2 if t == s else -1)
@staticmethod
def b_2():
n, s = sys.stdin.read().split()
n = int(n)
if n & 1 ^ 1:
print(-1)
return
a = list("abc")
i = (1 - n // 2) % 3
for c in s:
if c != a[i]:
print(-1)
return
i = (i + 1) % 3
print(n // 2)
@staticmethod
def c():
h, w, k, n, *rc = map(int, sys.stdin.read().split())
r, c = np.array(rc).reshape(n, 2).T - 1
rb = np.bincount(r, minlength=h)
cb = np.bincount(c, minlength=w)
rbb = np.bincount(rb, minlength=k + 1)
cbb = np.bincount(cb, minlength=k + 1)
tot = (rbb[: k + 1] * cbb[k::-1]).sum()
real = np.bincount(rb[r] + cb[c] - 1, minlength=k + 1)
print(tot - real[k - 1] + real[k])
@staticmethod
def d():
n, *hs = map(int, sys.stdin.read().split())
h, s = np.array(hs).reshape(n, 2).T
t = np.arange(n)
def is_ok(x):
t_lim = (x - h) // s
t_lim.sort()
return np.all(t_lim >= t)
def binary_search():
lo, hi = 0, 10**14
while lo + 1 < hi:
x = (lo + hi) // 2
if is_ok(x):
hi = x
else:
lo = x
return hi
print(binary_search())
class ABC024:
@staticmethod
def a():
a, b, c, k, s, t = map(int, sys.stdin.read().split())
print(a * s + b * t - c * (s + t) * (s + t >= k))
@staticmethod
def b():
n, t, *a = map(int, sys.stdin.read().split())
a = np.array(a)
print(np.minimum(a[1:] - a[:-1], t).sum() + t)
@staticmethod
def c():
n, d, k, *lrst = map(int, sys.stdin.read().split())
lrst = np.array(lrst)
lr = lrst[: 2 * d].reshape(d, 2)
s, t = lrst[2 * d :].reshape(k, 2).T
day = np.zeros((k,), dtype=np.int32)
for i in range(d):
l, r = lr[i]
move = (l <= s) & (s <= r) & (s != t)
reach = move & (l <= t) & (t <= r)
s[move & (s < t)] = r
s[move & (s > t)] = l
s[reach] = t[reach]
day[reach] = i + 1
print(*day, sep="\n")
@staticmethod
def d():
a, b, c = map(int, sys.stdin.read().split())
p = MOD
denom = pow(a * b % p - b * c % p + c * a % p, p - 2, p)
w = (b * c - a * b) % p * denom % p
h = (b * c - a * c) % p * denom % p
print(h, w)
class ABC025:
@staticmethod
def a():
s, n = sys.stdin.read().split()
n = int(n)
i, j = divmod(n - 1, 5)
print(s[i] + s[j])
@staticmethod
def b():
n, a, b = map(int, sys.stdin.readline().split())
res = defaultdict(int)
for _ in range(n):
s, d = sys.stdin.readline().split()
d = int(d)
res[s] += min(max(d, a), b)
res = res["East"] - res["West"]
if res == 0:
ans = 0
elif res > 0:
ans = f"East {res}"
else:
ans = f"West {-res}"
print(ans)
@staticmethod
def c():
b = [0] * 6
for i in range(2):
(*row,) = map(int, sys.stdin.readline().split())
for j in range(3):
b[i * 3 + j] = row[j]
c = [0] * 8
for i in range(3):
(*row,) = map(int, sys.stdin.readline().split())
for j in range(2):
c[i * 3 + j] = row[j]
tot = sum(b) + sum(c)
@lru_cache(maxsize=None)
def f(s=tuple(0 for _ in range(9))):
if all(s):
res = 0
for i in range(6):
res += (s[i] == s[i + 3]) * b[i]
for i in range(8):
res += (s[i] == s[i + 1]) * c[i]
return res
cand = [i for i in range(9) if not s[i]]
flg = len(cand) & 1
s = list(s)
res = []
for i in cand:
s[i] = (flg ^ 1) + 1
res.append(f(tuple(s)))
s[i] = 0
return sorted(res, reverse=flg)[0]
a = f()
b = tot - a
print(a)
print(b)
class ABC026:
@staticmethod
def a():
a = int(sys.stdin.readline().rstrip())
print(a // 2 * (a - a // 2))
@staticmethod
def b():
n, *r = map(int, sys.stdin.read().split())
s = np.pi * np.array([0] + r) ** 2
s.sort()
res = s[n::-2].sum() - s[n - 1 :: -2].sum()
print(res)
@staticmethod
def c():
n, *b = map(int, sys.stdin.read().split())
g = GeometryTopology.Graph()
for i in range(1, n):
g.add_edge(b[i - 1] - 1, i, weight=1)
def f(u=0):
if not g.edges[u]:
return 1
s = [f(v) for v in g.edges[u]]
return max(s) + min(s) + 1
print(f())
@staticmethod
def d():
a, b, c = map(int, sys.stdin.readline().split())
def f(t):
return a * t + b * np.sin(c * t * np.pi) - 100
print(optimize.brenth(f, 0, 200))
class ABC027:
@staticmethod
def a():
l = [int(l) for l in sys.stdin.readline().split()]
l.sort()
print(l[2] if l[0] == l[1] else l[0])
@staticmethod
def b():
n, *a = map(int, sys.stdin.read().split())
m, r = divmod(sum(a), n)
if r:
print(-1)
return
population = 0
towns = 0
cnt = 0
for x in a:
population += x
towns += 1
if population / towns != m:
cnt += 1
continue
population, towns = 0, 0
print(cnt)
@staticmethod
def c():
n = int(sys.stdin.readline().rstrip())
flg = n.bit_length() & 1 ^ 1
t = 0
x = 1
while x <= n:
t += 1
x = 2 * x + 1 if t & 1 ^ flg else 2 * x
print("Aoki" if t & 1 else "Takahashi")
class ABC032:
@staticmethod
def a():
a, b, n = map(int, sys.stdin.read().split())
l = NumberTheory.lcm(a, b)
print((n + l - 1) // l * l)
@staticmethod
def b():
s, k = sys.stdin.read().split()
k = int(k)
res = set()
for i in range(len(s) - k + 1):
res.add(s[i : i + k])
print(len(res))
@staticmethod
def c():
n, k, *s = map(int, sys.stdin.read().split())
if 0 in s:
print(n)
return
s += [inf]
res = 0
l = r = 0
tmp = 1
while r <= n:
tmp *= s[r]
while tmp > k:
res = max(res, r - l)
tmp //= s[l]
l += 1
r += 1
print(res)
class ABC033:
@staticmethod
def a():
n = set(sys.stdin.readline().rstrip())
print("SAME" if len(n) == 1 else "DIFFERENT")
@staticmethod
def b():
n = int(sys.stdin.readline().rstrip())
res = dict()
for _ in range(n):
s, p = sys.stdin.readline().split()
p = int(p)
res[s] = p
tot = sum(res.values())
for s, p in res.items():
if p > tot / 2:
print(s)
return
print("atcoder")
@staticmethod
def c():
s = sys.stdin.readline().rstrip()
res = sum(not "0" in f for f in s.split("+"))
print(res)
class ABC034:
@staticmethod
def a():
x, y = map(int, sys.stdin.readline().split())
print("Better" if y > x else "Worse")
@staticmethod
def b():
n = int(sys.stdin.readline().rstrip())
print(n + 1 if n & 1 else n - 1)
@staticmethod
def c():
h, w = map(int, sys.stdin.read().split())
combinatorics = Combinatorics(n=2 * 10**5, mod=MOD)
print(combinatorics.mod_choose(h + w - 2, h - 1))
@staticmethod
def d():
n, k, *wp = map(int, sys.stdin.read().split())
w, p = np.array(wp).reshape(-1, 2).T
def f(x):
return np.sort(w * (p - x))[-k:].sum()
print(optimize.bisect(f, 0, 100))
class ABC035:
@staticmethod
def a():
w, h = map(int, sys.stdin.readline().split())
print("4:3" if 4 * h == 3 * w else "16:9")
@staticmethod
def b():
s, t = sys.stdin.read().split()
y = 0
x = 0
z = 0
for c in s:
if c == "?":
z += 1
elif c == "L":
x -= 1
elif c == "R":
x += 1
elif c == "D":
y -= 1
elif c == "U":
y += 1
d = abs(y) + abs(x)
if t == "1":
print(d + z)
else:
print(max(d - z, (d - z) & 1))
@staticmethod
def c():
n, q, *lr = map(int, sys.stdin.read().split())
l, r = np.array(lr).reshape(q, 2).T
res = np.zeros(n + 1, dtype=int)
np.add.at(res, l - 1, 1)
np.subtract.at(res, r, 1)
np.cumsum(res, out=res)
res = res & 1
print("".join(map(str, res[:-1])))
@staticmethod
def d():
n, m, t = map(int, sys.stdin.readline().split())
point = np.array(sys.stdin.readline().split(), dtype=int)
a, b, c = (
np.array(sys.stdin.read().split(), dtype=np.int64)
.reshape(m, 3)
.T
)
a -= 1
b -= 1
d_1 = shortest_path(
csr_matrix((c, (a, b)), (n, n)),
method="D",
directed=True,
indices=0,
)
d_2 = shortest_path(
csr_matrix((c, (b, a)), (n, n)),
method="D",
directed=True,
indices=0,
)
print(int(np.amax((t - (d_1 + d_2)) * point)))
class ABC036:
@staticmethod
def a():
a, b = map(int, sys.stdin.readline().split())
print((b + a - 1) // a)
@staticmethod
def b():
n, *s = sys.stdin.read().split()
n = int(n)
for j in range(n):
row = ""
for i in range(n - 1, -1, -1):
row += s[i][j]
print(row)
@staticmethod
def c():
n, *a = map(int, sys.stdin.read().split())
b = [None] * n
prev = None
j = -1
for i, x in sorted(enumerate(a), key=lambda x: x[1]):
if x != prev:
j += 1
b[i] = j
prev = x
print(*b, sep="\n")
@staticmethod
def d():
n, *ab = map(int, sys.stdin.read().split())
edges = [[] for _ in range(n)]
for a, b in zip(*[iter(ab)] * 2):
a -= 1
b -= 1
edges[a].append(b)
edges[b].append(a)
parent = [None] * n
def count(u):
black, white = 1, 1
for v in edges[u]:
if v == parent[u]:
continue
parent[v] = u
b, w = count(v)
black *= w
black %= MOD
white *= (b + w) % MOD
white %= MOD
return black, white
print(sum(count(0)) % MOD)
class ABC037:
@staticmethod
def a():
a, b, c = map(int, sys.stdin.readline().split())
print(c // min(a, b))
@staticmethod
def b():
n, q, *lrt = map(int, sys.stdin.read().split())
a = np.zeros(n, dtype=int)
for l, r, t in zip(*[iter(lrt)] * 3):
a[l - 1 : r] = t
print(*a, sep="\n")
@staticmethod
def c():
n, k, *a = map(int, sys.stdin.read().split())
a = np.array([0] + a)
np.cumsum(a, out=a)
s = (a[k:] - a[:-k]).sum()
print(s)
@staticmethod
def d():
h, w = map(int, sys.stdin.readline().split())
a = [
[int(x) for x in sys.stdin.readline().split()]
for _ in range(h)
]
dyx = [(-1, 0), (0, -1), (1, 0), (0, 1)]
path = [[None] * w for _ in range(h)]
def paths(i, j):
if path[i][j]:
return path[i][j]
val = a[i][j]
cnt = 1
for dy, dx in dyx:
y = i + dy
x = j + dx
if 0 <= y < h and 0 <= x < w and a[y][x] < val:
cnt += paths(y, x)
cnt %= MOD
path[i][j] = cnt
return cnt
tot = 0
for i in range(h):
for j in range(w):
tot += paths(i, j)
tot %= MOD
print(tot)
class ABC038:
@staticmethod
def a():
s = sys.stdin.readline().rstrip()
print("YES" if s[-1] == "T" else "NO")
@staticmethod
def b():
a, b, c, d = map(int, sys.stdin.read().split())
print("YES" if a == c or b == c or a == d or b == d else "NO")
@staticmethod
def c():
n, *a = map(int, sys.stdin.read().split())
a += [-1]
cnt = n
tmp = 1
for i in range(n):
if a[i + 1] > a[i]:
tmp += 1
else:
cnt += tmp * (tmp - 1) // 2
tmp = 1
print(cnt)
@staticmethod
def d():
n, *wh = map(int, sys.stdin.read().split())
wh = sorted(zip(*[iter(wh)] * 2), key=lambda x: (-x[0], x[1]))
w = [x[1] for x in wh][::-1]
res = [inf] * n
for x in w:
res[bi_l(res, x)] = x
print(bi_l(res, inf))
class ABC039:
@staticmethod
def a():
a, b, c = map(int, sys.stdin.readline().split())
print((a * b + b * c + c * a) * 2)
@staticmethod
def b():
x = int(sys.stdin.readline().rstrip())
for n in range(1, int(x**0.5) + 1):
if pow(n, 4) == x:
print(n)
return
@staticmethod
def c():
board = "WBWBWWBWBWBW" * 3
convert = "Do, *, Re, *, Mi, Fa, *, So, *, La, *, Si".split(", ")
s = sys.stdin.readline().rstrip()
print(convert[board.index(s)])
@staticmethod
def d():
h, w = map(int, sys.stdin.readline().split())
s = sys.stdin.read().split()
dyx = list(itertools.product((-1, 0, 1), repeat=2))
black_certain = set()
black_before = set()
for i in range(h):
for j in range(w):
black_cand = set()
for dy, dx in dyx:
y = i + dy
x = j + dx
if y < 0 or y >= h or x < 0 or x >= w:
continue
if s[y][x] == ".":
break
black_cand.add((y, x))
else:
black_before.add((i, j))
black_certain |= black_cand
for i in range(h):
for j in range(w):
if s[i][j] == "#" and not (i, j) in black_certain:
print("impossible")
return
print("possible")
for i in range(h):
row = ""
for j in range(w):
row += "#" if (i, j) in black_before else "."
print("".join(row))
class ABC040:
@staticmethod
def a():
n, x = map(int, sys.stdin.readline().split())
print(min(x - 1, n - x))
@staticmethod
def b():
n = int(sys.stdin.readline().rstrip())
res = inf
for i in range(1, int(n**0.5) + 1):
res = min(res, n // i - i + n % i)
print(res)
@staticmethod
def c():
n, *h = map(int, sys.stdin.read().split())
h = [h[0]] + h
cost = [None] * (n + 1)
cost[0] = cost[1] = 0
for i in range(2, n + 1):
cost[i] = min(
cost[i - 2] + abs(h[i] - h[i - 2]),
cost[i - 1] + abs(h[i] - h[i - 1]),
)
print(cost[n])
@staticmethod
def d():
n, m = map(int, sys.stdin.readline().split())
uf = GeometryTopology.UnionFind(n=n)
queue = []
for _ in range(m):
a, b, y = map(int, sys.stdin.readline().split())
heappush(queue, (-(2 * y), a - 1, b - 1))
q = int(sys.stdin.readline().rstrip())
for i in range(q):
v, y = map(int, sys.stdin.readline().split())
heappush(queue, (-(2 * y + 1), v - 1, i))
res = [None] * q
while queue:
y, i, j = heappop(queue)
if y & 1:
res[j] = uf.size[uf.find_root(i)]
else:
uf.unite(i, j)
print(*res, sep="\n")
class ABC041:
@staticmethod
def a():
s, i = sys.stdin.read().split()
i = int(i)
print(s[i - 1])
@staticmethod
def b():
a, b, c = map(int, sys.stdin.readline().split())
ans = a * b % MOD * c % MOD
print(ans)
@staticmethod
def c():
n, *a = map(int, sys.stdin.read().split())
for i, h in sorted(enumerate(a), key=lambda x: -x[1]):
print(i + 1)
@staticmethod
def d():
n, m, *xy = map(int, sys.stdin.read().split())
(*xy,) = zip(*[iter(xy)] * 2)
edges = [0] * n
for x, y in xy:
x -= 1
y -= 1
edges[x] |= 1 << y
comb = [None] * (1 << n)
comb[0] = 1
def count(edges, bit):
if comb[bit] is not None:
return comb[bit]
comb[bit] = 0
for i in range(n):
if (bit >> i) & 1 and not edges[i]:
nxt_bit = bit & ~(1 << i)
nxt_edges = edges.copy()
for j in range(n):
nxt_edges[j] &= ~(1 << i)
cnt = count(nxt_edges, nxt_bit)
comb[bit] += cnt
return comb[bit]
print(count(edges, (1 << n) - 1))
class ABC042:
@staticmethod
def a():
a = [int(x) for x in sys.stdin.readline().split()]
c = Counter(a)
print("YES" if c[5] == 2 and c[7] == 1 else "NO")
@staticmethod
def b():
n, l, *s = sys.stdin.read().split()
print("".join(sorted(s)))
@staticmethod
def c():
n, k, *d = sys.stdin.read().split()
l = len(n)
ok = sorted(set(string.digits) - set(d))
cand = [
int("".join(p)) for p in itertools.product(ok, repeat=l)
] + [int(min(x for x in ok if x > "0") + min(ok) * l)]
print(cand[bi_l(cand, int(n))])
@staticmethod
def d():
h, w, a, b = map(int, sys.stdin.read().split())
combinatorics = Combinatorics(n=2 * 10**5, mod=MOD)
tot = combinatorics.mod_choose(h + w - 2, h - 1)
i = np.arange(h - a, h)
ng = np.sum(
combinatorics.mod_choose(i + b - 1, i)
* combinatorics.mod_choose(h - i + w - b - 2, h - 1 - i)
% MOD
)
tot -= ng
tot %= MOD
print(tot)
class ABC043:
@staticmethod
def a():
n = int(sys.stdin.readline().rstrip())
print((1 + n) * n // 2)
@staticmethod
def b():
s = sys.stdin.readline().rstrip()
t = ""
for c in s:
if c == "B":
t = t[:-1]
else:
t += c
print(t)
@staticmethod
def c():
n, *a = map(int, sys.stdin.read().split())
a = np.array(a)
x = np.around(a.sum() / n).astype(int)
print(np.sum((a - x) ** 2))
@staticmethod
def d():
s = sys.stdin.readline().rstrip()
n = len(s)
for i in range(n - 1):
if s[i] == s[i + 1]:
print(i + 1, i + 2)
return
for i in range(n - 2):
if s[i] == s[i + 2]:
print(i + 1, i + 3)
return
print(-1, -1)
class ABC170:
@staticmethod
def a():
x = [int(x) for x in sys.stdin.readline().split()]
for i in range(5):
if x[i] != i + 1:
print(i + 1)
break
@staticmethod
def b():
x, y = map(int, sys.stdin.readline().split())
print("Yes" if 2 * x <= y <= 4 * x and y % 2 == 0 else "No")
@staticmethod
def c():
x, n, *p = map(int, sys.stdin.read().split())
a = list(set(range(102)) - set(p))
a = [(abs(y - x), y) for y in a]
print(sorted(a)[0][1])
@staticmethod
def d():
n, *a = map(int, sys.stdin.read().split())
cand = set(a)
cnt = 0
for x, c in sorted(Counter(a).items()):
cnt += c == 1 and x in cand
cand -= set(range(x * 2, 10**6 + 1, x))
print(cnt)
@staticmethod
def e():
n, q = map(int, sys.stdin.readline().split())
queue = []
m = 2 * 10**5
infants = [[] for _ in range(m)]
highest_rate = [None] * m
where = [None] * n
rate = [None] * n
def entry(i, k):
where[i] = k
while infants[k]:
r, j = heappop(infants[k])
if where[j] != k or j == i:
continue
if rate[i] >= -r:
highest_rate[k] = rate[i]
heappush(queue, (rate[i], k, i))
heappush(infants[k], (r, j))
break
else:
highest_rate[k] = rate[i]
heappush(queue, (rate[i], k, i))
heappush(infants[k], (-rate[i], i))
def transfer(i, k):
now = where[i]
while infants[now]:
r, j = heappop(infants[now])
if where[j] != now or j == i:
continue
if highest_rate[now] != -r:
highest_rate[now] = -r
heappush(queue, (-r, now, j))
heappush(infants[now], (r, j))
break
else:
highest_rate[now] = None
entry(i, k)
def inquire():
while True:
r, k, i = heappop(queue)
if where[i] != k or r != highest_rate[k]:
continue
heappush(queue, (r, k, i))
return r
for i in range(n):
a, b = map(int, sys.stdin.readline().split())
rate[i] = a
entry(i, b - 1)
for _ in range(q):
c, d = map(int, sys.stdin.readline().split())
transfer(c - 1, d - 1)
print(inquire())
class ABC171:
@staticmethod
def a():
c = sys.stdin.readline().rstrip()
print("A" if c < "a" else "a")
@staticmethod
def b():
n, k, *p = map(int, sys.stdin.read().split())
print(sum(sorted(p)[:k]))
@staticmethod
def c():
n = int(sys.stdin.readline().rstrip())
n -= 1
l = 1
while True:
if n < pow(26, l):
break
n -= pow(26, l)
l += 1
res = "".join(
[chr(ord("a") + d) for d in NumberTheory.base_convert(n, 26)][
::-1
]
)
res = "a" * (l - len(res)) + res
print(res)
@staticmethod
def d():
n = int(sys.stdin.readline().rstrip())
a = [int(x) for x in sys.stdin.readline().split()]
s = sum(a)
cnt = Counter(a)
q = int(sys.stdin.readline().rstrip())
for _ in range(q):
b, c = map(int, sys.stdin.readline().split())
s += (c - b) * cnt[b]
print(s)
cnt[c] += cnt[b]
cnt[b] = 0
@staticmethod
def e():
n, *a = map(int, sys.stdin.read().split())
s = 0
for x in a:
s ^= x
b = map(lambda x: x ^ s, a)
print(*b, sep=" ")
class ABC172:
@staticmethod
def a():
a = int(sys.stdin.readline().rstrip())
print(a * (1 + a + a**2))
@staticmethod
def b():
s, t = sys.stdin.read().split()
print(sum(s[i] != t[i] for i in range(len(s))))
@staticmethod
def c():
n, m, k = map(int, sys.stdin.readline().split())
a = [0] + [int(x) for x in sys.stdin.readline().split()]
b = [int(x) for x in sys.stdin.readline().split()]
(*sa,) = itertools.accumulate(a)
(*sb,) = itertools.accumulate(b)
res = 0
for i in range(n + 1):
r = k - sa[i]
if r < 0:
break
res = max(res, i + bi_r(sb, r))
print(res)
@staticmethod
def d():
n = int(sys.stdin.readline().rstrip())
f = np.zeros(n + 1, dtype=np.int64)
for i in range(1, n + 1):
f[i::i] += 1
print((np.arange(1, n + 1) * f[1:]).sum())
class ABC173:
@staticmethod
def a():
n = int(sys.stdin.readline().rstrip())
charge = (n + 999) // 1000 * 1000 - n
print(charge)
@staticmethod
def b():
n, *s = sys.stdin.read().split()
c = Counter(s)
for v in "AC, WA, TLE, RE".split(", "):
print(f"{v} x {c[v]}")
@staticmethod
def c():
h, w, k = map(int, sys.stdin.readline().split())
c = [sys.stdin.readline().rstrip() for _ in range(h)]
tot = 0
for i in range(1 << h):
for j in range(1 << w):
cnt = 0
for y in range(h):
for x in range(w):
if i >> y & 1 or j >> x & 1:
continue
cnt += c[y][x] == "#"
tot += cnt == k
print(tot)
@staticmethod
def d():
n, *a = map(int, sys.stdin.read().split())
a.sort(reverse=True)
res = (
a[0]
+ sum(a[1 : 1 + (n - 2) // 2]) * 2
+ a[1 + (n - 2) // 2] * (n & 1)
)
print(res)
@staticmethod
def e():
MOD = 10**9 + 7
n, k, *a = map(int, sys.stdin.read().split())
minus = [x for x in a if x < 0]
plus = [x for x in a if x > 0]
if len(plus) + len(minus) // 2 * 2 >= k: # plus
(*minus,) = map(abs, minus)
minus.sort(reverse=True)
plus.sort(reverse=True)
cand = []
if len(minus) & 1:
minus = minus[:-1]
for i in range(0, len(minus) - 1, 2):
cand.append(minus[i] * minus[i + 1] % MOD)
if k & 1:
res = plus[0]
plus = plus[1:]
else:
res = 1
if len(plus) & 1:
plus = plus[:-1]
for i in range(0, len(plus) - 1, 2):
cand.append(plus[i] * plus[i + 1] % MOD)
cand.sort(reverse=True)
for x in cand[: k // 2]:
res *= x
res %= MOD
print(res)
elif 0 in a:
print(0)
else:
cand = sorted(map(abs, a))
res = 1
for i in range(k):
res *= cand[i]
res %= MOD
res = MOD - res
print(res)
pass
class ABC174:
@staticmethod
def a():
print("Yes" if int(sys.stdin.readline().rstrip()) >= 30 else "No")
class ACL001:
@staticmethod
def a():
n, *xy = map(int, sys.stdin.read().split())
(*xy,) = zip(*[iter(xy)] * 2)
print(xy)
pass
class MSolutions2020:
@staticmethod
def a():
x = int(sys.stdin.readline().rstrip())
x -= 400
print(8 - x // 200)
@staticmethod
def b():
r, g, b, k = map(int, sys.stdin.read().split())
while k and g <= r:
g *= 2
k -= 1
while k and b <= g:
b *= 2
k -= 1
print("Yes" if r < g < b else "No")
@staticmethod
def c():
n, k, *a = map(int, sys.stdin.read().split())
for i in range(k, n):
print("Yes" if a[i] > a[i - k] else "No")
@staticmethod
def d():
n, *a = map(int, sys.stdin.read().split())
a += [-1]
m = 1000
s = 0
for i in range(n):
if a[i + 1] == a[i]:
continue
elif a[i + 1] > a[i]:
cnt = m // a[i]
m -= a[i] * cnt
s += cnt
else:
m += a[i] * s
s = 0
print(m)
class Codeforces:
pass
class ProjectEuler:
@staticmethod
def p1():
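        # f(n, x) = x + 2x + ... + (n // x) * x, i.e. the sum of the multiples of x up to n,
        # computed with the arithmetic-series formula (first + last) * count // 2.
        # Inclusion-exclusion below subtracts the multiples of 15 that are counted twice.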
def f(n, x):
return (x + n // x * x) * (n // x) // 2
n = 1000
ans = f(n - 1, 3) + f(n - 1, 5) - f(n - 1, 15)
print(ans)
@staticmethod
def p2():
fib = [1, 2]
while fib[-1] < 4 * 10**6:
fib.append(fib[-1] + fib[-2])
print(sum(fib[1:-1:3]))
@staticmethod
def p3():
number_theory = NumberTheory()
res = number_theory.prime_factorize(600851475143)
print(max(res.keys()))
@staticmethod
def p4():
def is_palindrome(n):
n = str(n)
return n == n[::-1]
cand = []
for a in range(100, 1000):
for b in range(a, 1000):
n = a * b
if is_palindrome(n):
cand.append(n)
print(max(cand))
@staticmethod
def p5():
number_theory = NumberTheory()
res = defaultdict(int)
for i in range(1, 21):
for p, c in number_theory.prime_factorize(i).items():
res[p] = max(res[p], c)
ans = 1
for p, c in res.items():
ans *= pow(p, c)
print(ans)
@staticmethod
def p6():
a = np.arange(101)
b = np.cumsum(a**2)
a = a.cumsum()
print(a[100] ** 2 - b[100])
@staticmethod
def p7():
number_theory = NumberTheory()
print(sorted(number_theory.prime_numbers)[10000])
@staticmethod
def p8():
n = "7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450"
n = [int(d) for d in list(n)]
res = 0
for i in range(988):
x = 1
for j in range(13):
x *= n[i + j]
res = max(res, x)
print(res)
@staticmethod
def p9():
for a in range(1, 997):
for b in range(a, 998 - a):
c = 1000 - a - b
if a**2 + b**2 == c**2:
print(a * b * c)
return
@staticmethod
def p10():
number_theory = NumberTheory(2 * 10**6 - 1)
print(sum(number_theory.prime_numbers))
@staticmethod
def p11():
grid = "08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08 49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00 81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65 52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91 22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80 24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50 32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70 67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21 24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72 21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95 78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92 16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57 86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58 19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40 04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66 88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69 04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36 20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16 20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54 01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48"
        grid = np.array(grid.split(), dtype=np.int64).reshape(20, -1)
        res = 0
        # check runs of four cells in each direction: right, down, down-right, down-left
        for i in range(20):
            for j in range(20):
                for di, dj in ((0, 1), (1, 0), (1, 1), (1, -1)):
                    if not (0 <= i + 3 * di < 20 and 0 <= j + 3 * dj < 20):
                        continue
                    tmp = 1
                    for d in range(4):
                        tmp *= int(grid[i + d * di, j + d * dj])
                    res = max(res, tmp)
        print(res)
pass
class Yukicoder:
pass
if __name__ == "__main__":
AtCoder.ABC009.d()
| 32.195939 | 1,217 | 0.368661 | [
"MIT"
] | kagemeka/atcoder-submissions | jp.atcoder/abc009/abc009_4/17183548.py | 95,141 | Python |
import libtcodpy
libtcodpy.say_hello()
| 8.2 | 21 | 0.804878 | [
"Unlicense"
] | Rosuav/libtcodpy | tcodtest.py | 41 | Python |
# -*- coding: utf-8 -*-
"""hw06_training.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1bjAeUIVjt9W8mASk8vw1y2SbuBYuQdlG
"""
!pip install reports
import PIL.Image as Image, requests, urllib, random
import argparse, json, PIL.Image, reports, os, pickle
from requests.exceptions import ConnectionError, ReadTimeout, TooManyRedirects, MissingSchema, InvalidURL
import numpy, torch, cv2, skimage
import skimage.io as io
from torch import nn
import torch.nn.functional as F
from pycocotools.coco import COCO
import glob
from torch.utils.data import DataLoader,Dataset
import torchvision.transforms as tvt
import matplotlib.pyplot as plt
from torchsummary import summary
import pandas as pd
# Mount google drive to run on Colab
#from google.colab import drive
#drive.mount('/content/drive')
#%cd "/content/drive/My Drive/Colab Notebooks/DeepLearning/hw06/"
#!pwd
#!ls
root_path = "/content/drive/My Drive/Colab Notebooks/DeepLearning/hw06/"
coco_json_path = "annotations/instances_train2017.json"
class_list = ["person", "dog", "hot dog"]
coco = COCO(coco_json_path)
class build_annotations:
# Structure of the all_annotations file:
# indexed by the image filepath, removing the '.jpg' or the string version (with zeros) of the imageID
# For each image:
# 'imageID': corresponds to the integer image ID assigned within COCO.
# 'num_objects': integer number of objects in the image (at most 5)
# 'bbox': a dictionary of the bounding box array for each instance within the image. The dictionary key is the string 0-5 of each instance in order of decreasing area
# 'labels': a dictionary of the labels of each instance within the image. The key is the same as bbox but the value is the integer category ID assigned within COCO.
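    # Illustrative example of a single entry (values are made up, not taken from COCO):
    #   all_annotations["000000397133"] = {
    #       "imageID": 397133,
    #       "num_objects": 2,
    #       "bbox":   {"0": [x, y, w, h], "1": [x, y, w, h]},
    #       "labels": {"0": 1, "1": 18},
    #   }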
def __init__(self, root_path, class_list, max_instances = 5):
self.root_path = root_path
self.image_dir = root_path + '*.jpg'
self.cat_IDs = coco.getCatIds(catNms=class_list)
self.max_instances = max_instances
def __call__(self):
all_annotations = {}
g = glob.glob(self.image_dir)
for i, filename in enumerate(g):
filename = filename.split('/')[-1]
img_ID = int(filename.split('.')[0])
ann_Ids = coco.getAnnIds(imgIds=img_ID, catIds = self.cat_IDs, iscrowd = False)
num_objects = min(len(ann_Ids), self.max_instances) # cap at a max of 5 images
anns = coco.loadAnns(ann_Ids)
indices = sort_by_area(anns, self.max_instances)
bbox = {}
label = {}
i = 0
for n in indices:
instance = anns[n]
bbox[str(i)] = instance['bbox']
label[str(i)] = instance['category_id']
i+=1
annotation= {"imageID":img_ID, "num_objects":i, 'bbox': bbox, 'labels':label}
all_annotations[filename.split('.')[0]] = annotation
ann_path = self.root_path + "image_annotations.p"
pickle.dump( all_annotations, open(ann_path, "wb" ) )
print('Annotations saved in:', ann_path)
def sort_by_area(anns, num):
areas = numpy.zeros(len(anns))
for i, instance in enumerate(anns):
areas[i] = instance['area']
indices = numpy.argsort(areas)[-num:]
return indices[::-1]
class your_dataset_class(Dataset):
def __init__(self, path, class_list, coco):
self.class_list = class_list
self.folder = path
self.coco = coco
self.catIds = coco.getCatIds(catNms = class_list)
self.imgIds = coco.getImgIds(catIds = self.catIds)
self.categories = coco.loadCats(self.catIds)
#create label dictionary
labeldict = {}
for idx, in_class in enumerate(self.class_list):
for c in self.categories:
if c["name"] == in_class:
labeldict[c['id']] = idx
self.coco_labeldict = labeldict
#if first time running, index the image dataset to make annotation .p file
annotation_path = path + 'image_annotations.p'
if os.path.exists(annotation_path) ==False:
print("Indexing dataset to compile annotations...")
dataset_annotations = build_annotations(path, class_list)
dataset_annotations()
self.data_anns = pickle.load(open(annotation_path, "rb" ))
def __len__(self):
g = glob.glob(self.folder + '*.jpg') # ,'*.jpg')
return (len(g))
def get_imagelabel(self, img_path, sc, max_objects = 5): #img_path = file location, sc = scale [0]: width, [1]: height
saved_filename = os.path.basename(img_path)
filename = saved_filename.split('.jpg')[0]
image_id = int(filename)#.split('_')[-1])
bbox_tensor = torch.zeros(max_objects, 4, dtype=torch.uint8)
label_tensor = torch.zeros(max_objects+1, dtype=torch.uint8) + len(self.class_list)
target_obj = self.data_anns[filename]
num_objects = target_obj['num_objects']
for n in range(num_objects):
[x,y,w,h] = target_obj['bbox'][str(n)]
bbox = [sc[1]*y, x*sc[0], sc[1]*(h), sc[0]*(w)]
bbox_tensor[n,:] = torch.tensor(numpy.array(bbox))
cat_label = target_obj['labels'][str(n)]
data_label = self.coco_labeldict[cat_label]
label_tensor[n] = torch.tensor(data_label)
return bbox_tensor, label_tensor
def __getitem__(self, item):
g = glob.glob(self.folder + '*.jpg') #'**/*.jpg') # , '*.jpg')
im = PIL.Image.open(g[item])
        im, scale_fac = rescale_factor(im, 128)  # overwrite the original image with a resized 128x128 version
W, H = im.size
transformer = tvt.Compose([tvt.ToTensor(), tvt.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
im_array = torch.randint(0, 256, (3, H, W)).type(torch.uint8)
for i in range(H):
for j in range(W):
im_array[:, j, i] = torch.tensor(im.getpixel((i, j)))
im_scaled = im_array / im_array.max() # scaled from 0-1
im_tf = transformer(numpy.transpose(im_scaled.numpy()))
num_classes = len(self.class_list)
bbox, label = self.get_imagelabel(g[item], scale_fac)
sample = {'im_ID': g[item],
'scale':scale_fac,
'image': im_tf,
'bbox' : bbox,
'label': label}
return sample
def rescale_factor(im_original, std_size):
raw_width, raw_height = im_original.size
im = im_original.resize((std_size, std_size), Image.BOX)
w_factor = std_size/raw_width
h_factor = std_size/raw_height
return (im, [w_factor, h_factor])
#train_path = os.path.join(root_path, "Train/")
train_path = root_path + "Train/"
val_path = os.path.join(root_path, "Val/")
batch_size = 64
train_dataset = your_dataset_class(train_path, class_list, coco)
#train_dataset.__getitem__(32)
train_data_loader = torch.utils.data.DataLoader(dataset = train_dataset,
batch_size = batch_size,
shuffle = True,
num_workers= 2,
drop_last=True)
#val_dataset = your_dataset_class(val_path, class_list)
#val_data_loader = torch.utils.data.DataLoader(dataset = val_dataset,
# batch_size = batch_size,
# shuffle = True,
# num_workers = 4,
# drop_last=True)
class SkipBlock(nn.Module):
def __init__(self,in_ch, out_ch, downsample = False):
super().__init__()
self.in_ch = in_ch
self.out_ch = out_ch
self.conv1 = nn.Conv2d(in_ch, out_ch, 3, stride = 1, padding = 1)
self.conv2 = nn.Conv2d(in_ch, out_ch, 3, padding = 1)
self.bnorm1 = nn.BatchNorm2d(out_ch)
self.bnorm2 = nn.BatchNorm2d(out_ch)
self.downsample_tf = downsample
self.downsampler = nn.Conv2d(in_ch, out_ch, 1, stride= 2)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bnorm1(out)
out = F.relu(out)
if self.downsample_tf == True:
identity = self.downsampler(identity)
out = self.downsampler(out)
out += identity
else:
out = self.conv2(out)
out = self.bnorm2(out)
out = F.relu(out)
out += identity
return out
class MechEnet(nn.Module):
def __init__(self, num_classes, depth):
super().__init__()
self.depth = depth // 8
self.conv_initial = nn.Conv2d( 3, 64, 3, padding = 1)
self.pool = nn.MaxPool2d(2,2)
## assume all layers are 64 channels deep
self.skipblock64_1 = nn.ModuleList()
for i in range(self.depth):
#print("adding layer", i)
self.skipblock64_1.append( SkipBlock(64,64, downsample = False) ) #append a 64 in/out ch layer - depth*2/4 convolutions
self.skip_downsample = SkipBlock(64,64, downsample= True)
self.skipblock64_2 = nn.ModuleList()
for i in range(self.depth):
#print("adding layer", i + self.depth)
self.skipblock64_2.append( SkipBlock(64,64, downsample = False) ) #append a 64 in/out layer - depth*2/4 convolutions
self.fc_seqn = nn.Sequential(
nn.Linear(64*4*4, 3000),
nn.ReLU(inplace =True),
nn.Linear(3000,3000),
nn.ReLU(inplace =True),
nn.Linear(3000,8*8*(5*(5+3))) #5 anchor boxes*(1+ bbox(4) + classes (3))
)
def forward(self, x):
# x1 is the output of classification
x = self.pool(F.relu(self.conv_initial(x)))
x1 = self.skip_downsample(x)
for i, skips in enumerate(self.skipblock64_1[self.depth//4 :]):
x1 = skips(x1)
x1 = self.skip_downsample(x1)
for i, skips in enumerate(self.skipblock64_1[:self.depth//4]):
x1 = skips(x1)
x1 = self.skip_downsample(x1)
for i, skips in enumerate(self.skipblock64_2[self.depth//4:]):
x1 = skips(x1)
x1 = self.skip_downsample(x1)
for i, skips in enumerate(self.skipblock64_2[:self.depth//4]):
x1 = skips(x1)
#x1 = self.skip_downsample(x)
x1 = x1.view(x1.size(0),-1)
x1 = self.fc_seqn(x1)
return x1
class IoULoss(torch.nn.Module):
def __init__(self, weight=None, size_average=True):
super(IoULoss, self).__init__()
def forward(self, inputs, targets, smooth=1):
#flatten label and prediction tensors
# tensor shape = [b, yolo_cell, anch, yolovector]
# flattened tensor = [b, numcells*numanch*8]
b_size = inputs.shape[0]
pred_unscrm = inputs.view(b_size, 8**2, 5, -1)
targ_unscrm = targets.view(b_size, 8**2, 5, -1)
pred_bbox = pred_unscrm[:,:,:,1:5]
targ_bbox = targ_unscrm[:,:,:,1:5]
intersection = targ_bbox*pred_bbox
union = targ_bbox + pred_bbox
J_idx = torch.div(intersection, union)
#print(J_idx)
J_dist = 1.0-J_idx
return torch.sum(J_dist)
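# Note: IoULoss above is kept for experimentation only; run_code_for_training below
# uses nn.MSELoss on the flattened YOLO target/prediction tensors.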
## significant code is adapted from Prof. Kak's Multi-instance detector
def run_code_for_training(net, lrate, mom, epochs, im_size, max_objects, yolo_interval = 16):
print('Beginning training for', epochs,'epochs...')
#criterion1 = torch.nn.CrossEntropyLoss()
criterion = torch.nn.MSELoss()
#criterion = IoULoss()
optimizer = torch.optim.SGD(net.parameters(), lr = lrate, momentum = mom)
loss_tracker = []
num_cells_image_height = im_size//yolo_interval
num_cells_image_width = im_size//yolo_interval
num_yolo_cells = num_cells_image_height*num_cells_image_width
print_iteration = 3
num_anchor_boxes = 5
yolo_tensor = torch.zeros(batch_size, num_yolo_cells, num_anchor_boxes, 1*5+3) #batch size, 8*8, 1*5+3 classes
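    # Last-dimension layout per anchor used in this script: [objectness, del_x, del_y, bh, bw, one-hot over the 3 classes];
    # the box terms are read back from slots 1-4 and the class scores from slots 5-7 when decoding predictions.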
class AnchorBox:
def __init__(self, AR, topleft, abox_h, abox_w, abox_idx):
self.AR = AR
self.topleft = topleft
self.abox_h = abox_h
self.abox_w = abox_w
self.abox_idx= abox_idx
device = torch.device("cuda:0")
for epoch in range(epochs):
print('\nEpoch %d training...' %(epoch + 1))
running_loss = 0.0
for i, data in enumerate(train_data_loader):
sample_batch = data['im_ID']
im_tensor = data["image"]
target_reg = data["bbox"].type(torch.FloatTensor)
target_clf = data["label"].type(torch.LongTensor)
optimizer.zero_grad()
im_tensor = im_tensor.to(device)
target_reg = target_reg.to(device)
target_clf = target_clf.to(device)
yolo_tensor = yolo_tensor.to(device)
obj_centers = {ibx :
{idx : None for idx in range(max_objects)}
for ibx in range(im_tensor.shape[0])}
anchor_boxes_1_1 = [[AnchorBox(1/1, (i*yolo_interval,j*yolo_interval), yolo_interval, yolo_interval, 0)
for i in range(0,num_cells_image_height)]
for j in range(0,num_cells_image_width)]
anchor_boxes_1_3 = [[AnchorBox(1/3, (i*yolo_interval,j*yolo_interval), yolo_interval, 3*yolo_interval, 1)
for i in range(0,num_cells_image_height)]
for j in range(0,num_cells_image_width)]
anchor_boxes_3_1 = [[AnchorBox(3/1, (i*yolo_interval,j*yolo_interval), 3*yolo_interval, yolo_interval, 2)
for i in range(0,num_cells_image_height)]
for j in range(0,num_cells_image_width)]
anchor_boxes_1_5 = [[AnchorBox(1/5, (i*yolo_interval,j*yolo_interval), yolo_interval, 5*yolo_interval, 3)
for i in range(0,num_cells_image_height)]
for j in range(0,num_cells_image_width)]
anchor_boxes_5_1 = [[AnchorBox(5/1, (i*yolo_interval,j*yolo_interval), 5*yolo_interval, yolo_interval, 4)
for i in range(0,num_cells_image_height)]
for j in range(0,num_cells_image_width)]
#Build the yolo tensor based on the bounding box and label tensors from the target/dataset
for b in range(im_tensor.shape[0]): # Loop through batch index
for idx in range(max_objects): # Loop through each object in the target tensor
height_center_bb = (target_reg[b][idx][1].item() + target_reg[b][idx][3].item()) // 2
width_center_bb = (target_reg[b][idx][0].item() + target_reg[b][idx][2].item()) // 2
obj_bb_height = target_reg[b][idx][3].item() - target_reg[b][idx][1].item()
obj_bb_width = target_reg[b][idx][2].item() - target_reg[b][idx][0].item()
                    obj_label = target_clf[b][idx].item()
                    if obj_label >= len(class_list):
                        # padding slot: the dataset fills unused object slots with label == len(class_list)
                        continue
eps = 1e-8
AR = float(obj_bb_height + eps) / float(obj_bb_width + eps)
cell_row_idx = int(height_center_bb // yolo_interval) ## for the i coordinate
cell_col_idx = int(width_center_bb // yolo_interval) ## for the j coordinates
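                    # the thresholds below pick the anchor shape (1:5, 1:3, 1:1, 3:1, 5:1) whose aspect ratio h/w is roughly closest to the object's AR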
if AR <= 0.2: ## (F)
anchbox = anchor_boxes_1_5[cell_row_idx][cell_col_idx]
elif AR <= 0.5:
anchbox = anchor_boxes_1_3[cell_row_idx][cell_col_idx]
elif AR <= 1.5:
anchbox = anchor_boxes_1_1[cell_row_idx][cell_col_idx]
elif AR <= 4:
anchbox = anchor_boxes_3_1[cell_row_idx][cell_col_idx]
elif AR > 4:
anchbox = anchor_boxes_5_1[cell_row_idx][cell_col_idx]
bh = float(obj_bb_height) / float(yolo_interval) ## (G)
bw = float(obj_bb_width) / float(yolo_interval)
obj_center_x = float(target_reg[b][idx][2].item() + target_reg[b][idx][0].item()) / 2.0
obj_center_y = float(target_reg[b][idx][3].item() + target_reg[b][idx][1].item()) / 2.0
yolocell_center_i = cell_row_idx*yolo_interval + float(yolo_interval) / 2.0
yolocell_center_j = cell_col_idx*yolo_interval + float(yolo_interval) / 2.0
del_x = float(obj_center_x - yolocell_center_j) / yolo_interval
del_y = float(obj_center_y - yolocell_center_i) / yolo_interval
yolo_vector = [0, del_x, del_y, bh, bw, 0, 0, 0]
                    yolo_vector[5 + obj_label] = 1  # class one-hot lives in slots 5-7, after objectness and the 4 box terms
                    yolo_vector[0] = 1  # objectness
yolo_cell_index = cell_row_idx * num_cells_image_width + cell_col_idx
yolo_tensor[b, yolo_cell_index, anchbox.abox_idx] = torch.FloatTensor( yolo_vector )
yolo_tensor_flattened = yolo_tensor.view(im_tensor.shape[0], -1)
## Foward Pass
pred_yolo = net(im_tensor)
#pred_yolo = filter_yolo_tensor(pred_yolo, im_tensor.shape[0], num_yolo_cells, num_anchor_boxes)
loss = criterion(pred_yolo, yolo_tensor_flattened)
loss.backward(retain_graph = True)
pred_unscrm = pred_yolo.view(im_tensor.shape[0], 8**2, 5, -1)
sample_yolo_tensor = pred_unscrm
optimizer.step()
running_loss += loss.item()
if (i+1)%print_iteration ==0:
average_loss = running_loss/float(print_iteration)
print("[epoch: %d, batch: %5d] Avg Batch loss: %.4f" %(epoch + 1, i+1, average_loss))
loss_tracker = numpy.append(loss_tracker, average_loss)
running_loss = 0.0
return loss_tracker, sample_yolo_tensor, sample_batch
def filter_yolo_tensor(yolo_tensor, batch_size, num_yolo_cells, aboxes):
#loop through each yolo_cell_index in the in the prediction tensor
# if idx[0] of the yolo vector is less than 0.5, make the whole vector zero
zero_vec = torch.zeros(8)
print(yolo_tensor.shape)
for b in range(batch_size):
for num in range(num_yolo_cells):
for an in range(aboxes):
if yolo_tensor[b,num][an][0] < 0.5:
yolo_tensor[b,num][an][:] = torch.zeros(8)
return yolo_tensor
model = MechEnet(len(class_list), depth = 64)
lrate = 5e-3
mom = 0.5
epochs = 1
yolo_int = 16
im_size = 128
max_objects = 5
savepath = "MechEnet.pth"
model.load_state_dict(torch.load(savepath))
if torch.cuda.is_available():
device = torch.device("cuda:0")
model.cuda()
summary(model, (3, im_size, im_size))
training_loss, yolo_sample, batches = run_code_for_training(model, lrate, mom, epochs, im_size, max_objects, yolo_interval = yolo_int)
#savepath = "/content/drive/My Drive/Colab Notebooks/DeepLearning/hw06/MechEnet.pth"
#torch.save(model.state_dict(), savepath)
#pd.DataFrame(training_loss).to_csv("/content/drive/My Drive/Colab Notebooks/DeepLearning/hw06/loss.csv")
fig, ax = plt.subplots()
ax.plot(training_loss)
ax.set_title('Training loss')
ax.set_ylabel('Loss')
ax.set_xlabel('Iterations')
## Visualize prediction on training set
annotation_path = root_path + 'Train/'+ 'image_annotations.p'
data_anns = pickle.load(open(annotation_path, "rb" ))
def show_image(image_anns):
    # use the image_anns argument (rather than the global rand_img) so the helper works for any entry
    img = coco.loadImgs(image_anns['imageID'])[0]
    I = io.imread(img['coco_url'])
    if len(I.shape) == 2:
        I = skimage.color.gray2rgb(I)
    catIds = coco.getCatIds(catNms= class_list)
    annIds = coco.getAnnIds(imgIds=image_anns['imageID'], catIds= catIds, iscrowd=False)
    anns = coco.loadAnns(annIds)
    image = numpy.uint8(I)
    for i in range(image_anns['num_objects']):
        [x,y,w,h] = image_anns['bbox'][str(i)]
        label = image_anns['labels'][str(i)]
        image = cv2.rectangle(image, (int(x), int(y)), (int(x +w), int(y + h)), (36,255,12), 2)
        class_label = train_dataset.coco_labeldict[label]  # map the COCO category id to an index into class_list
        image = cv2.putText(image, 'True ' + class_list[class_label], (int(x), int(y-10)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (36,255,12), 2)
return image
bdx =37 #numpy.random.randint(0,64)
#55 #18 #5
img_loc = batches[bdx].split('/')[-1].split('.')[0]
rand_img = data_anns[img_loc]
image = show_image(rand_img)
g = glob.glob(root_path + 'Train/*.jpg')
for i in range(len(g)):
    if img_loc in g[i]:
        sdx = i
scale = train_dataset.__getitem__(sdx)['scale']
import math
im_considered = yolo_sample[bdx,:,:,:]
im_pred_anch = torch.zeros(64,8)
cell_pred = []
num_cell_width = 8
yolo_interval = 16
for i in range(im_considered.shape[0]):
AR = torch.argmax(im_considered[i,:,0])
im_pred_anch[i,:] = im_considered[i,AR,:]
if im_pred_anch[i,0] > 0.75:
if AR == 0:
w,h = 1,1
elif AR == 1:
w,h = 1,3
elif AR == 2:
w,h = 3,1
elif AR == 3:
w,h = 1,5
elif AR == 4:
w,h = 5,1
row_idx = math.floor(i/num_cell_width)
col_idx = i%num_cell_width
yolo_box = im_pred_anch[i,1:5].cpu().detach().numpy()
x1 = ((row_idx + 0.5)*yolo_interval)/scale[0]
x2 = x1 + (w*yolo_interval)/scale[0]
y1 = (col_idx + 0.5)*yolo_interval/scale[1]
y2 = y1+ (h*yolo_interval)/scale[1]
label = torch.argmax(im_pred_anch[i,5:]).cpu().detach().numpy()
pred_label = str('Predicted ' + class_list[label])
temp = [pred_label, x1,y1, x2,y2]
cell_pred = numpy.append(cell_pred, temp)
image = cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), (255,0,0), 2)
image = cv2.putText(image, pred_label, (int(x1), int(y1-10)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,0,0), 2)
fig, ax = plt.subplots(1,1, dpi = 150)
ax.imshow(image)
ax.set_axis_off()
plt.axis('tight')
plt.show() | 39.083485 | 168 | 0.622661 | [
"MIT"
] | arao53/BME695-object-detection | hw06_train.py | 21,535 | Python |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : spanish.py
@Time : 2020/08/21
@Author : JavierSC
@Version : 1.0
@Contact :
@Desc :
'''
class LangSpanish(object):
SETTING = "AJUSTES"
VALUE = "VALORES"
SETTING_DOWNLOAD_PATH = "Ruta de descarga"
SETTING_ONLY_M4A = "Convertir mp4 a m4a"
SETTING_ADD_EXPLICIT_TAG = "Añadir tag de 'Contenido explícito'"
SETTING_ADD_HYPHEN = "Agregar guión"
SETTING_ADD_YEAR = "Agregar año en la carpeta del álbum"
SETTING_USE_TRACK_NUM = "Agregar número de la pista"
SETTING_AUDIO_QUALITY = "Calidad de audio"
SETTING_VIDEO_QUALITY = "Calidad de video"
SETTING_CHECK_EXIST = "Verificar si existe"
SETTING_ARTIST_BEFORE_TITLE = "Nombre del artista en el título de la pista"
SETTING_ALBUMID_BEFORE_FOLDER = "Añadir ID de la carpeta del álbum"
SETTING_INCLUDE_EP = "Incluir Sencillos y EPs"
SETTING_SAVE_COVERS = "Guardar carátulas"
SETTING_LANGUAGE = "Idioma"
SETTING_USE_PLAYLIST_FOLDER = "Usar directorio de la lista de reproducción"
SETTING_MULITHREAD_DOWNLOAD = "Descarga Multi-hilo"
SETTING_ALBUM_FOLDER_FORMAT = "Formato del nombre de carpeta del álbum"
SETTING_TRACK_FILE_FORMAT = "Formato del nombre de archivo de la pista"
SETTING_SHOW_PROGRESS = "Mostrar progreso"
CHOICE = "SELECCIÓN"
FUNCTION = "FUNCIÓN"
CHOICE_ENTER = "Ingresar"
CHOICE_ENTER_URLID = "Ingresar 'Url/ID':"
CHOICE_EXIT = "Salir"
CHOICE_LOGIN = "Login"
CHOICE_SETTINGS = "Ajustes"
CHOICE_SET_ACCESS_TOKEN = "Establecer AccessToken"
CHOICE_DOWNLOAD_BY_URL = "Descargar por Url o ID"
PRINT_ERR = "[ERR]"
PRINT_INFO = "[INFO]"
PRINT_SUCCESS = "[EXITO]"
PRINT_ENTER_CHOICE = "Ingresar Selección:"
PRINT_LATEST_VERSION = "Ultima versión:"
PRINT_USERNAME = "nombre de usuario:"
PRINT_PASSWORD = "contraseña:"
CHANGE_START_SETTINGS = "Iniciar ajustes('0'-Volver,'1'-Si):"
CHANGE_DOWNLOAD_PATH = "Ruta de descarga('0' No modificar):"
CHANGE_AUDIO_QUALITY = "Calidad de audio('0'-Normal,'1'-High,'2'-HiFi,'3'-Master):"
CHANGE_VIDEO_QUALITY = "Calidad de video('0'-1080,'1'-720,'2'-480,'3'-360):"
CHANGE_ONLYM4A = "Convertir mp4 a m4a('0'-No,'1'-Si):"
CHANGE_ADD_EXPLICIT_TAG = "Agregar tag de contenido explícito a los nombres de archivo('0'-No,'1'-Si):"
CHANGE_ADD_HYPHEN = "Usar guiones en lugar de espacios en el nombre de los archivos('0'-No,'1'-Si):"
CHANGE_ADD_YEAR = "Agregar año a el nombre de las carpetas del álbum('0'-No,'1'-Si):"
CHANGE_USE_TRACK_NUM = "Agregar número de la pista('0'-No,'1'-Si):"
CHANGE_CHECK_EXIST = "Verificar si el archivo existe antes de descargar la pista('0'-No,'1'-Si):"
CHANGE_ARTIST_BEFORE_TITLE = "Añadir el nombre del artista en el título de la pista('0'-No,'1'-Si):"
CHANGE_INCLUDE_EP = "Incluir Sencillos y EPs al descargar el álbum del artista('0'-No,'1'-Si):"
CHANGE_ALBUMID_BEFORE_FOLDER = "Añadir ID de la carpeta del álbum('0'-No,'1'-Si):"
CHANGE_SAVE_COVERS = "Guardar carátulas('0'-No,'1'-Si):"
CHANGE_LANGUAGE = "Seleccione el idioma"
CHANGE_ALBUM_FOLDER_FORMAT = "Formato del nombre de carpeta del álbum('0' No modificar):"
CHANGE_TRACK_FILE_FORMAT = "Formato del nombre de archivo de la pista('0' No modificar):"
CHANGE_SHOW_PROGRESS = "Mostrar progreso('0'-No,'1'-Yes):"
MSG_INVAILD_ACCESSTOKEN = "AccessToken invalido! Por favor reinicie"
MSG_PATH_ERR = "La ruta no es correcta!"
MSG_INPUT_ERR = "Error de entrada!"
MODEL_ALBUM_PROPERTY = "PROPIEDAD-DE-ÁLBUM"
MODEL_TRACK_PROPERTY = "PROPIEDAD-DE-PISTA"
MODEL_VIDEO_PROPERTY = "PROPIEDAD-DE-VIDEO"
MODEL_ARTIST_PROPERTY = "PROPIEDAD-DE-ARTISTA"
MODEL_PLAYLIST_PROPERTY = "PROPIEDAD-DE-PLAYLIST"
MODEL_TITLE = 'Título'
MODEL_TRACK_NUMBER = 'Numero de pista'
MODEL_VIDEO_NUMBER = 'Numero de video'
MODEL_RELEASE_DATE = 'Fecha de lanzamiento'
MODEL_VERSION = 'Versión'
MODEL_EXPLICIT = 'Explícito'
MODEL_ALBUM = 'Álbum'
MODEL_ID = 'ID'
MODEL_NAME = 'Nombre'
MODEL_TYPE = 'Tipo'
| 45.129032 | 108 | 0.688825 | [
"Apache-2.0"
] | joyel24/Tidal-Media-Downloader | TIDALDL-PY/tidal_dl/lang/spanish.py | 4,230 | Python |
# -*- coding: utf-8 -*-
"""
ruobr.ru/api
~~~~~~~~~~~~
A library for accessing the electronic school diary API.
Example:
>>> from ruobr_api import Ruobr
>>> r = Ruobr('username', 'password')
>>> r.getUser()
User(id=7592904, status='child', first_name='Иван', last_name='Иванов', middle_name='Иванович', school='69-МБОУ "СОШ №69"', school_is_tourniquet=False, readonly=False, school_is_food=True, group='10А', gps_tracker=False)
:authors: raitonoberu
:license: Apache License, Version 2.0, see LICENSE file
:copyright: (c) 2021 raitonoberu
"""
from .__main__ import (
Ruobr,
AsyncRuobr,
AuthenticationException,
NoSuccessException,
NoChildrenException,
)
__author__ = "raitonoberu"
__version__ = "1.2.1"
__email__ = "[email protected]"
| 25.566667 | 223 | 0.694915 | [
"Apache-2.0"
] | raitonoberu/ruobr_api | ruobr_api/__init__.py | 842 | Python |
#coding:utf-8
import urllib2
import sys,socket
def elasticburp(ip,port):
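    # Probe ip:port with a short TCP connect; if it is open, fetch /_cat over HTTP and
    # flag the host as an unauthenticated ElasticSearch instance when the listing is readable.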
addr = (ip,int(port))
url = "http://" + ip + ":" + str(port) + "/_cat"
sock_9200 = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
try:
sock_9200.settimeout(1)
sock_9200.connect(addr)
        print '%s:%s open!' % (ip, port)
try:
data = urllib2.urlopen(url).read()
if '/_cat/master' in data:
                sys.stdout.write('%s:%s is ElasticSearch Unauthorized\n' % (ip, port))
except:
pass
except:
sock_9200.close() | 20.892857 | 86 | 0.555556 | [
"MIT"
] | webvul/Allscanner | elasticsearch/elasticsearch.py | 585 | Python |
"""grouping module"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
import itertools
import numpy as np
from numpy_indexed.index import as_index
import numpy_indexed as npi
__author__ = "Eelco Hoogendoorn"
__license__ = "LGPL"
__email__ = "[email protected]"
class GroupBy(object):
"""
GroupBy class
contains an index of keys, and extends the index functionality with grouping-specific functionality
"""
def __init__(self, keys, axis=0):
"""
Parameters
----------
keys : indexable object
sequence of keys to group by
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
self.index = as_index(keys, axis)
#forward interesting/'public' index properties
@property
def unique(self):
"""unique keys"""
return self.index.unique
@property
def count(self):
"""count of each unique key"""
return self.index.count
@property
def inverse(self):
"""mapping such that unique[inverse]==keys"""
return self.index.inverse
@property
def groups(self):
"""int, number of groups formed by the keys"""
return self.index.groups
#some different methods of chopping up a set of values by key
def split_iterable_as_iterable(self, values):
"""Group iterable into iterables, in the order of the keys
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
Memory consumption depends on the amount of sorting required
Worst case, if index.sorter[-1] = 0, we need to consume the entire value iterable,
before we can start yielding any output
But to the extent that the keys are already sorted, the grouping is lazy
"""
values = iter(enumerate(values))
cache = dict()
def get_value(ti):
try:
return cache.pop(ti)
except:
while True:
i, v = next(values)
if i==ti:
return v
cache[i] = v
s = iter(self.index.sorter)
for c in self.count:
yield (get_value(i) for i in itertools.islice(s, int(c)))
def split_iterable_as_unordered_iterable(self, values):
"""Group iterable into iterables, without regard for the ordering of self.index.unique
key-group tuples are yielded as soon as they are complete
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
tuple of key, and a list of corresponding items in values
Notes
-----
This approach is lazy, insofar as grouped values are close in their iterable
"""
from collections import defaultdict
cache = defaultdict(list)
count = self.count
unique = self.unique
key = (lambda i: unique[i]) if isinstance(unique, np.ndarray) else (lambda i: tuple(c[i] for c in unique))
for i,v in zip(self.inverse, values):
cache[i].append(v)
if len(cache[i]) == count[i]:
yield key(i), cache.pop(i)
def split_sequence_as_iterable(self, values):
"""Group sequence into iterables
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
This is the preferred method if values has random access, but we dont want it completely in memory.
Like a big memory mapped file, for instance
"""
s = iter(self.index.sorter)
for c in self.count:
yield (values[i] for i in itertools.islice(s, int(c)))
def split_array_as_array(self, values):
"""Group ndarray into ndarray by means of reshaping
Parameters
----------
values : ndarray_like, [index.size, ...]
Returns
-------
ndarray, [groups, group_size, ...]
values grouped by key
Raises
------
AssertionError
This operation is only possible if index.uniform==True
"""
if not self.index.uniform:
raise ValueError("Array can only be split as array if all groups have the same size")
values = np.asarray(values)
values = values[self.index.sorter]
return values.reshape(self.groups, -1, *values.shape[1:])
def split_array_as_list(self, values):
"""Group values as a list of arrays, or a jagged-array
Parameters
----------
values : ndarray, [keys, ...]
Returns
-------
list of length self.groups of ndarray, [key_count, ...]
"""
values = np.asarray(values)
values = values[self.index.sorter]
return np.split(values, self.index.slices[1:-1], axis=0)
def split(self, values):
"""some sensible defaults"""
try:
return self.split_array_as_array(values)
except:
# FIXME: change to iter in python 3?
return self.split_array_as_list(values)
def __call__(self, values):
"""not sure how i feel about this. explicit is better than implict?"""
return self.unique, self.split(values)
# ufunc based reduction methods. should they return unique keys by default?
def reduce(self, values, operator=np.add, axis=0, dtype=None):
"""Reduce the values over identical key groups, using the given ufunc
reduction is over the first axis, which should have elements corresponding to the keys
all other axes are treated indepenently for the sake of this reduction
Parameters
----------
values : ndarray, [keys, ...]
values to perform reduction over
operator : numpy.ufunc
a numpy ufunc, such as np.add or np.sum
axis : int, optional
the axis to reduce over
dtype : output dtype
Returns
-------
ndarray, [groups, ...]
values reduced by operator over the key-groups
"""
values = np.take(values, self.index.sorter, axis=axis)
return operator.reduceat(values, self.index.start, axis=axis, dtype=dtype)
def sum(self, values, axis=0, dtype=None):
"""compute the sum over each group
Parameters
----------
values : array_like, [keys, ...]
values to sum per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype)
def prod(self, values, axis=0, dtype=None):
"""compute the product over each group
Parameters
----------
values : array_like, [keys, ...]
values to multiply per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype, operator=np.multiply)
def mean(self, values, axis=0, weights=None, dtype=None):
"""compute the mean over each group
Parameters
----------
values : array_like, [keys, ...]
values to take average of per group
axis : int, optional
alternative reduction axis for values
weights : ndarray, [keys, ...], optional
weight to use for each value
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
if weights is None:
result = self.reduce(values, axis=axis, dtype=dtype)
shape = [1] * values.ndim
shape[axis] = self.groups
weights = self.count.reshape(shape)
else:
weights = np.asarray(weights)
result = self.reduce(values * weights, axis=axis, dtype=dtype)
weights = self.reduce(weights, axis=axis, dtype=dtype)
return self.unique, result / weights
def var(self, values, axis=0, weights=None, dtype=None):
"""compute the variance over each group
Parameters
----------
values : array_like, [keys, ...]
values to take variance of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
unique, mean = self.mean(values, axis, weights, dtype)
err = values - mean.take(self.inverse, axis)
if weights is None:
shape = [1] * values.ndim
shape[axis] = self.groups
group_weights = self.count.reshape(shape)
var = self.reduce(err ** 2, axis=axis, dtype=dtype)
else:
weights = np.asarray(weights)
group_weights = self.reduce(weights, axis=axis, dtype=dtype)
var = self.reduce(weights * err ** 2, axis=axis, dtype=dtype)
return unique, var / group_weights
def std(self, values, axis=0, weights=None, dtype=None):
"""standard deviation over each group
Parameters
----------
values : array_like, [keys, ...]
values to take standard deviation of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
unique, var = self.var(values, axis, weights, dtype)
return unique, np.sqrt(var)
def median(self, values, axis=0, average=True):
"""compute the median value over each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the median of per group
axis : int, optional
alternative reduction axis for values
average : bool, optional
when average is true, the average of the two central values is taken for groups with an even key-count
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
mid_2 = self.index.start + self.index.stop
hi = (mid_2 ) // 2
lo = (mid_2 - 1) // 2
#need this indirection for lex-index compatibility
sorted_group_rank_per_key = self.index.sorted_group_rank_per_key
def median1d(slc):
#place values at correct keys; preconditions the upcoming lexsort
slc = slc[self.index.sorter]
#refine value sorting within each keygroup
sorter = np.lexsort((slc, sorted_group_rank_per_key))
slc = slc[sorter]
return (slc[lo]+slc[hi]) / 2 if average else slc[hi]
values = np.asarray(values)
if values.ndim>1: #is trying to skip apply_along_axis somewhat premature optimization?
values = np.apply_along_axis(median1d, axis, values)
else:
values = median1d(values)
return self.unique, values
def mode(self, values, weights=None):
"""compute the mode within each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the mode of per group
weights : array_like, [keys], float, optional
optional weight associated with each entry in values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
if weights is None:
unique, weights = npi.count((self.index.sorted_group_rank_per_key, values))
else:
unique, weights = npi.group_by((self.index.sorted_group_rank_per_key, values)).sum(weights)
x, bin = npi.group_by(unique[0]).argmax(weights)
return x, unique[1][bin]
def min(self, values, axis=0):
"""return the minimum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take minimum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.minimum, axis)
def max(self, values, axis=0):
"""return the maximum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take maximum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.maximum, axis)
def first(self, values, axis=0):
"""return values at first occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the first value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.start], axis)
def last(self, values, axis=0):
"""return values at last occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the last value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.stop-1], axis)
def any(self, values, axis=0):
"""compute if any item evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
if not values.dtype == np.bool:
values = values != 0
return self.unique, self.reduce(values, axis=axis) > 0
def all(self, values, axis=0):
"""compute if all items evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, operator=np.multiply) != 0
def argmin(self, values):
"""return the index into values corresponding to the minimum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmin of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmin : ndarray, [groups]
index into value array, representing the argmin per group
"""
keys, minima = self.min(values)
minima = minima[self.inverse]
# select the first occurence of the minimum in each group
index = as_index((self.inverse, values == minima))
return keys, index.sorter[index.start[-self.groups:]]
def argmax(self, values):
"""return the index into values corresponding to the maximum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmax of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmax : ndarray, [groups]
index into value array, representing the argmax per group
"""
keys, maxima = self.max(values)
maxima = maxima[self.inverse]
# select the first occurence of the maximum in each group
index = as_index((self.inverse, values == maxima))
return keys, index.sorter[index.start[-self.groups:]]
#implement iter interface? could simply do zip( group_by(keys)(values)), no?
def group_by(keys, values=None, reduction=None, axis=0):
"""construct a grouping object on the given keys, optionally performing the given reduction on the given values
Parameters
----------
keys : indexable object
keys to group by
values : array_like, optional
sequence of values, of the same length as keys
if a reduction function is provided, the given values are reduced by key
if no reduction is provided, the given values are grouped and split by key
reduction : lambda, optional
reduction function to apply to the values in each group
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
Returns
-------
iterable
if values is None, a GroupBy object of the given keys object
        if reduction is None, a tuple of a sequence of unique keys and a sequence of grouped values
else, a sequence of tuples of unique keys and reductions of values over that key-group
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
g = GroupBy(keys, axis)
if values is None:
return g
groups = g.split(values)
if reduction is None:
return g.unique, groups
return [(key, reduction(group)) for key, group in zip(g.unique, groups)]
__all__ = ['group_by']
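# Illustrative usage sketch (not part of the original module; relies only on the
# behaviour documented in the docstrings above). group_by either splits the given
# values per unique key or reduces them with the supplied reduction function.
if __name__ == '__main__':
    demo_keys = np.array([0, 1, 0, 1, 1])
    demo_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    unique_keys, grouped = group_by(demo_keys, demo_values)    # split values by key
    print(unique_keys, grouped)                                # expected: [0 1] and the two per-key groups
    print(group_by(demo_keys, demo_values, reduction=np.sum))  # expected: [(0, 4.0), (1, 11.0)]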
| 32.655791 | 115 | 0.575082 | [
"MIT"
] | EelcoHoogendoorn/Numpy_arraysetops_EP | numpy_indexed/grouping.py | 20,018 | Python |
import argparse
import os
from multiprocessing import Process, Queue
import time
import logging
log = logging.getLogger(__name__)
from scipy import linalg
import cooler
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
from sklearn.cluster import KMeans, SpectralClustering
from sklearn.neighbors import NearestNeighbors
from sklearn.decomposition import PCA
from hicmatrix import HiCMatrix as hm
import numpy as np
from scipy.sparse import csr_matrix
from holoviews.plotting.util import process_cmap
from schicexplorer._version import __version__
from schicexplorer.utilities import cell_name_list, create_csr_matrix_all_cells
def parse_arguments(args=None):
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=False,
        description='scHicCluster uses kmeans or spectral clustering to associate each cell with a cluster and therefore with its cell cycle. '
                    'The clustering can be run on the raw data, on a kNN graph computed via the exact euclidean distance, or via PCA. '
                    'Please also consider the other clustering and dimension reduction approaches of the scHicExplorer suite; they can give better results, '
                    'run faster, or require less memory.'
)
parserRequired = parser.add_argument_group('Required arguments')
# define the arguments
parserRequired.add_argument('--matrix', '-m',
help='The single cell Hi-C interaction matrices to cluster. Needs to be in scool format',
metavar='scool scHi-C matrix',
required=True)
parserRequired.add_argument('--numberOfClusters', '-c',
                                help='Number of clusters to compute',
required=False,
default=12,
type=int)
parserRequired.add_argument('--clusterMethod', '-cm',
help='Algorithm to cluster the Hi-C matrices',
choices=['spectral', 'kmeans'],
default='spectral')
parserOpt = parser.add_argument_group('Optional arguments')
parserOpt.add_argument('--chromosomes',
                           help='List of chromosomes to include',
nargs='+')
parserOpt.add_argument('--intraChromosomalContactsOnly', '-ic',
help='This option loads only the intra-chromosomal contacts. Can improve the cluster result if data is very noisy.',
action='store_true')
parserOpt.add_argument('--additionalPCA', '-pca',
help='Computes PCA on top of a k-nn. Can improve the cluster result.',
action='store_true')
parserOpt.add_argument('--dimensionsPCA', '-dim_pca',
help='The number of dimensions from the PCA matrix that should be considered for clustering. Can improve the cluster result.',
default=20,
type=int)
parserOpt.add_argument('--dimensionReductionMethod', '-drm',
help='Dimension reduction methods, knn with euclidean distance, pca',
choices=['none', 'knn', 'pca'],
default='none')
parserOpt.add_argument('--createScatterPlot', '-csp',
help='Create a scatter plot for the clustering, the x and y are the first and second principal component of the computed k-nn graph.',
required=False,
default=None)
parserOpt.add_argument('--numberOfNearestNeighbors', '-k',
                           help='Number of nearest neighbors to compute for the knn graph. The value used is this default or the number of provided cells, whichever is smaller.',
required=False,
default=100,
type=int)
parserOpt.add_argument('--dpi', '-d',
help='The dpi of the scatter plot.',
required=False,
default=300,
type=int)
parserOpt.add_argument('--outFileName', '-o',
help='File name to save the resulting clusters',
required=True,
default='clusters.txt')
parserOpt.add_argument('--cell_coloring_type', '-cct',
                           help='A two column list: first column the cell names as stored in the scool file, second column the associated coloring for the scatter plot',
required=False)
parserOpt.add_argument('--cell_coloring_batch', '-ccb',
                           help='A two column list: first column the cell names as stored in the scool file, second column the associated coloring for the scatter plot',
required=False)
parserOpt.add_argument('--latexTable', '-lt',
                           help='File name to write the overlap statistics as a LaTeX table. Requires --cell_coloring_type.')
parserOpt.add_argument('--figuresize',
                           help='Figure size of the plot: x-size and y-size.',
type=float,
nargs=2,
default=(15, 6),
metavar=('x-size', 'y-size'))
parserOpt.add_argument('--colorMap',
help='Color map to use for the heatmap, supported are the categorical colormaps from holoviews: '
'http://holoviews.org/user_guide/Colormaps.html',
default='glasbey_dark')
parserOpt.add_argument('--fontsize',
help='Fontsize in the plot for x and y axis.',
type=float,
default=15)
parserOpt.add_argument('--threads', '-t',
help='Number of threads. Using the python multiprocessing module.',
required=False,
default=8,
type=int)
parserOpt.add_argument('--help', '-h', action='help', help='show this help message and exit')
parserOpt.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
return parser
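# Illustrative invocation (a sketch, not part of the original tool; the file names
# below are placeholders, the flags are the ones defined in parse_arguments above):
#   scHicCluster -m cells.scool --numberOfClusters 12 --clusterMethod spectral \
#       --dimensionReductionMethod knn --outFileName clusters.txt --threads 8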
def main(args=None):
args = parse_arguments().parse_args(args)
outputFolder = os.path.dirname(os.path.abspath(args.outFileName)) + '/'
log.debug('outputFolder {}'.format(outputFolder))
if args.cell_coloring_type:
cell_name_cell_type_dict = {}
cell_type_color_dict = {}
color_cell_type_dict = {}
cell_type_counter = 0
with open(args.cell_coloring_type, 'r') as file:
for i, line in enumerate(file.readlines()):
line = line.strip()
try:
cell_name, cell_type = line.split('\t')
except Exception:
cell_name, cell_type = line.split(' ')
cell_name_cell_type_dict[cell_name] = cell_type
if cell_type not in cell_type_color_dict:
cell_type_color_dict[cell_type] = cell_type_counter
color_cell_type_dict[cell_type_counter] = cell_type
cell_type_counter += 1
if args.cell_coloring_batch:
cell_name_cell_type_dict_batch = {}
cell_type_color_dict_batch = {}
color_cell_type_dict_batch = {}
cell_type_counter_batch = 0
with open(args.cell_coloring_batch, 'r') as file:
for i, line in enumerate(file.readlines()):
line = line.strip()
try:
cell_name, cell_type = line.split('\t')
except Exception:
cell_name, cell_type = line.split(' ')
cell_name_cell_type_dict_batch[cell_name] = cell_type
if cell_type not in cell_type_color_dict_batch:
cell_type_color_dict_batch[cell_type] = cell_type_counter_batch
color_cell_type_dict_batch[cell_type_counter_batch] = cell_type
cell_type_counter_batch += 1
raw_file_name = os.path.splitext(os.path.basename(args.outFileName))[0]
neighborhood_matrix, matrices_list = create_csr_matrix_all_cells(args.matrix, args.threads, args.chromosomes, outputFolder, raw_file_name, args.intraChromosomalContactsOnly)
reduce_to_dimension = neighborhood_matrix.shape[0] - 1
if args.dimensionReductionMethod == 'knn':
if args.numberOfNearestNeighbors > reduce_to_dimension:
args.numberOfNearestNeighbors = reduce_to_dimension
nbrs = NearestNeighbors(n_neighbors=args.numberOfNearestNeighbors, algorithm='ball_tree', n_jobs=args.threads).fit(neighborhood_matrix)
neighborhood_matrix = nbrs.kneighbors_graph(mode='distance')
if args.additionalPCA:
pca = PCA(n_components=min(neighborhood_matrix.shape) - 1)
neighborhood_matrix = pca.fit_transform(neighborhood_matrix.todense())
if args.dimensionsPCA:
args.dimensionsPCA = min(args.dimensionsPCA, neighborhood_matrix.shape[0])
neighborhood_matrix = neighborhood_matrix[:, :args.dimensionsPCA]
elif args.dimensionReductionMethod == 'pca':
corrmatrix = np.cov(neighborhood_matrix.todense())
evals, eigs = linalg.eig(corrmatrix)
neighborhood_matrix = eigs[:, :reduce_to_dimension].transpose()
if args.clusterMethod == 'spectral':
spectralClustering_object = SpectralClustering(n_clusters=args.numberOfClusters, n_jobs=args.threads,
n_neighbors=reduce_to_dimension, affinity='nearest_neighbors', random_state=0, eigen_solver="arpack")
labels_clustering = spectralClustering_object.fit_predict(neighborhood_matrix)
elif args.clusterMethod == 'kmeans':
kmeans_object = KMeans(n_clusters=args.numberOfClusters, random_state=0, n_jobs=args.threads, precompute_distances=True)
labels_clustering = kmeans_object.fit_predict(neighborhood_matrix)
if args.colorMap:
colors = process_cmap(args.colorMap)
if args.cell_coloring_type:
if len(colors) < len(cell_type_color_dict):
            log.error('The chosen colormap offers too few values for the number of clusters.')
exit(1)
labels_clustering_cell_type = []
for cell_name in matrices_list:
labels_clustering_cell_type.append(cell_type_color_dict[cell_name_cell_type_dict[cell_name]])
labels_clustering_cell_type = np.array(labels_clustering_cell_type)
log.debug('labels_clustering_cell_type: {}'.format(len(labels_clustering_cell_type)))
log.debug('matrices_list: {}'.format(len(matrices_list)))
label_x = 'PC1'
label_y = 'PC2'
if args.createScatterPlot:
if args.dimensionReductionMethod == 'none':
                log.warning('Raw matrix clustering scatter plot needs to compute a PCA and can require a large amount (> 100 GB) of memory.')
log.debug('args.additionalPCA {}'.format(args.additionalPCA))
log.debug('args.dimensionReductionMethod {}'.format(args.dimensionReductionMethod))
if args.dimensionReductionMethod == 'none' or (args.dimensionReductionMethod == 'knn' and not args.additionalPCA):
log.debug('compute pca')
pca = PCA(n_components=min(neighborhood_matrix.shape) - 1)
neighborhood_matrix_knn = pca.fit_transform(neighborhood_matrix.todense())
log.debug('compute pca')
else:
log.debug('already computed pca')
neighborhood_matrix_knn = neighborhood_matrix
if args.cell_coloring_type:
plt.figure(figsize=(args.figuresize[0], args.figuresize[1]))
for i, color in enumerate(colors[:len(cell_type_color_dict)]):
mask = labels_clustering_cell_type == i
log.debug('plot cluster: {} {}'.format(color_cell_type_dict[i], np.sum(mask)))
plt.scatter(neighborhood_matrix_knn[:, 0].T[mask], neighborhood_matrix_knn[:, 1].T[mask], color=color, label=str(color_cell_type_dict[i]), s=20, alpha=0.7)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=args.fontsize)
plt.xticks([])
plt.yticks([])
plt.xlabel(label_x, fontsize=args.fontsize)
plt.ylabel(label_y, fontsize=args.fontsize)
if '.' not in args.createScatterPlot:
args.createScatterPlot += '.png'
scatter_plot_name = '.'.join(args.createScatterPlot.split('.')[:-1]) + '_cell_color.' + args.createScatterPlot.split('.')[-1]
plt.tight_layout()
plt.savefig(scatter_plot_name, dpi=args.dpi)
plt.close()
if args.cell_coloring_batch:
if len(colors) < len(cell_type_color_dict_batch):
                    log.error('The chosen colormap offers too few values for the number of clusters.')
exit(1)
labels_clustering_cell_type_batch = []
for cell_name in matrices_list:
labels_clustering_cell_type_batch.append(cell_type_color_dict_batch[cell_name_cell_type_dict_batch[cell_name]])
labels_clustering_cell_type_batch = np.array(labels_clustering_cell_type_batch)
log.debug('labels_clustering_cell_type: {}'.format(len(labels_clustering_cell_type_batch)))
log.debug('matrices_list: {}'.format(len(matrices_list)))
plt.figure(figsize=(args.figuresize[0], args.figuresize[1]))
for i, color in enumerate(colors[:len(cell_type_color_dict_batch)]):
mask = labels_clustering_cell_type_batch == i
log.debug('plot cluster: {} {}'.format(color_cell_type_dict_batch[i], np.sum(mask)))
plt.scatter(neighborhood_matrix_knn[:, 0].T[mask], neighborhood_matrix_knn[:, 1].T[mask], color=color, label=str(color_cell_type_dict_batch[i]), s=20, alpha=0.7)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=args.fontsize)
plt.xticks([])
plt.yticks([])
plt.xlabel(label_x, fontsize=args.fontsize)
plt.ylabel(label_y, fontsize=args.fontsize)
if '.' not in args.createScatterPlot:
args.createScatterPlot += '.png'
scatter_plot_name = '.'.join(args.createScatterPlot.split('.')[:-1]) + '_cell_color_batch.' + args.createScatterPlot.split('.')[-1]
plt.tight_layout()
plt.savefig(scatter_plot_name, dpi=args.dpi)
plt.close()
plt.figure(figsize=(args.figuresize[0], args.figuresize[1]))
for i, color in enumerate(colors[:args.numberOfClusters]):
mask = labels_clustering == i
plt.scatter(neighborhood_matrix_knn[:, 0].T[mask], neighborhood_matrix_knn[:, 1].T[mask], color=color, label=str(i), s=20, alpha=0.7)
plt.legend(fontsize=args.fontsize)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=args.fontsize)
plt.xticks([])
plt.yticks([])
plt.xlabel(label_x, fontsize=args.fontsize)
plt.ylabel(label_y, fontsize=args.fontsize)
if '.' not in args.createScatterPlot:
args.createScatterPlot += '.png'
scatter_plot_name = '.'.join(args.createScatterPlot.split('.')[:-1]) + '.' + args.createScatterPlot.split('.')[-1]
plt.tight_layout()
plt.savefig(scatter_plot_name, dpi=args.dpi)
plt.close()
if args.latexTable and args.cell_coloring_type:
        # compute overlap of cell_type and found clusters
computed_clusters = set(labels_clustering)
cell_type_amounts_dict = {}
# percentage_threshold = 0.8
for threshold in [0.7, 0.8, 0.9]:
cell_type_amounts_dict[threshold] = {}
with open(args.latexTable, 'w') as matches_file:
header = '\\begin{table}[!htb]\n\\footnotesize\n\\begin{tabular}{|l'
body = '\\hline Cluster '
for i in range(len(color_cell_type_dict)):
mask_cell_type = labels_clustering_cell_type == i
header += '|c'
body += '& ' + str(color_cell_type_dict[i]) + ' (' + str(np.sum(mask_cell_type)) + ' cells)'
header += '|}\n'
body += '\\\\\n'
# body = ''
for i in computed_clusters:
body += '\\hline Cluster ' + str(i)
mask_computed_clusters = labels_clustering == i
body += ' (' + str(np.sum(mask_computed_clusters)) + ' cells)'
for j in range(len(cell_type_color_dict)):
mask_cell_type = labels_clustering_cell_type == j
mask = mask_computed_clusters & mask_cell_type
number_of_matches = np.sum(mask)
body += '& ' + str(number_of_matches)
if number_of_matches != 1:
body += ' cells / '
else:
body += ' cell / '
body += '{:.2f}'.format((number_of_matches / np.sum(mask_computed_clusters)) * 100) + ' \\% '
for threshold in [0.7, 0.8, 0.9]:
if number_of_matches / np.sum(mask_computed_clusters) >= threshold:
if color_cell_type_dict[j] in cell_type_amounts_dict[threshold]:
cell_type_amounts_dict[threshold][color_cell_type_dict[j]] += number_of_matches
else:
cell_type_amounts_dict[threshold][color_cell_type_dict[j]] = number_of_matches
else:
if color_cell_type_dict[j] in cell_type_amounts_dict[threshold]:
continue
else:
cell_type_amounts_dict[threshold][color_cell_type_dict[j]] = 0
body += '\\\\\n'
body += '\\hline ' + '&' * len(cell_type_color_dict) + '\\\\\n'
for threshold in [0.7, 0.8, 0.9]:
body += '\\hline Correct identified $>{}\\%$'.format(int(threshold * 100))
for i in range(len(cell_type_color_dict)):
mask_cell_type = labels_clustering_cell_type == i
if color_cell_type_dict[i] in cell_type_amounts_dict[threshold]:
body += '& ' + str(cell_type_amounts_dict[threshold][color_cell_type_dict[i]]) + ' / ' + str(np.sum(mask_cell_type)) + ' ('
body += '{:.2f}'.format((cell_type_amounts_dict[threshold][color_cell_type_dict[i]] / np.sum(mask_cell_type)) * 100)
else:
body += '& ' + str(0) + ' / ' + str(np.sum(mask_cell_type)) + ' ('
body += '{:.2f}'.format(0 / np.sum(mask_cell_type))
body += ' \\%)'
body += '\\\\\n'
body += '\\hline \n'
body += '\\end{tabular}\n\\caption{}\n\\end{table}'
matches_file.write(header)
matches_file.write(body)
matrices_cluster = list(zip(matrices_list, labels_clustering))
np.savetxt(args.outFileName, matrices_cluster, fmt="%s")
| 52.491979 | 195 | 0.596781 | [
"MIT"
] | joachimwolff/scHiCExplorer | schicexplorer/scHicCluster.py | 19,632 | Python |
from PySide import QtGui, QtCore
import os, subprocess, shutil, re
class animQt(QtGui.QMainWindow):
def __init__(self):
super(animQt, self).__init__()
self.setGeometry(250,250,360,100)
style = """
QMainWindow, QMessageBox{
background-color: qradialgradient(spread:pad, cx:0.5, cy:0.5, radius:0.5, fx:0.5, fy:0.5, stop:0.264865 rgba(121, 185, 255, 255), stop:1 rgba(0, 126, 255, 255));
}
QPushButton{
background-color: qlineargradient(spread:pad, x1:1, y1:1, x2:1, y2:0, stop:0.448649 rgba(255, 255, 255, 107), stop:0.464865 rgba(0, 0, 0, 15));
border:1px solid rgb(0, 170, 255);
padding:5px;
color:#FFF;
border-radius:5px;
}
QPushButton:hover{
background-color: qlineargradient(spread:pad, x1:1, y1:1, x2:1, y2:0, stop:0.448649 rgba(0, 0, 0, 15), stop:0.47 rgba(255, 255, 255, 107));
}
QCheckBox{
color:#FFF;
}
QLineEdit{
background-color:rgba(255, 255, 255, 100);
color:#FFF;
border:1px solid rgb(0,170,255);
border-radius:5px;
padding:3px;
}
QLabel{
color:#FFF;
}
QComboBox{
background-color: qlineargradient(spread:pad, x1:1, y1:1, x2:1, y2:0, stop:0.448649 rgba(255, 255, 255, 107), stop:0.464865 rgba(0, 0, 0, 15));
color:#FFF;
padding:5px;
border:1px solid rgb(0, 170, 255);
border-radius:5px;
}
QComboBox:hover{
background-color: qlineargradient(spread:pad, x1:1, y1:1, x2:1, y2:0, stop:0.448649 rgba(0, 0, 0, 15), stop:0.47 rgba(255, 255, 255, 107));
}
QComboBox::drop-down{
subcontrol-origin: padding;
subcontrol-position: top right;
width:25px;
border-left-width: 1px;
border-left-style: solid;
border-top-right-radius: 5px;
border-bottom-right-radius: 5px;
border-left-color: rgb(0, 170, 255);
}
QComboBox::down-arrow{
border-image: url("./down-arrow.png");
height:30px;
width:30px;
}
"""
effect = QtGui.QGraphicsDropShadowEffect(self)
effect.setBlurRadius(5)
effect.setOffset(2,2)
self.setStyleSheet(style)
self.setWindowTitle("Exe Generator(py2exe)")
centralWidget = QtGui.QWidget()
layout = QtGui.QGridLayout(centralWidget)
self.foldPath = QtGui.QLineEdit(self)
openBtn = QtGui.QPushButton(self)
openBtn.setGraphicsEffect(effect)
openBtn.setText("Select File")
openBtn.clicked.connect(self.fileBrowser)
pyPathInit = QtGui.QLabel(self)
pyPathInit.setText("Select Python Version")
self.pyPath = QtGui.QComboBox(self)
self.pyPath.activated.connect(self.changePyPath)
effect = QtGui.QGraphicsDropShadowEffect(self)
effect.setBlurRadius(5)
effect.setOffset(2, 2)
self.pyPath.setGraphicsEffect(effect)
self.checkBox = QtGui.QCheckBox(self)
self.checkBox.setText("Window Mode")
checkBtn = QtGui.QPushButton(self)
checkBtn.clicked.connect(self.createSetup)
checkBtn.setText("Process")
effect = QtGui.QGraphicsDropShadowEffect(self)
effect.setBlurRadius(5)
effect.setOffset(2, 2)
checkBtn.setGraphicsEffect(effect)
layout.addWidget(self.foldPath, 0, 0, 1, 2)
layout.addWidget(openBtn, 0, 2, 1, 1)
layout.addWidget(pyPathInit, 1, 0, 1, 1)
layout.addWidget(self.pyPath, 1, 1, 1, 2)
layout.addWidget(self.checkBox, 2, 0, 1, 2)
layout.addWidget(checkBtn, 2, 2, 1, 1)
self.setCentralWidget(centralWidget)
self.getInstalledPy()
def fileBrowser(self):
browse = QtGui.QFileDialog.getOpenFileName(self, "Select File")
self.foldPath.setText(browse[0])
self.foldName = os.path.dirname(browse[0])
self.filePath = browse[0]
# self.createSetup()
def changePyPath(self, index):
self.setPath = self.pyPath.itemText(index)
def getInstalledPy(self):
path = "c:/"
self.pyPath.addItem("Select")
for each in os.listdir(path):
if os.path.isdir(path+each):
if re.search("Python\d", each, re.I):
if os.path.exists(path+each+"/python.exe"):
# print path+each+"/python.exe"
self.pyPath.addItem(path+each+"/python.exe")
# self.pyPath.addItem("Z:/workspace_mel/dqepy/py27/Scripts/python.exe")
def createSetup(self):
try:
setupFile = self.foldName.replace('\\','/')+"/setup.py"
with open(setupFile, 'w') as fd:
if not self.checkBox.isChecked():
fd.write("from distutils.core import setup\n")
fd.write("import py2exe\n")
fd.write("setup(console =['%s'])"%os.path.basename(self.filePath))
else:
fd.write("from distutils.core import setup\n")
fd.write("import py2exe\n")
fd.write("setup(windows =['%s'])" % os.path.basename(self.filePath))
self.cmdProcess()
shutil.rmtree('%s/build'%self.foldName.replace('\\','/'))
os.rename("dist",os.path.basename(self.filePath).split('.')[0])
self.displayError(parent=self, m="Process done successfully!!!", t="Process Done")
except Exception as e:
self.displayError(parent=self, m="Please Enter all the values\nbefore clicking process button", t="Invalid Values", type=QtGui.QMessageBox.Critical)
def cmdProcess(self):
with open("runBatch.bat", 'w') as fd:
fd.write("@echo off\n")
fd.write("cd %s\n" % self.foldName)
fd.write("%s\n"%self.foldName.replace('\\','/').split("/")[0])
fd.write('%s setup.py py2exe'%self.setPath)
try:
subprocess.call("runBatch.bat", 0, None, None, None, None)
except:
self.displayError(parent=self, m="Python modules were missing in the Python Interpreter\nPlease make sure you had py2exe module", t="Invalid Python Version", type=QtGui.QMessageBox.Critical)
os.remove("runBatch.bat")
def displayError(self, parent, m=None, t="Error found", type=QtGui.QMessageBox.Information, details = ""):
dError = QtGui.QMessageBox(parent)
dError.setText(m)
dError.setWindowTitle(t)
dError.setIcon(type)
dError.setStandardButtons(QtGui.QMessageBox.Ok)
dError.setEscapeButton(QtGui.QMessageBox.Ok)
if details != "":
dError.setDetailedText(details)
dError.show()
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
gui = animQt()
gui.show()
sys.exit(app.exec_()) | 39.959538 | 202 | 0.592362 | [
"Unlicense"
] | sunil1239/FuelEfficiencyInfo | testing.py | 6,913 | Python |
from .snowflake import Snowflake
| 16.5 | 32 | 0.848485 | [
"MIT"
] | ronmorgen1/snowflake | snowflake/__init__.py | 33 | Python |
# -*- coding: utf-8 -*-
import codecs
import re
from os import path
from distutils.core import setup
from setuptools import find_packages
def read(*parts):
return codecs.open(path.join(path.dirname(__file__), *parts),
encoding='utf-8').read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return str(version_match.group(1))
raise RuntimeError("Unable to find version string.")
setup(
name='django-floppyforms',
version=find_version('floppyforms', '__init__.py'),
author='Bruno Renié',
author_email='[email protected]',
packages=find_packages(exclude=["tests.*", "tests"]),
include_package_data=True,
url='https://github.com/gregmuellegger/django-floppyforms',
license='BSD licence, see LICENSE file',
description='Full control of form rendering in the templates',
long_description='\n\n'.join((
read('README.rst'),
read('CHANGES.rst'))),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
],
zip_safe=False,
)
| 31.145833 | 68 | 0.628094 | [
"BSD-3-Clause"
] | greyside/django-floppyforms | setup.py | 1,496 | Python |
from PauliInteraction import PauliInteraction
from Ising import Ising
from CoefficientGenerator import CoefficientGenerator
from Evaluator import Evaluator
#from DataVisualizer import DataVisualizer
#from DataLogger import DataLogger
import Driver as Driver_H
from MasterEquation import MasterEquation
from QuantumAnnealer import QuantumAnnealer
from Test import test
from mpi4py import MPI
import qutip as qp
import numpy as np
import time as time
import sys
X_HAT = "X"
Y_HAT = "Y"
Z_HAT = "Z"
#NUM_TESTS = 1
#N = 7
T = 100
#NUM_INTERACTIONS = int((N * (N - 1)) / 2)
RAND_COEF = False
MASTER_RANK = 0
# MAIN TEST CASES FROM SAMPLE CODE
# THIS IS NOT USED!
def main():
    print(sys.argv)
COMM = MPI.COMM_WORLD
NUM_PROC = COMM.Get_size()
M_RANK = COMM.Get_rank()
N = 100
m_N = (int) (N / NUM_PROC)
if M_RANK == MASTER_RANK:
A = np.arange(N, dtype = np.float64)
start_time = MPI.Wtime()
else:
A = np.empty(N, dtype = np.float64)
m_A = np.empty(m_N, dtype = np.float64)
# Scatter
COMM.Scatter([A, MPI.DOUBLE], [m_A, MPI.DOUBLE])
for i in range(m_N):
m_A[i] = M_RANK
COMM.Barrier()
COMM.Allgather([m_A, MPI.DOUBLE], [A, MPI.DOUBLE])
COMM.Barrier()
if M_RANK == MASTER_RANK:
print(A)
#for i in range(10):
#test(RAND_COEF, i)
# THIS IS USED!
def parallel_main(NUM_TESTS, NUM_QUBITS):
# MPI Initialization
COMM = MPI.COMM_WORLD
NUM_PROC = COMM.Get_size()
M_RANK = COMM.Get_rank()
# If process is master rank, allocate array of overlap probabilities
if M_RANK == MASTER_RANK:
overlap_probabilities = np.zeros(NUM_TESTS, dtype = np.float64)
start_time = MPI.Wtime()
else:
overlap_probabilities = np.empty(NUM_TESTS, dtype = np.float64);
# Calculate the local number of tests to perform
M_TESTS = (int) (NUM_TESTS / NUM_PROC)
# Allocate local overlap probablity arrays
m_overlap_probabilities = np.empty(M_TESTS, dtype = np.float64)
# Scatter the global overlap probabilities
COMM.Scatter([overlap_probabilities, MPI.DOUBLE], [m_overlap_probabilities, MPI.DOUBLE])
# And for each process, perform its local tests and save overlap probability
for i in range(M_TESTS):
m_overlap_probabilities[i] = test(RAND_COEF, 0, NUM_QUBITS)
# Enforce synchronization
COMM.Barrier()
# Gather the local overlap probabilities in master rank
COMM.Allgather([m_overlap_probabilities, MPI.DOUBLE], [overlap_probabilities, MPI.DOUBLE])
# Enforce synchronization
COMM.Barrier()
# When tests are done, master rank will process data and print
if M_RANK == MASTER_RANK:
stop_time = MPI.Wtime()
total_time = stop_time - start_time
# Print probabilities - TODO(Log this to a file, not just print to screen)
#for i in range(len(overlap_probabilities)):
#print("ITERATION %d, OVERLAP PROBABILITY = %f" % (i, overlap_probabilities[i]))
# Print run statistics
print("---------- NUMBER OF QUBITS = %d ----------" % NUM_QUBITS)
print("\tNUMBER OF PROCESSES = %d" % NUM_PROC)
print("\tNUMBER OF TESTS = %d" % NUM_TESTS)
print("\tTOTAL TIME = %f sec" % total_time)
print("------------------------------------------")
# Initial script
if __name__ == "__main__":
NUM_TESTS = int(sys.argv[1])
NUM_QUBITS = int(sys.argv[2])
parallel_main(NUM_TESTS, NUM_QUBITS)
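# Example launch (a sketch, not part of the original script; the MPI launcher and
# interpreter names are assumptions): distribute 100 annealing tests on 7 qubits
# across 4 ranks:
#   mpiexec -n 4 python main.py 100 7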
| 26.007519 | 94 | 0.665799 | [
"MIT"
] | nicksacco17/Quantum_Information | main.py | 3,459 | Python |
from app.api.database.connect import user_information_collection
from app.api.database.execute.base_execute import BaseExecute
from app.api.helpers.convert_model2dict import user_information_helper
class ExerciseTrainerExecute(BaseExecute):
def __init__(self, data_collection, data_helper):
super().__init__(data_collection, data_helper)
user_information_execute = ExerciseTrainerExecute(user_information_collection, user_information_helper)
| 35.307692 | 103 | 0.849673 | [
"MIT"
] | dhuynguyen94/base-code-fastapi-mongodb | {{cookiecutter.project_slug}}/app/api/database/execute/user_information.py | 459 | Python |
#
# run this command
# $ FLASK_APP=rest.py flask run
#
# request like this
# curl -X POST -H 'Accept:application/json' -H 'Content-Type:application/json' -d '{"start-time":"2019-05-08 09:15", "end-time":"2019-05-08 09:30", "match":"error", "user":"syslog", "password":"mvEPMNThq94LQuys68gR", "count":"true", "sum":"false", "exact":"false"}' localhost:5000/
#
import os
import sys
import tempfile
sys.path.append(os.path.join(os.path.dirname(__file__), '../../lib'))
import logging
from logging.handlers import SysLogHandler
from hayabusa import HayabusaBase
from hayabusa.errors import HayabusaError, CLIClientError
from hayabusa.rest_client import RESTClient
from flask import Flask, request, jsonify
app = Flask(__name__)
def print_result(stderr, stdout, count, sum):
if stderr:
        return sys.stderr.write(stderr.rstrip() + '\n')
if stdout:
if count and sum:
return sys.stdout.write(stdout + '\n')
else:
with tempfile.TemporaryFile() as f:
f.write(stdout.encode('utf-8'))
f.seek(0)
max_lines = 100
lines = f.readlines(max_lines)
while lines:
for line in lines:
if line == b'\n':
continue
sys.stdout.write(line.decode('utf-8'))
lines = f.readlines(max_lines)
@app.route('/', methods=['POST'])
def post_json():
json = request.get_json()
start_time = json['start-time']
end_time = json['end-time']
match = json['match']
user = json['user']
password = json['password']
count = True if json['count'].lower() == 'true' else False
sum = True if json['sum'].lower() == 'true' else False
exact = True if json['exact'].lower() == 'true' else False
stdout = ''
stderr = ''
exit_status = None
data = None
request_id = None
HB = HayabusaBase()
config = HB.load_config()
print(config)
logger = HB.set_logger('hayabusa-restapi', logging.DEBUG, False)
try:
client = RESTClient(config, logger)
request_id, data = client.search(user, password, match,
start_time, end_time,
count, sum, exact)
try:
stdout = data['stdout']
stderr = data['stderr']
exit_status = data['exit_status']
except KeyError as e:
raise CLIClientError('Not Found %s in Received Data' % e)
if type(exit_status) != int:
err = 'Invalid exit status (not int) Received: %s (type: %s)'
raise CLIClientError(err % (exit_status, type(exit_status)))
except HayabusaError as e:
sys.stderr.write('%s: %s\n' % (e.__class__.__name__, e))
exit(1)
except Exception as e:
sys.stderr.write('Unexpected Error: %s, %s\n\n' %
(e.__class__.__name__, e))
raise
result = {}
result['result'] = data['stdout']
result['error'] = data['stderr']
return jsonify(result)
| 32.257732 | 282 | 0.569191 | [
"MIT"
] | hirolovesbeer/hayabusa2 | webui/rest/rest.py | 3,129 | Python |
"""
Gadget class
"""
# Standard Library Imports
# Third Party Imports
# Local Imports
from static_analyzer.Instruction import Instruction
class Gadget(object):
"""
The Gadget class represents a single gadget.
"""
def __init__(self, raw_gadget):
"""
Gadget constructor
:param str raw_gadget: raw line output from ROPgadget
"""
# Parse the raw line
self.offset = raw_gadget[:raw_gadget.find(":")]
self.instruction_string = raw_gadget[raw_gadget.find(":") + 2:]
# Parse instruction objects
self.instructions = []
for instr in self.instruction_string.split(" ; "):
self.instructions.append(Instruction(instr))
# Initialize score
self.score = 0.0
def is_useless_op(self):
"""
:return boolean: Returns True if the first instruction opcode is in the "useless" list, False otherwise
Default behavior is to consider opcodes useful unless otherwise observed.
"""
first_opcode = self.instructions[0].opcode
# Bulk catch for all "jump" opcodes: No reason to include the instruction, just use the suffix directly
if first_opcode.startswith("j"):
return True
# Bulk catch for bounds checked jumps, same reason as above
if first_opcode.startswith("bnd"):
return True
# Bulk catch for all "ret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("ret"):
return True
# Bulk catch for all "iret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("iret"):
return True
# Bulk catch for all "call" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("call"):
return True
# Useless opcodes:
# NOP - No reason to include the instruction, just use the suffix directly
# LJMP - Same reason as "jump" opcodes above
useless = ["nop", "fnop", "ljmp"]
return first_opcode in useless
def contains_unusable_op(self):
"""
:return boolean: Returns True if any instruction opcode is unusable. False otherwise
unusable instructions are Ring-0 opcodes that trap in user mode and some other exceptional ops.
"""
for instr in self.instructions:
# Bulk catch for all "invalidate" opcodes: Ring-0 instructions
if instr.opcode.startswith("inv"):
return True
# Bulk catch for all "Virtual-Machine" opcodes: Ring-0 instructions
if instr.opcode.startswith("vm") and instr.opcode != "vminsd" and instr.opcode != "vminpd":
return True
# Bulk catch for all "undefined" opcodes
if instr.opcode.startswith("ud"):
return True
# Other Ring-0 opcodes and RSM, LOCK prefix
unusable = ["clts", "hlt", "lgdt", "lidt", "lldt", "lmsw", "ltr", "monitor", "mwait",
"swapgs", "sysexit", "sysreturn", "wbinvd", "wrmsr", "xsetbv", "rsm", "lock"]
if instr.opcode in unusable:
return True
# Check for ring-0 operands (control, debug, and test registers)
if instr.op1 is not None:
if instr.op1.startswith("cr") or instr.op1.startswith("tr") or instr.op1.startswith("db"):
return True
if instr.op2 is not None:
if instr.op2.startswith("cr") or instr.op2.startswith("tr") or instr.op2.startswith("db"):
return True
return False
def is_gpi_only(self):
"""
:return boolean: Returns True if the gadget is a single instruction and starts with 'ret', 'jmp', or 'call',
False otherwise
"""
if len(self.instructions) == 1:
opcode = self.instructions[0].opcode
if opcode.startswith("ret") or opcode.startswith("jmp") or opcode.startswith("call"):
return True
return False
def is_invalid_branch(self):
"""
:return boolean: Returns True if the gadget is 'jmp' or 'call' ending and the call target is a constant offset
or does not target a recognized register family. False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("call") or last_instr.opcode.startswith("jmp"):
if Instruction.get_operand_register_family(last_instr.op1) is None:
return True
return False
def has_invalid_ret_offset(self):
"""
:return boolean: Returns True if the gadget is 'ret' ending and contains a constant offset that is not byte
aligned or is greater than 32 bytes, False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("ret") and last_instr.op1 is not None:
offset = Instruction.get_operand_as_constant(last_instr.op1)
if (offset % 2 != 0) or (offset > 32):
return True
return False
def clobbers_created_value(self):
"""
:return boolean: Returns True if the gadget completely overwrites the value created in the first instruction,
False otherwise.
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
return False
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
# Most likely means first operand is a constant, exclude from analysis
if first_family is None:
return False
# Iterate through intermediate instructions, determine if it overwrites protected value (or part of it)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value() or "xchg" in cur_instr.opcode:
continue
# Check for non-static modification of the register family
if first_family == Instruction.get_operand_register_family(cur_instr.op1):
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
def creates_unusable_value(self):
"""
:return boolean: Returns True if the gadget creates a value in segment or extension registers, or are
RIP-relative, or are constant memory locations; False otherwise.
"""
        # Check if the first instruction creates a value (or may potentially set a flag)
first_instr = self.instructions[0]
if first_instr.opcode in ["cmp", "test", "push"] or first_instr.op1 is None:
return False
# Check if first operand is not a constant and it does not belong to a recognized register family
if not Instruction.is_constant(first_instr.op1) and \
Instruction.get_operand_register_family(first_instr.op1) is None:
return True
return False
def contains_intermediate_GPI(self):
"""
:return boolean: Returns True if the gadget's intermediate instructions contain a GPI (or a generic interrupt),
False otherwise.
"""
for i in range(len(self.instructions)-1):
cur_opcode = self.instructions[i].opcode
cur_target = self.instructions[i].op1
if cur_opcode.startswith("ret") or \
cur_opcode == "syscall" or cur_opcode == "sysenter" or cur_opcode.startswith("int") or \
("jmp" in cur_opcode and not Instruction.is_constant(cur_target)) or \
("call" in cur_opcode and not Instruction.is_constant(cur_target)):
return True
return False
def clobbers_stack_pointer(self):
"""
:return boolean: Returns True if the ROP gadget's instructions assign a non-static value to the stack pointer
register, False otherwise.
"""
# Only check ROP gadgets
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("ret"):
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the stack pointer register family
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "pop"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
def clobbers_indirect_target(self):
"""
:return boolean: Returns True if the JOP/COP gadget's instructions modify the indirect branch register in
certain ways, False otherwise.
"""
# Get the register family of the indirect jump / call
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
family = Instruction.get_operand_register_family(last_instr.op1)
# Check each instruction to see if it clobbers the value
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# First check if the instruction modifies the target
if cur_instr.op1 in Instruction.register_families[family]:
# Does the instruction zeroize out the target?
if cur_instr.opcode == "xor" and cur_instr.op1 == cur_instr.op2:
return True
# Does the instruction perform a RIP-relative LEA into the target?
if cur_instr.opcode == "lea" and ("rip" in cur_instr.op2 or "eip" in cur_instr.op2):
return True
# Does the instruction load a string or a value of an input port into the target?
if cur_instr.opcode.startswith("lods") or cur_instr.opcode == "in":
return True
# Does the instruction overwrite the target with a static value or segment register value?
if "mov" in cur_instr.opcode and (Instruction.is_constant(cur_instr.op2) or
Instruction.get_operand_register_family(cur_instr.op2) is None):
return True
return False
def has_invalid_int_handler(self):
"""
        :return boolean: Returns True if the gadget ends in a software interrupt whose vector is not 0x80, False
                         otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("int") and last_instr.op1 != "0x80":
return True
return False
def is_rip_relative_indirect_branch(self):
"""
:return boolean: Returns True if the gadget is a JOP/COP gadget relying on a RIP relative indirect branch,
False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
if "rip" in last_instr.op1 or "eip" in last_instr.op1:
return True
return False
def contains_static_call(self):
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("call") and Instruction.is_constant(cur_instr.op1):
return True
return False
def is_equal(self, rhs):
"""
:return boolean: Returns True if the gadgets are an exact match, including offset. Used for gadget locality.
"""
return self.offset == rhs.offset and self.instruction_string == rhs.instruction_string
def is_duplicate(self, rhs):
"""
:return boolean: Returns True if the gadgets are a semantic match. Used for non-locality gadget metrics.
Semantic match is defined as the exact same sequence of equivalent instructions.
"""
if len(self.instructions) != len(rhs.instructions):
return False
for i in range(len(self.instructions)):
if not self.instructions[i].is_equivalent(rhs.instructions[i]):
return False
return True
def is_JOP_COP_dispatcher(self):
"""
        :return boolean: Returns True if the gadget is a JOP or COP dispatcher. Defined as a gadget that begins with an
                         arithmetic operation on a register and ends with a branch to a dereference of that register. Used
                         to iterate through instructions in the payload. The only restriction on the arithmetic operation is
                         that it doesn't use the same register as both operands.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
# Only consider gadgets that end in dereference of a register and start with opcodes of interest
if "[" in last_instr.op1 and \
first_instr.opcode in ["inc", "dec", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(last_instr.op1)
arith_target_1 = Instruction.get_operand_register_family(first_instr.op1)
# Secondary check: if the second op is a constant ensure it is in range [1, 32]
if Instruction.is_constant(first_instr.op2):
additive_value = Instruction.get_operand_as_constant(first_instr.op2)
if additive_value < 1 or additive_value > 32:
return False
arith_target_2 = Instruction.get_operand_register_family(first_instr.op2)
return gpi_target == arith_target_1 and arith_target_1 != arith_target_2
return False
def is_JOP_COP_dataloader(self):
"""
:return boolean: Returns True if the gadget is a JOP or COP data loader. Defined as a gadget that begins with a
pop opcode to a non-memory location, that is also not the target of the GPI. Used to pop a
necessary value off stack en masse before redirecting to the dispatcher.
"""
first_instr = self.instructions[0]
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(self.instructions[len(self.instructions) - 1].op1)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target != pop_target
return False
def is_JOP_initializer(self):
"""
:return boolean: Returns True if the gadget is a JOP Initializer. Defined as a gadget that begins with a
"pop all" opcode, used to pop necessary values off stack en masse before redirecting to the
dispatcher.
"""
return self.instructions[0].opcode.startswith("popa")
def is_JOP_trampoline(self):
"""
:return boolean: Returns True if the gadget is a JOP trampoline. Defined as a gadget that begins with a
pop opcode to a non-memory location, and that ends in a dereference of that value. Used to
redirect execution to value stored in memory.
"""
first_instr = self.instructions[0]
gpi_target_op = self.instructions[len(self.instructions) - 1].op1
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(gpi_target_op)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target == pop_target and "[" in gpi_target_op
return False
def is_COP_initializer(self):
"""
:return boolean: Returns True if the gadget is a COP initializer. Defined as a gadget that begins with a
"pop all" opcode, does not use register bx/cx/dx/di as the call target, and does not clobber
bx/cx/dx or the call target in an intermediate instruction
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions)-1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
if first_instr.opcode.startswith("popa") and call_target not in [1, 2, 3, 5]: # BX, CX, DX, DI families
# Build collective list of register families to protect from being clobbered
protected_families = [1, 2, 3, call_target]
protected_registers = []
for family in protected_families:
for register in Instruction.register_families[family]:
protected_registers.append(register)
# Scan intermediate instructions to ensure they do not clobber a protected register
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the register family
if cur_instr.op1 in protected_registers:
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return False
return True
return False
def is_COP_strong_trampoline(self):
"""
:return boolean: Returns True if the gadget is a COP strong trampoline. Defined as a gadget that begins with a
pop opcode, and contains at least one other pop operation. The last non-pop all operation must
target the call target.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
# Only consider instructions that start with a pop
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
cnt_pops = 1
last_pop_target = first_instr.op1
# Scan intermediate instructions for pops
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("popa"):
cnt_pops += 1
if cur_instr.opcode == "pop" and "[" not in cur_instr.op1:
cnt_pops += 1
last_pop_target = cur_instr.op1
# Check that at least two pops occurred and the last pop target is the call target
if cnt_pops > 1 and last_pop_target in Instruction.register_families[call_target]:
return True
return False
def is_COP_intrastack_pivot(self):
"""
:return boolean: Returns True if the gadget is a COP Intra-stack pivot gadget. Defined as a gadget that begins
with an additive operation on the stack pointer register. Used to move around in shellcode
during COP exploits. Only restriction on the arithmetic operation is that the second operand
is not a pointer.
"""
first_instr = self.instructions[0]
if first_instr.opcode in ["inc", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
arith_target = Instruction.get_operand_register_family(first_instr.op1)
if arith_target == 7: # RSP, ESP family number
if first_instr.op2 is None or "[" not in first_instr.op2:
return True
return False
def check_contains_leave(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate "leave" instruction.
"""
for i in range(1, len(self.instructions)-1):
if self.instructions[i].opcode == "leave":
self.score += 2.0
return # Only penalize gadget once
def check_sp_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the stack pointer register family.
"""
# Scan instructions to determine if they modify the stack pointer register family
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
            # Increase score by 4 for move, load address, and exchange ops, 3 for shift/rotate ops, 1 for pops, and 2 for others
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if "xchg" in cur_instr.opcode or "mov" in cur_instr.opcode or cur_instr.opcode in ["lea"]:
self.score += 4.0
elif cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
elif cur_instr.opcode == "pop":
self.score += 1.0
else:
self.score += 2.0 # Will be a static modification, otherwise it would have been rejected earlier
def check_negative_sp_offsets(self):
"""
:return void: Increases gadget's score if its cumulative register offsets are negative.
"""
sp_offset = 0
# Scan instructions to determine if they modify the stack pointer
for i in range(len(self.instructions)):
cur_instr = self.instructions[i]
if cur_instr.opcode == "push":
sp_offset -= 8
elif cur_instr.opcode == "pop" and cur_instr.op1 not in Instruction.register_families[7]:
sp_offset += 8
elif cur_instr.opcode in ["add", "adc"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset += Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode in ["sub", "sbb"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset -= Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode == "inc" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset += 1
elif cur_instr.opcode == "dec" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset -= 1
elif cur_instr.opcode.startswith("ret") and cur_instr.op1 is not None:
sp_offset += Instruction.get_operand_as_constant(cur_instr.op1)
if sp_offset < 0:
self.score += 2.0
def check_contains_conditional_op(self):
"""
:return void: Increases gadget's score if it contains conditional instructions like jumps, sets, and moves.
"""
# Scan instructions to determine if they modify the stack pointer
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("j") and cur_instr.opcode != "jmp":
self.score += 3.0
elif "cmov" in cur_instr.opcode or "cmpxchg" in cur_instr.opcode:
self.score += 2.0
elif "set" in cur_instr.opcode:
self.score += 1.0
def check_register_ops(self):
"""
:return void: Increases gadget's score if it contains operations on a value carrying or a bystander register
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
first_family = None
else:
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# If the new value is a modification of the value-carrying register
if first_family is not None and first_family == Instruction.get_operand_register_family(cur_instr.op1):
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 1.5
else:
self.score += 1.0 # Will be a static modification, otherwise it would have been rejected earlier
elif "xchg" not in cur_instr.opcode and cur_instr.opcode != "pop":
# The modification is to a "bystander register". static mods +0.5, non-static +1.0
if cur_instr.op2 is not None and Instruction.get_operand_register_family(cur_instr.op2) is not None:
self.score += 1.0
else:
self.score += 0.5
def check_branch_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the indirect branch target register family.
"""
last_instr = self.instructions[len(self.instructions)-1]
target_family = Instruction.get_operand_register_family(last_instr.op1)
# Scan instructions to determine if they modify the target register family
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Increase score by 3 for shift/rotate ops, and 2 for others
if Instruction.get_operand_register_family(cur_instr.op1) == target_family:
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
else: # All other modifications to target register
self.score += 2.0
def check_memory_writes(self):
"""
:return void: Increases gadget's score if the gadget has an instruction that writes to memory.
"""
# Iterate through instructions except GPI
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
            # Have to check both operands for xchg instructions
if "xchg" in cur_instr.opcode and ("[" in cur_instr.op1 or "[" in cur_instr.op2):
self.score += 1.0
elif cur_instr.op1 is not None and "[" in cur_instr.op1:
self.score += 1.0
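# Illustrative usage sketch (not part of the original analyzer). It assumes
# ROPgadget-style raw lines of the form "<offset> : <instr> ; <instr> ; ..." and
# that the imported Instruction class parses each "<opcode> <op1>, <op2>" chunk.
if __name__ == '__main__':
    demo_gadget = Gadget("0x0000000000001234 : pop rdi ; ret")
    print(demo_gadget.offset, demo_gadget.instruction_string)
    print(demo_gadget.is_gpi_only())         # expected False: the gadget is more than a bare GPI
    print(demo_gadget.is_useless_op())       # expected False: "pop" is a useful first opcode
    demo_gadget.check_negative_sp_offsets()  # one of the scoring checks; adjusts demo_gadget.score
    print(demo_gadget.score)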
| 46.326264 | 120 | 0.610501 | [
"MIT"
] | michaelbrownuc/GadgetSetAnalyzer | src/static_analyzer/Gadget.py | 28,398 | Python |
import hashlib
import requests
import sys
def valid_proof(last_proof, proof):
guess = f'{last_proof}{proof}'.encode()
guess_hash = hashlib.sha256(guess).hexdigest()
return guess_hash[:6] == "000000"
def proof_of_work(last_proof):
"""
Simple Proof of Work Algorithm
- Find a number p' such that hash(pp') contains 6 leading
zeroes, where p is the previous p'
- p is the previous proof, and p' is the new proof
"""
print(f'\nSearch for proof initialized.\n')
proof = 0
while valid_proof(last_proof, proof) is False:
proof += 1
print(f'\nSearch for proof complete, proof is {proof}\n')
return proof
if __name__ == '__main__':
# What node are we interacting with?
if len(sys.argv) > 1:
node = sys.argv[1]
else:
node = "http://localhost:5000"
coins_mined = 0
# Run forever until interrupted
while True:
# Get the last proof from the server and look for a new one
proof = requests.get(url=node + '/last_proof')
new_proof = proof_of_work(proof.json()['proof'])
# When found, POST it to the server {"proof": new_proof}
data = {'proof': new_proof}
attempt = requests.post(url=node + '/mine',json=data)
# If the server responds with 'New Block Forged'
if attempt.json()['message'] == 'New Block Forged':
# add 1 to the number of coins mined and print it.
coins_mined += 1
print("TOTAL COINS MINED:", coins_mined)
else:
# else print the message from the server.
print(attempt.json()['message'])
| 29.303571 | 67 | 0.61365 | [
"MIT"
] | lambda-projects-lafriedel/Blockchain | client_mining_p/miner.py | 1,641 | Python |
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numbers
from collections.abc import Iterable
from typing import Any, Union
import numpy as np
from typing_extensions import Protocol
from cirq import linalg
from cirq._doc import doc_private
from cirq.protocols.approximate_equality_protocol import approx_eq
class SupportsEqualUpToGlobalPhase(Protocol):
"""Object which can be compared for equality mod global phase."""
@doc_private
def _equal_up_to_global_phase_(self, other: Any, *, atol: Union[int, float]) -> bool:
"""Approximate comparator.
Types implementing this protocol define their own logic for comparison
with other types.
Args:
other: Target object for comparison of equality up to global phase.
atol: The minimum absolute tolerance. See `np.isclose()`
documentation for details.
Returns:
True if objects are equal up to a global phase, False otherwise.
Returns NotImplemented when checking equality up to a global phase
is not implemented for given types.
"""
def equal_up_to_global_phase(val: Any, other: Any, *, atol: Union[int, float] = 1e-8) -> bool:
"""Determine whether two objects are equal up to global phase.
If `val` implements a `_equal_up_to_global_phase_` method then it is
invoked and takes precedence over all other checks:
- For complex primitive type the magnitudes of the values are compared.
- For `val` and `other` both iterable of the same length, consecutive
      elements are compared recursively. The types of `val` and `other` do not
      necessarily need to match each other. They just need to be iterable and
have the same structure.
- For all other types, fall back to `_approx_eq_`
Args:
val: Source object for approximate comparison.
other: Target object for approximate comparison.
atol: The minimum absolute tolerance. This places an upper bound on
the differences in *magnitudes* of two compared complex numbers.
Returns:
True if objects are approximately equal up to phase, False otherwise.
"""
# Attempt _equal_up_to_global_phase_ for val.
eq_up_to_phase_getter = getattr(val, '_equal_up_to_global_phase_', None)
if eq_up_to_phase_getter is not None:
result = eq_up_to_phase_getter(other, atol)
if result is not NotImplemented:
return result
# Fall back to _equal_up_to_global_phase_ for other.
other_eq_up_to_phase_getter = getattr(other, '_equal_up_to_global_phase_', None)
if other_eq_up_to_phase_getter is not None:
result = other_eq_up_to_phase_getter(val, atol)
if result is not NotImplemented:
return result
# Fall back to special check for numeric arrays.
# Defer to numpy automatic type casting to determine numeric type.
if isinstance(val, Iterable) and isinstance(other, Iterable):
a = np.asarray(val)
b = np.asarray(other)
if a.dtype.kind in 'uifc' and b.dtype.kind in 'uifc':
return linalg.allclose_up_to_global_phase(a, b, atol=atol)
    # Fall back to approx_eq to compare the magnitudes of two numbers.
if isinstance(val, numbers.Number) and isinstance(other, numbers.Number):
result = approx_eq(abs(val), abs(other), atol=atol) # type: ignore
if result is not NotImplemented:
return result
# Fall back to cirq approx_eq for remaining types.
return approx_eq(val, other, atol=atol)
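# A minimal usage sketch, assuming only numpy and this module;
# `_example_equal_up_to_global_phase` is an illustrative helper, not part of
# the public cirq API. Vectors that differ only by a global phase compare
# equal, while genuinely different vectors do not.
def _example_equal_up_to_global_phase() -> None:
    a = np.array([1j, 1j]) / np.sqrt(2)  # equal to `b` times the global phase 1j
    b = np.array([1.0, 1.0]) / np.sqrt(2)
    assert equal_up_to_global_phase(a, b)
    assert not equal_up_to_global_phase(a, np.array([1.0, 0.0]))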
| 40.60396 | 94 | 0.710802 | [
"Apache-2.0"
] | 95-martin-orion/Cirq | cirq-core/cirq/protocols/equal_up_to_global_phase_protocol.py | 4,101 | Python |
import os
import tempfile
import pytest
import mlagents.trainers.tensorflow_to_barracuda as tf2bc
from mlagents.trainers.tests.test_nn_policy import create_policy_mock
from mlagents.trainers.settings import TrainerSettings
from mlagents.tf_utils import tf
from mlagents.model_serialization import SerializationSettings, export_policy_model
def test_barracuda_converter():
path_prefix = os.path.dirname(os.path.abspath(__file__))
tmpfile = os.path.join(
tempfile._get_default_tempdir(), next(tempfile._get_candidate_names()) + ".nn"
)
# make sure there are no left-over files
if os.path.isfile(tmpfile):
os.remove(tmpfile)
tf2bc.convert(path_prefix + "/BasicLearning.pb", tmpfile)
# test if file exists after conversion
assert os.path.isfile(tmpfile)
# currently converter produces small output file even if input file is empty
# 100 bytes is high enough to prove that conversion was successful
assert os.path.getsize(tmpfile) > 100
# cleanup
os.remove(tmpfile)
@pytest.mark.parametrize("discrete", [True, False], ids=["discrete", "continuous"])
@pytest.mark.parametrize("visual", [True, False], ids=["visual", "vector"])
@pytest.mark.parametrize("rnn", [True, False], ids=["rnn", "no_rnn"])
def test_policy_conversion(tmpdir, rnn, visual, discrete):
tf.reset_default_graph()
dummy_config = TrainerSettings(output_path=os.path.join(tmpdir, "test"))
policy = create_policy_mock(
dummy_config, use_rnn=rnn, use_discrete=discrete, use_visual=visual
)
policy.save_model(1000)
settings = SerializationSettings(
policy.model_path, os.path.join(tmpdir, policy.brain.brain_name)
)
export_policy_model(settings, policy.graph, policy.sess)
# These checks taken from test_barracuda_converter
assert os.path.isfile(os.path.join(tmpdir, "test.nn"))
assert os.path.getsize(os.path.join(tmpdir, "test.nn")) > 100
| 37.211538 | 86 | 0.740052 | [
"Apache-2.0"
] | TPihko/ml-agents | ml-agents/mlagents/trainers/tests/test_barracuda_converter.py | 1,935 | Python |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to work with checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import six
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import py_checkpoint_reader
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"load_checkpoint", "load_variable", "list_variables",
"checkpoints_iterator", "init_from_checkpoint"
]
@tf_export("train.load_checkpoint")
def load_checkpoint(ckpt_dir_or_file):
"""Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`.
If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints,
reader for the latest checkpoint is returned.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint
file.
Returns:
`CheckpointReader` object.
Raises:
ValueError: If `ckpt_dir_or_file` resolves to a directory with no
checkpoints.
"""
filename = _get_checkpoint_filename(ckpt_dir_or_file)
if filename is None:
raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
"given directory %s" % ckpt_dir_or_file)
return py_checkpoint_reader.NewCheckpointReader(filename)
@tf_export("train.load_variable")
def load_variable(ckpt_dir_or_file, name):
"""Returns the tensor value of the given variable in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
name: Name of the variable to return.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
"""
# TODO(b/29227106): Fix this in the right place and remove this.
if name.endswith(":0"):
name = name[:-2]
reader = load_checkpoint(ckpt_dir_or_file)
return reader.get_tensor(name)
@tf_export("train.list_variables")
def list_variables(ckpt_dir_or_file):
"""Returns list of all variables in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
Returns:
List of tuples `(name, shape)`.
"""
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
result = []
for name in names:
result.append((name, variable_map[name]))
return result
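# A minimal usage sketch (the checkpoint directory is hypothetical):
# `load_checkpoint` and `list_variables` together let you inspect every tensor
# stored in a checkpoint without building a graph.
#
#   reader = load_checkpoint("/tmp/model_dir")
#   for name, shape in list_variables("/tmp/model_dir"):
#     print(name, shape, reader.get_tensor(name).dtype)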
def wait_for_new_checkpoint(checkpoint_dir,
last_checkpoint=None,
seconds_to_sleep=1,
timeout=None):
"""Waits until a new checkpoint file is found.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
last_checkpoint: The last checkpoint path used or `None` if we're expecting
a checkpoint for the first time.
seconds_to_sleep: The number of seconds to sleep for before looking for a
new checkpoint.
timeout: The maximum number of seconds to wait. If left as `None`, then the
process will wait indefinitely.
Returns:
a new checkpoint path, or None if the timeout was reached.
"""
logging.info("Waiting for new checkpoint at %s", checkpoint_dir)
stop_time = time.time() + timeout if timeout is not None else None
while True:
checkpoint_path = checkpoint_management.latest_checkpoint(checkpoint_dir)
if checkpoint_path is None or checkpoint_path == last_checkpoint:
if stop_time is not None and time.time() + seconds_to_sleep > stop_time:
return None
time.sleep(seconds_to_sleep)
else:
logging.info("Found new checkpoint at %s", checkpoint_path)
return checkpoint_path
@tf_export("train.checkpoints_iterator")
def checkpoints_iterator(checkpoint_dir,
min_interval_secs=0,
timeout=None,
timeout_fn=None):
"""Continuously yield new checkpoint files as they appear.
The iterator only checks for new checkpoints when control flow has been
reverted to it. This means it can miss checkpoints if your code takes longer
to run between iterations than `min_interval_secs` or the interval at which
new checkpoints are written.
The `timeout` argument is the maximum number of seconds to block waiting for
a new checkpoint. It is used in combination with the `timeout_fn` as
follows:
* If the timeout expires and no `timeout_fn` was specified, the iterator
stops yielding.
* If a `timeout_fn` was specified, that function is called and if it returns
a true boolean value the iterator stops yielding.
* If the function returns a false boolean value then the iterator resumes the
wait for new checkpoints. At this point the timeout logic applies again.
This behavior gives control to callers on what to do if checkpoints do not
come fast enough or stop being generated. For example, if callers have a way
to detect that the training has stopped and know that no new checkpoints
will be generated, they can provide a `timeout_fn` that returns `True` when
the training has stopped. If they know that the training is still going on
they return `False` instead.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
min_interval_secs: The minimum number of seconds between yielding
checkpoints.
timeout: The maximum number of seconds to wait between checkpoints. If left
as `None`, then the process will wait indefinitely.
timeout_fn: Optional function to call after a timeout. If the function
returns True, then it means that no new checkpoints will be generated and
the iterator will exit. The function is called with no arguments.
Yields:
String paths to latest checkpoint files as they arrive.
"""
checkpoint_path = None
while True:
new_checkpoint_path = wait_for_new_checkpoint(
checkpoint_dir, checkpoint_path, timeout=timeout)
if new_checkpoint_path is None:
if not timeout_fn:
# timed out
logging.info("Timed-out waiting for a checkpoint.")
return
if timeout_fn():
# The timeout_fn indicated that we are truly done.
return
else:
# The timeout_fn indicated that more checkpoints may come.
continue
start = time.time()
checkpoint_path = new_checkpoint_path
yield checkpoint_path
time_to_next_eval = start + min_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
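# A minimal usage sketch (directory, timeout and evaluation routine are
# hypothetical): evaluate each checkpoint as it appears and stop once ten
# minutes pass with no new checkpoint, because the `timeout_fn` below reports
# that no more checkpoints are coming.
#
#   for ckpt_path in checkpoints_iterator("/tmp/train_dir", timeout=600,
#                                         timeout_fn=lambda: True):
#     run_evaluation(ckpt_path)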
@tf_export(v1=["train.init_from_checkpoint"])
def init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""Replaces `tf.Variable` initializers so they load from a checkpoint file.
Values are not loaded immediately, but when the initializer is run
(typically by running a `tf.compat.v1.global_variables_initializer` op).
Note: This overrides default initialization ops of specified variables and
redefines dtype.
Assignment map supports following syntax:
* `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in
current `scope_name` from `checkpoint_scope_name` with matching tensor
names.
* `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -
will initialize `scope_name/variable_name` variable
from `checkpoint_scope_name/some_other_variable`.
* `'scope_variable_name': variable` - will initialize given `tf.Variable`
object with tensor 'scope_variable_name' from the checkpoint.
* `'scope_variable_name': list(variable)` - will initialize list of
partitioned variables with tensor 'scope_variable_name' from the checkpoint.
* `'/': 'scope_name/'` - will load all variables in current `scope_name` from
checkpoint's root (e.g. no scope).
Supports loading into partitioned variables, which are represented as
`'<variable>/part_<part #>'`.
Example:
```python
# Say, '/tmp/model.ckpt' has the following tensors:
# -- name='old_scope_1/var1', shape=[20, 2]
# -- name='old_scope_1/var2', shape=[50, 4]
# -- name='old_scope_2/var3', shape=[100, 100]
# Create new model's variables
with tf.compat.v1.variable_scope('new_scope_1'):
var1 = tf.compat.v1.get_variable('var1', shape=[20, 2],
initializer=tf.compat.v1.zeros_initializer())
with tf.compat.v1.variable_scope('new_scope_2'):
var2 = tf.compat.v1.get_variable('var2', shape=[50, 4],
initializer=tf.compat.v1.zeros_initializer())
# Partition into 5 variables along the first axis.
var3 = tf.compat.v1.get_variable(name='var3', shape=[100, 100],
initializer=tf.compat.v1.zeros_initializer(),
partitioner=lambda shape, dtype: [5, 1])
# Initialize all variables in `new_scope_1` from `old_scope_1`.
init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/': 'new_scope_1'})
# Use names to specify which variables to initialize from checkpoint.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': 'new_scope_1/var1',
'old_scope_1/var2': 'new_scope_2/var2'})
# Or use tf.Variable objects to identify what to initialize.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': var1,
'old_scope_1/var2': var2})
# Initialize partitioned variables using variable's name
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': 'new_scope_2/var3'})
# Or specify the list of tf.Variable objects.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': var3._get_variable_list()})
```
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
assignment_map: Dict, where keys are names of the variables in the
checkpoint and values are current variables or names of current variables
(in default graph).
Raises:
ValueError: If missing variables in current graph, or if missing
checkpoints or tensors in checkpoints.
"""
init_from_checkpoint_fn = lambda _: _init_from_checkpoint(
ckpt_dir_or_file, assignment_map)
if distribution_strategy_context.get_cross_replica_context():
init_from_checkpoint_fn(None)
else:
distribution_strategy_context.get_replica_context().merge_call(
init_from_checkpoint_fn)
def _init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""See `init_from_checkpoint` for documentation."""
ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file)
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
for tensor_name_in_ckpt, current_var_or_name in sorted(
six.iteritems(assignment_map)):
var = None
# Check if this is Variable object or list of Variable objects (in case of
# partitioned variables).
if _is_variable(current_var_or_name) or (
isinstance(current_var_or_name, list)
and all(_is_variable(v) for v in current_var_or_name)):
var = current_var_or_name
else:
store_vars = vs._get_default_variable_store()._vars # pylint:disable=protected-access
# Check if this variable is in var_store.
var = store_vars.get(current_var_or_name, None)
# Also check if variable is partitioned as list.
if var is None:
var = _collect_partitioned_variable(current_var_or_name, store_vars)
if var is not None:
# If 1 to 1 mapping was provided, find variable in the checkpoint.
if tensor_name_in_ckpt not in variable_map:
raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
tensor_name_in_ckpt, ckpt_dir_or_file, variable_map
))
if _is_variable(var):
# Additional at-call-time checks.
if not var.get_shape().is_compatible_with(
variable_map[tensor_name_in_ckpt]):
raise ValueError(
"Shape of variable %s (%s) doesn't match with shape of "
"tensor %s (%s) from checkpoint reader." % (
var.name, str(var.get_shape()),
tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
))
var_name = var.name
else:
var_name = ",".join([v.name for v in var])
_set_variable_or_list_initializer(var, ckpt_file, tensor_name_in_ckpt)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, tensor_name_in_ckpt)
else:
scopes = ""
# TODO(vihanjain): Support list of 'current_var_or_name' here.
if "/" in current_var_or_name:
scopes = current_var_or_name[:current_var_or_name.rindex("/")]
if not tensor_name_in_ckpt.endswith("/"):
raise ValueError(
"Assignment map with scope only name {} should map to scope only "
"{}. Should be 'scope/': 'other_scope/'.".format(
scopes, tensor_name_in_ckpt))
# If scope to scope mapping was provided, find all variables in the scope
# and create variable to variable mapping.
scope_variables = set()
for var_name in store_vars:
if not scopes or var_name.startswith(scopes + "/"):
# Consume /part_ if partitioned variable.
if "/part_" in var_name:
var_name = var_name[:var_name.index("/part_")]
scope_variables.add(var_name)
for var_name in sorted(scope_variables):
# Lookup name with specified prefix and suffix from current variable.
# If tensor_name given is '/' (root), don't use it for full name.
full_tensor_name = var_name[len(scopes):]
if current_var_or_name != "/":
full_tensor_name = full_tensor_name[1:]
if tensor_name_in_ckpt != "/":
full_tensor_name = tensor_name_in_ckpt + full_tensor_name
# Remove trailing '/', if any, in the full_tensor_name
if full_tensor_name.endswith("/"):
full_tensor_name = full_tensor_name[:-1]
if full_tensor_name not in variable_map:
raise ValueError(
"Tensor %s (%s in %s) is not found in %s checkpoint" % (
full_tensor_name, var_name[len(scopes) + 1:],
tensor_name_in_ckpt, ckpt_dir_or_file
))
var = store_vars.get(var_name, None)
if var is None:
var = _collect_partitioned_variable(var_name, store_vars)
_set_variable_or_list_initializer(var, ckpt_file, full_tensor_name)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, full_tensor_name)
def _get_checkpoint_filename(ckpt_dir_or_file):
"""Returns checkpoint filename given directory or specific checkpoint file."""
if gfile.IsDirectory(ckpt_dir_or_file):
return checkpoint_management.latest_checkpoint(ckpt_dir_or_file)
return ckpt_dir_or_file
def _set_checkpoint_initializer(variable,
ckpt_file,
tensor_name,
slice_spec,
name="checkpoint_initializer",
# +++ DIT: default write_version=saver_pb2.SaverDef.DIT
write_version=saver_pb2.SaverDef.DIT):
# --- DIT: default write_version=saver_pb2.SaverDef.DIT
"""Overrides given variable's initialization op.
Sets variable initializer to assign op that initializes variable from tensor's
value in the checkpoint.
Args:
variable: `tf.Variable` object.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
slice_spec: Slice specification for loading partitioned tensors.
    name: Name of the operation.
    write_version: Checkpoint format to read; `SaverDef.V1` and `SaverDef.V2`
      use the standard `restore_v2` op, while `SaverDef.DIT` selects the DIT
      restore op added below.
  """
base_type = variable.dtype.base_dtype
# Do not colocate with variable since RestoreV2 op only runs on CPU and
# colocation will force variable (and other ops that colocate with variable)
# to be on CPU as well. It is okay to place the variable's initializer op on
# CPU since it will only be run once at the start.
with ops.device(variable.device), ops.device("/cpu:0"):
#restore_op = io_ops.restore_v2(
# ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
    # +++ DIT: check for restore_dit
    if write_version == saver_pb2.SaverDef.V1 or write_version == saver_pb2.SaverDef.V2:
      restore_op = io_ops.restore_v2(ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
    elif write_version == saver_pb2.SaverDef.DIT:
      restore_op = io_ops.restore_dit(ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
    else:
      raise RuntimeError("Unexpected write_version: " + str(write_version))
    # --- DIT: check for restore_dit
names_to_saveables = saveable_object_util.op_list_to_dict([variable])
saveable_objects = []
for name, op in names_to_saveables.items():
for s in saveable_object_util.saveable_objects_for_op(op, name):
saveable_objects.append(s)
assert len(saveable_objects) == 1 # Should be only one variable.
init_op = saveable_objects[0].restore([restore_op], restored_shapes=None)
# pylint:disable=protected-access
variable._initializer_op = init_op
restore_op.set_shape(variable.shape)
variable._initial_value = restore_op
# pylint:enable=protected-access
def _set_variable_or_list_initializer(variable_or_list, ckpt_file,
tensor_name):
"""Overrides initialization op of given variable or list of variables.
Calls `_set_checkpoint_initializer` for each variable in the given list of
variables.
Args:
variable_or_list: `tf.Variable` object or a list of `tf.Variable` objects.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
Raises:
ValueError: if all objects in `variable_or_list` are not partitions of the
same large variable.
"""
if isinstance(variable_or_list, (list, tuple)):
# A set of slices.
slice_name = None
for v in variable_or_list:
slice_info = v._save_slice_info # pylint:disable=protected-access
if slice_name is None:
slice_name = slice_info.full_name
elif slice_name != slice_info.full_name:
raise ValueError("Slices must all be from the same tensor: %s != %s" %
(slice_name, slice_info.full_name))
_set_checkpoint_initializer(v, ckpt_file, tensor_name, slice_info.spec)
else:
_set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, "")
def _is_variable(x):
return (isinstance(x, variables.Variable) or
resource_variable_ops.is_resource_variable(x))
def _collect_partitioned_variable(name, all_vars):
"""Returns list of `tf.Variable` that comprise the partitioned variable."""
if name + "/part_0" in all_vars:
var = []
i = 0
while name + "/part_%d" % i in all_vars:
var.append(all_vars[name + "/part_%d" % i])
i += 1
return var
return None
| 41.516393 | 104 | 0.699161 | [
"Apache-2.0"
] | KodeWorker/tensorflow | tensorflow/python/training/checkpoint_utils.py | 20,260 | Python |
from typing import Dict
from handler import Context, Arguments, CommandResult
from rpg.items import Item
from utils.formatting import codeblock
from utils.command_helpers import get_author_player
async def run(ctx: Context, args: Arguments) -> CommandResult:
    """Show the invoking player's equipment and inventory contents."""
player = await get_author_player(ctx)
if player.inventory.size:
counts: Dict[Item, int] = {}
for item in player.inventory:
counts[item] = counts.get(item, 0) + 1
inventory = "\n".join(
f"{item}{' x ' + str(count) if count > 1 else ''}"
for item, count in counts.items()
)
else:
inventory = "Ваш инвентарь пуст"
equipment_item_map = [
(slot, getattr(player.equipment, slot)) for slot in player.equipment._slots
]
equipment = "\n".join(f"{slot:>10}: {item}" for (slot, item) in equipment_item_map)
return codeblock(f"Экипировка:\n\n{equipment}\n\nИнвентарь:\n\n{inventory}")
| 30.709677 | 87 | 0.652311 | [
"MIT"
] | Tarakania/discord-bot | tarakania_rpg/commands/rpg/inventory/inventory.py | 987 | Python |
from collections import namedtuple
import contextlib
import pickle
import hashlib
from llvmlite import ir
from llvmlite.llvmpy.core import Type, Constant
import llvmlite.llvmpy.core as lc
import ctypes
from numba import _helperlib
from numba.core import (
types, utils, config, lowering, cgutils, imputils, serialize,
)
PY_UNICODE_1BYTE_KIND = _helperlib.py_unicode_1byte_kind
PY_UNICODE_2BYTE_KIND = _helperlib.py_unicode_2byte_kind
PY_UNICODE_4BYTE_KIND = _helperlib.py_unicode_4byte_kind
PY_UNICODE_WCHAR_KIND = _helperlib.py_unicode_wchar_kind
class _Registry(object):
def __init__(self):
self.functions = {}
def register(self, typeclass):
assert issubclass(typeclass, types.Type)
def decorator(func):
if typeclass in self.functions:
raise KeyError("duplicate registration for %s" % (typeclass,))
self.functions[typeclass] = func
return func
return decorator
def lookup(self, typeclass, default=None):
assert issubclass(typeclass, types.Type)
for cls in typeclass.__mro__:
func = self.functions.get(cls)
if func is not None:
return func
return default
# Registries of boxing / unboxing implementations
_boxers = _Registry()
_unboxers = _Registry()
_reflectors = _Registry()
box = _boxers.register
unbox = _unboxers.register
reflect = _reflectors.register
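# A minimal registration sketch for the decorators above (`MyType` is a
# hypothetical numba type); a box function receives the type instance, the
# native value and a _BoxContext, while an unbox function receives the type,
# the Python object and an _UnboxContext:
#
#   @box(MyType)
#   def box_my_type(typ, val, c):
#       return c.pyapi.long_from_longlong(val)
#
#   @unbox(MyType)
#   def unbox_my_type(typ, obj, c):
#       return NativeValue(c.pyapi.long_as_longlong(obj))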
class _BoxContext(namedtuple("_BoxContext",
("context", "builder", "pyapi", "env_manager"))):
"""
The facilities required by boxing implementations.
"""
__slots__ = ()
def box(self, typ, val):
return self.pyapi.from_native_value(typ, val, self.env_manager)
class _UnboxContext(namedtuple("_UnboxContext",
("context", "builder", "pyapi"))):
"""
The facilities required by unboxing implementations.
"""
__slots__ = ()
def unbox(self, typ, obj):
return self.pyapi.to_native_value(typ, obj)
class _ReflectContext(namedtuple("_ReflectContext",
("context", "builder", "pyapi", "env_manager",
"is_error"))):
"""
The facilities required by reflection implementations.
"""
__slots__ = ()
# XXX the error bit is currently unused by consumers (e.g. PyCallWrapper)
def set_error(self):
self.builder.store(self.is_error, cgutils.true_bit)
def box(self, typ, val):
return self.pyapi.from_native_value(typ, val, self.env_manager)
def reflect(self, typ, val):
return self.pyapi.reflect_native_value(typ, val, self.env_manager)
class NativeValue(object):
"""
Encapsulate the result of converting a Python object to a native value,
recording whether the conversion was successful and how to cleanup.
"""
def __init__(self, value, is_error=None, cleanup=None):
self.value = value
self.is_error = is_error if is_error is not None else cgutils.false_bit
self.cleanup = cleanup
class EnvironmentManager(object):
def __init__(self, pyapi, env, env_body, env_ptr):
assert isinstance(env, lowering.Environment)
self.pyapi = pyapi
self.env = env
self.env_body = env_body
self.env_ptr = env_ptr
def add_const(self, const):
"""
Add a constant to the environment, return its index.
"""
# All constants are frozen inside the environment
if isinstance(const, str):
const = utils.intern(const)
for index, val in enumerate(self.env.consts):
if val is const:
break
else:
index = len(self.env.consts)
self.env.consts.append(const)
return index
def read_const(self, index):
"""
Look up constant number *index* inside the environment body.
A borrowed reference is returned.
The returned LLVM value may have NULL value at runtime which indicates
an error at runtime.
"""
assert index < len(self.env.consts)
builder = self.pyapi.builder
consts = self.env_body.consts
ret = cgutils.alloca_once(builder, self.pyapi.pyobj, zfill=True)
with builder.if_else(cgutils.is_not_null(builder, consts)) as \
(br_not_null, br_null):
with br_not_null:
getitem = self.pyapi.list_getitem(consts, index)
builder.store(getitem, ret)
with br_null:
# This can happen when the Environment is accidentally released
# and has subsequently been garbage collected.
self.pyapi.err_set_string(
"PyExc_RuntimeError",
"`env.consts` is NULL in `read_const`",
)
return builder.load(ret)
_IteratorLoop = namedtuple('_IteratorLoop', ('value', 'do_break'))
class PythonAPI(object):
"""
Code generation facilities to call into the CPython C API (and related
helpers).
"""
def __init__(self, context, builder):
"""
Note: Maybe called multiple times when lowering a function
"""
from numba.core import boxing
self.context = context
self.builder = builder
self.module = builder.basic_block.function.module
# A unique mapping of serialized objects in this module
try:
self.module.__serialized
except AttributeError:
self.module.__serialized = {}
# Initialize types
self.pyobj = self.context.get_argument_type(types.pyobject)
self.pyobjptr = self.pyobj.as_pointer()
self.voidptr = Type.pointer(Type.int(8))
self.long = Type.int(ctypes.sizeof(ctypes.c_long) * 8)
self.ulong = self.long
self.longlong = Type.int(ctypes.sizeof(ctypes.c_ulonglong) * 8)
self.ulonglong = self.longlong
self.double = Type.double()
self.py_ssize_t = self.context.get_value_type(types.intp)
self.cstring = Type.pointer(Type.int(8))
self.gil_state = Type.int(_helperlib.py_gil_state_size * 8)
self.py_buffer_t = ir.ArrayType(ir.IntType(8), _helperlib.py_buffer_size)
self.py_hash_t = self.py_ssize_t
self.py_unicode_1byte_kind = _helperlib.py_unicode_1byte_kind
self.py_unicode_2byte_kind = _helperlib.py_unicode_2byte_kind
self.py_unicode_4byte_kind = _helperlib.py_unicode_4byte_kind
self.py_unicode_wchar_kind = _helperlib.py_unicode_wchar_kind
def get_env_manager(self, env, env_body, env_ptr):
return EnvironmentManager(self, env, env_body, env_ptr)
def emit_environment_sentry(self, envptr, return_pyobject=False,
debug_msg=''):
"""Emits LLVM code to ensure the `envptr` is not NULL
"""
is_null = cgutils.is_null(self.builder, envptr)
with cgutils.if_unlikely(self.builder, is_null):
if return_pyobject:
fnty = self.builder.function.type.pointee
assert fnty.return_type == self.pyobj
self.err_set_string(
"PyExc_RuntimeError", f"missing Environment: {debug_msg}",
)
self.builder.ret(self.get_null_object())
else:
self.context.call_conv.return_user_exc(
self.builder, RuntimeError,
(f"missing Environment: {debug_msg}",),
)
# ------ Python API -----
#
# Basic object API
#
def incref(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="Py_IncRef")
self.builder.call(fn, [obj])
def decref(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="Py_DecRef")
self.builder.call(fn, [obj])
def get_type(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="numba_py_type")
return self.builder.call(fn, [obj])
#
# Argument unpacking
#
def parse_tuple_and_keywords(self, args, kws, fmt, keywords, *objs):
charptr = Type.pointer(Type.int(8))
charptrary = Type.pointer(charptr)
argtypes = [self.pyobj, self.pyobj, charptr, charptrary]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_ParseTupleAndKeywords")
return self.builder.call(fn, [args, kws, fmt, keywords] + list(objs))
def parse_tuple(self, args, fmt, *objs):
charptr = Type.pointer(Type.int(8))
argtypes = [self.pyobj, charptr]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_ParseTuple")
return self.builder.call(fn, [args, fmt] + list(objs))
def unpack_tuple(self, args, name, n_min, n_max, *objs):
charptr = Type.pointer(Type.int(8))
argtypes = [self.pyobj, charptr, self.py_ssize_t, self.py_ssize_t]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_UnpackTuple")
n_min = Constant.int(self.py_ssize_t, n_min)
n_max = Constant.int(self.py_ssize_t, n_max)
if isinstance(name, str):
name = self.context.insert_const_string(self.builder.module, name)
return self.builder.call(fn, [args, name, n_min, n_max] + list(objs))
#
# Exception and errors
#
def err_occurred(self):
fnty = Type.function(self.pyobj, ())
fn = self._get_function(fnty, name="PyErr_Occurred")
return self.builder.call(fn, ())
def err_clear(self):
fnty = Type.function(Type.void(), ())
fn = self._get_function(fnty, name="PyErr_Clear")
return self.builder.call(fn, ())
def err_set_string(self, exctype, msg):
fnty = Type.function(Type.void(), [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyErr_SetString")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
if isinstance(msg, str):
msg = self.context.insert_const_string(self.module, msg)
return self.builder.call(fn, (exctype, msg))
def err_format(self, exctype, msg, *format_args):
fnty = Type.function(Type.void(), [self.pyobj, self.cstring], var_arg=True)
fn = self._get_function(fnty, name="PyErr_Format")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
if isinstance(msg, str):
msg = self.context.insert_const_string(self.module, msg)
return self.builder.call(fn, (exctype, msg) + tuple(format_args))
def raise_object(self, exc=None):
"""
Raise an arbitrary exception (type or value or (type, args)
or None - if reraising). A reference to the argument is consumed.
"""
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="numba_do_raise")
if exc is None:
exc = self.make_none()
return self.builder.call(fn, (exc,))
def err_set_object(self, exctype, excval):
fnty = Type.function(Type.void(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyErr_SetObject")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
return self.builder.call(fn, (exctype, excval))
def err_set_none(self, exctype):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="PyErr_SetNone")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
return self.builder.call(fn, (exctype,))
def err_write_unraisable(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="PyErr_WriteUnraisable")
return self.builder.call(fn, (obj,))
def err_fetch(self, pty, pval, ptb):
fnty = Type.function(Type.void(), [self.pyobjptr] * 3)
fn = self._get_function(fnty, name="PyErr_Fetch")
return self.builder.call(fn, (pty, pval, ptb))
def err_restore(self, ty, val, tb):
fnty = Type.function(Type.void(), [self.pyobj] * 3)
fn = self._get_function(fnty, name="PyErr_Restore")
return self.builder.call(fn, (ty, val, tb))
@contextlib.contextmanager
def err_push(self, keep_new=False):
"""
Temporarily push the current error indicator while the code
block is executed. If *keep_new* is True and the code block
raises a new error, the new error is kept, otherwise the old
error indicator is restored at the end of the block.
"""
pty, pval, ptb = [cgutils.alloca_once(self.builder, self.pyobj)
for i in range(3)]
self.err_fetch(pty, pval, ptb)
yield
ty = self.builder.load(pty)
val = self.builder.load(pval)
tb = self.builder.load(ptb)
if keep_new:
new_error = cgutils.is_not_null(self.builder, self.err_occurred())
with self.builder.if_else(new_error, likely=False) as (if_error, if_ok):
with if_error:
# Code block raised an error, keep it
self.decref(ty)
self.decref(val)
self.decref(tb)
with if_ok:
# Restore previous error
self.err_restore(ty, val, tb)
else:
self.err_restore(ty, val, tb)
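    # A minimal usage sketch for ``err_push`` above (``pyapi`` is a PythonAPI
    # instance and ``emit_cleanup`` a hypothetical codegen helper): a
    # pre-existing error indicator survives the block, and with
    # ``keep_new=True`` a fresh error raised inside the block wins.
    #
    #   with pyapi.err_push(keep_new=True):
    #       emit_cleanup(pyapi)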
def get_c_object(self, name):
"""
Get a Python object through its C-accessible *name*
(e.g. "PyExc_ValueError"). The underlying variable must be
a `PyObject *`, and the value of that pointer is returned.
"""
# A LLVM global variable is implicitly a pointer to the declared
# type, so fix up by using pyobj.pointee.
return self.context.get_c_value(self.builder, self.pyobj.pointee, name,
dllimport=True)
def raise_missing_global_error(self, name):
msg = "global name '%s' is not defined" % name
cstr = self.context.insert_const_string(self.module, msg)
self.err_set_string("PyExc_NameError", cstr)
def raise_missing_name_error(self, name):
msg = "name '%s' is not defined" % name
cstr = self.context.insert_const_string(self.module, msg)
self.err_set_string("PyExc_NameError", cstr)
def fatal_error(self, msg):
fnty = Type.function(Type.void(), [self.cstring])
fn = self._get_function(fnty, name="Py_FatalError")
fn.attributes.add("noreturn")
cstr = self.context.insert_const_string(self.module, msg)
self.builder.call(fn, (cstr,))
#
# Concrete dict API
#
def dict_getitem_string(self, dic, name):
"""Lookup name inside dict
Returns a borrowed reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyDict_GetItemString")
cstr = self.context.insert_const_string(self.module, name)
return self.builder.call(fn, [dic, cstr])
def dict_getitem(self, dic, name):
"""Lookup name inside dict
Returns a borrowed reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyDict_GetItem")
return self.builder.call(fn, [dic, name])
def dict_new(self, presize=0):
if presize == 0:
fnty = Type.function(self.pyobj, ())
fn = self._get_function(fnty, name="PyDict_New")
return self.builder.call(fn, ())
else:
fnty = Type.function(self.pyobj, [self.py_ssize_t])
fn = self._get_function(fnty, name="_PyDict_NewPresized")
return self.builder.call(fn,
[Constant.int(self.py_ssize_t, presize)])
def dict_setitem(self, dictobj, nameobj, valobj):
fnty = Type.function(Type.int(), (self.pyobj, self.pyobj,
self.pyobj))
fn = self._get_function(fnty, name="PyDict_SetItem")
return self.builder.call(fn, (dictobj, nameobj, valobj))
def dict_setitem_string(self, dictobj, name, valobj):
fnty = Type.function(Type.int(), (self.pyobj, self.cstring,
self.pyobj))
fn = self._get_function(fnty, name="PyDict_SetItemString")
cstr = self.context.insert_const_string(self.module, name)
return self.builder.call(fn, (dictobj, cstr, valobj))
def dict_pack(self, keyvalues):
"""
Args
-----
keyvalues: iterable of (str, llvm.Value of PyObject*)
"""
dictobj = self.dict_new()
with self.if_object_ok(dictobj):
for k, v in keyvalues:
self.dict_setitem_string(dictobj, k, v)
return dictobj
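    # A minimal usage sketch for ``dict_pack`` above (``aobj`` and ``bobj``
    # stand for PyObject* values produced earlier during lowering):
    #
    #   kwargs = pyapi.dict_pack([("axis", aobj), ("dtype", bobj)])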
#
# Concrete number APIs
#
def float_from_double(self, fval):
fnty = Type.function(self.pyobj, [self.double])
fn = self._get_function(fnty, name="PyFloat_FromDouble")
return self.builder.call(fn, [fval])
def number_as_ssize_t(self, numobj):
fnty = Type.function(self.py_ssize_t, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyNumber_AsSsize_t")
# We don't want any clipping, so pass OverflowError as the 2nd arg
exc_class = self.get_c_object("PyExc_OverflowError")
return self.builder.call(fn, [numobj, exc_class])
def number_long(self, numobj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Long")
return self.builder.call(fn, [numobj])
def long_as_ulonglong(self, numobj):
fnty = Type.function(self.ulonglong, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsUnsignedLongLong")
return self.builder.call(fn, [numobj])
def long_as_longlong(self, numobj):
fnty = Type.function(self.ulonglong, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsLongLong")
return self.builder.call(fn, [numobj])
def long_as_voidptr(self, numobj):
"""
Convert the given Python integer to a void*. This is recommended
over number_as_ssize_t as it isn't affected by signedness.
"""
fnty = Type.function(self.voidptr, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsVoidPtr")
return self.builder.call(fn, [numobj])
def _long_from_native_int(self, ival, func_name, native_int_type,
signed):
fnty = Type.function(self.pyobj, [native_int_type])
fn = self._get_function(fnty, name=func_name)
resptr = cgutils.alloca_once(self.builder, self.pyobj)
self.builder.store(self.builder.call(fn, [ival]), resptr)
return self.builder.load(resptr)
def long_from_long(self, ival):
func_name = "PyLong_FromLong"
fnty = Type.function(self.pyobj, [self.long])
fn = self._get_function(fnty, name=func_name)
return self.builder.call(fn, [ival])
def long_from_ulong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromUnsignedLong",
self.long, signed=False)
def long_from_ssize_t(self, ival):
return self._long_from_native_int(ival, "PyLong_FromSsize_t",
self.py_ssize_t, signed=True)
def long_from_longlong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromLongLong",
self.longlong, signed=True)
def long_from_ulonglong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromUnsignedLongLong",
self.ulonglong, signed=False)
def long_from_signed_int(self, ival):
"""
Return a Python integer from any native integer value.
"""
bits = ival.type.width
if bits <= self.long.width:
return self.long_from_long(self.builder.sext(ival, self.long))
elif bits <= self.longlong.width:
return self.long_from_longlong(self.builder.sext(ival, self.longlong))
else:
raise OverflowError("integer too big (%d bits)" % (bits))
def long_from_unsigned_int(self, ival):
"""
Same as long_from_signed_int, but for unsigned values.
"""
bits = ival.type.width
if bits <= self.ulong.width:
return self.long_from_ulong(self.builder.zext(ival, self.ulong))
elif bits <= self.ulonglong.width:
return self.long_from_ulonglong(self.builder.zext(ival, self.ulonglong))
else:
raise OverflowError("integer too big (%d bits)" % (bits))
def _get_number_operator(self, name):
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyNumber_%s" % name)
return fn
def _call_number_operator(self, name, lhs, rhs, inplace=False):
if inplace:
name = "InPlace" + name
fn = self._get_number_operator(name)
return self.builder.call(fn, [lhs, rhs])
def number_add(self, lhs, rhs, inplace=False):
return self._call_number_operator("Add", lhs, rhs, inplace=inplace)
def number_subtract(self, lhs, rhs, inplace=False):
return self._call_number_operator("Subtract", lhs, rhs, inplace=inplace)
def number_multiply(self, lhs, rhs, inplace=False):
return self._call_number_operator("Multiply", lhs, rhs, inplace=inplace)
def number_truedivide(self, lhs, rhs, inplace=False):
return self._call_number_operator("TrueDivide", lhs, rhs, inplace=inplace)
def number_floordivide(self, lhs, rhs, inplace=False):
return self._call_number_operator("FloorDivide", lhs, rhs, inplace=inplace)
def number_remainder(self, lhs, rhs, inplace=False):
return self._call_number_operator("Remainder", lhs, rhs, inplace=inplace)
def number_matrix_multiply(self, lhs, rhs, inplace=False):
return self._call_number_operator("MatrixMultiply", lhs, rhs, inplace=inplace)
def number_lshift(self, lhs, rhs, inplace=False):
return self._call_number_operator("Lshift", lhs, rhs, inplace=inplace)
def number_rshift(self, lhs, rhs, inplace=False):
return self._call_number_operator("Rshift", lhs, rhs, inplace=inplace)
def number_and(self, lhs, rhs, inplace=False):
return self._call_number_operator("And", lhs, rhs, inplace=inplace)
def number_or(self, lhs, rhs, inplace=False):
return self._call_number_operator("Or", lhs, rhs, inplace=inplace)
def number_xor(self, lhs, rhs, inplace=False):
return self._call_number_operator("Xor", lhs, rhs, inplace=inplace)
def number_power(self, lhs, rhs, inplace=False):
fnty = Type.function(self.pyobj, [self.pyobj] * 3)
fname = "PyNumber_InPlacePower" if inplace else "PyNumber_Power"
fn = self._get_function(fnty, fname)
return self.builder.call(fn, [lhs, rhs, self.borrow_none()])
def number_negative(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Negative")
return self.builder.call(fn, (obj,))
def number_positive(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Positive")
return self.builder.call(fn, (obj,))
def number_float(self, val):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Float")
return self.builder.call(fn, [val])
def number_invert(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Invert")
return self.builder.call(fn, (obj,))
def float_as_double(self, fobj):
fnty = Type.function(self.double, [self.pyobj])
fn = self._get_function(fnty, name="PyFloat_AsDouble")
return self.builder.call(fn, [fobj])
def bool_from_bool(self, bval):
"""
Get a Python bool from a LLVM boolean.
"""
longval = self.builder.zext(bval, self.long)
return self.bool_from_long(longval)
def bool_from_long(self, ival):
fnty = Type.function(self.pyobj, [self.long])
fn = self._get_function(fnty, name="PyBool_FromLong")
return self.builder.call(fn, [ival])
def complex_from_doubles(self, realval, imagval):
fnty = Type.function(self.pyobj, [Type.double(), Type.double()])
fn = self._get_function(fnty, name="PyComplex_FromDoubles")
return self.builder.call(fn, [realval, imagval])
def complex_real_as_double(self, cobj):
fnty = Type.function(Type.double(), [self.pyobj])
fn = self._get_function(fnty, name="PyComplex_RealAsDouble")
return self.builder.call(fn, [cobj])
def complex_imag_as_double(self, cobj):
fnty = Type.function(Type.double(), [self.pyobj])
fn = self._get_function(fnty, name="PyComplex_ImagAsDouble")
return self.builder.call(fn, [cobj])
#
# Concrete slice API
#
def slice_as_ints(self, obj):
"""
Read the members of a slice of integers.
Returns a (ok, start, stop, step) tuple where ok is a boolean and
the following members are pointer-sized ints.
"""
pstart = cgutils.alloca_once(self.builder, self.py_ssize_t)
pstop = cgutils.alloca_once(self.builder, self.py_ssize_t)
pstep = cgutils.alloca_once(self.builder, self.py_ssize_t)
fnty = Type.function(Type.int(),
[self.pyobj] + [self.py_ssize_t.as_pointer()] * 3)
fn = self._get_function(fnty, name="numba_unpack_slice")
res = self.builder.call(fn, (obj, pstart, pstop, pstep))
start = self.builder.load(pstart)
stop = self.builder.load(pstop)
step = self.builder.load(pstep)
return cgutils.is_null(self.builder, res), start, stop, step
#
# List and sequence APIs
#
def sequence_getslice(self, obj, start, stop):
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t,
self.py_ssize_t])
fn = self._get_function(fnty, name="PySequence_GetSlice")
return self.builder.call(fn, (obj, start, stop))
def sequence_tuple(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PySequence_Tuple")
return self.builder.call(fn, [obj])
def list_new(self, szval):
fnty = Type.function(self.pyobj, [self.py_ssize_t])
fn = self._get_function(fnty, name="PyList_New")
return self.builder.call(fn, [szval])
def list_size(self, lst):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PyList_Size")
return self.builder.call(fn, [lst])
def list_append(self, lst, val):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyList_Append")
return self.builder.call(fn, [lst, val])
def list_setitem(self, lst, idx, val):
"""
Warning: Steals reference to ``val``
"""
fnty = Type.function(Type.int(), [self.pyobj, self.py_ssize_t,
self.pyobj])
fn = self._get_function(fnty, name="PyList_SetItem")
return self.builder.call(fn, [lst, idx, val])
def list_getitem(self, lst, idx):
"""
Returns a borrowed reference.
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t])
fn = self._get_function(fnty, name="PyList_GetItem")
if isinstance(idx, int):
idx = self.context.get_constant(types.intp, idx)
return self.builder.call(fn, [lst, idx])
def list_setslice(self, lst, start, stop, obj):
if obj is None:
obj = self.get_null_object()
fnty = Type.function(Type.int(), [self.pyobj, self.py_ssize_t,
self.py_ssize_t, self.pyobj])
fn = self._get_function(fnty, name="PyList_SetSlice")
return self.builder.call(fn, (lst, start, stop, obj))
#
# Concrete tuple API
#
def tuple_getitem(self, tup, idx):
"""
Borrow reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t])
fn = self._get_function(fnty, name="PyTuple_GetItem")
idx = self.context.get_constant(types.intp, idx)
return self.builder.call(fn, [tup, idx])
def tuple_pack(self, items):
fnty = Type.function(self.pyobj, [self.py_ssize_t], var_arg=True)
fn = self._get_function(fnty, name="PyTuple_Pack")
n = self.context.get_constant(types.intp, len(items))
args = [n]
args.extend(items)
return self.builder.call(fn, args)
def tuple_size(self, tup):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PyTuple_Size")
return self.builder.call(fn, [tup])
def tuple_new(self, count):
fnty = Type.function(self.pyobj, [Type.int()])
fn = self._get_function(fnty, name='PyTuple_New')
return self.builder.call(fn, [self.context.get_constant(types.int32,
count)])
def tuple_setitem(self, tuple_val, index, item):
"""
Steals a reference to `item`.
"""
fnty = Type.function(Type.int(), [self.pyobj, Type.int(), self.pyobj])
setitem_fn = self._get_function(fnty, name='PyTuple_SetItem')
index = self.context.get_constant(types.int32, index)
self.builder.call(setitem_fn, [tuple_val, index, item])
#
# Concrete set API
#
def set_new(self, iterable=None):
if iterable is None:
iterable = self.get_null_object()
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PySet_New")
return self.builder.call(fn, [iterable])
def set_add(self, set, value):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PySet_Add")
return self.builder.call(fn, [set, value])
def set_clear(self, set):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PySet_Clear")
return self.builder.call(fn, [set])
def set_size(self, set):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PySet_Size")
return self.builder.call(fn, [set])
def set_update(self, set, iterable):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="_PySet_Update")
return self.builder.call(fn, [set, iterable])
def set_next_entry(self, set, posptr, keyptr, hashptr):
fnty = Type.function(Type.int(),
[self.pyobj, self.py_ssize_t.as_pointer(),
self.pyobj.as_pointer(), self.py_hash_t.as_pointer()])
fn = self._get_function(fnty, name="_PySet_NextEntry")
return self.builder.call(fn, (set, posptr, keyptr, hashptr))
@contextlib.contextmanager
def set_iterate(self, set):
builder = self.builder
hashptr = cgutils.alloca_once(builder, self.py_hash_t, name="hashptr")
keyptr = cgutils.alloca_once(builder, self.pyobj, name="keyptr")
posptr = cgutils.alloca_once_value(builder,
ir.Constant(self.py_ssize_t, 0),
name="posptr")
bb_body = builder.append_basic_block("bb_body")
bb_end = builder.append_basic_block("bb_end")
builder.branch(bb_body)
def do_break():
builder.branch(bb_end)
with builder.goto_block(bb_body):
r = self.set_next_entry(set, posptr, keyptr, hashptr)
finished = cgutils.is_null(builder, r)
with builder.if_then(finished, likely=False):
builder.branch(bb_end)
yield _IteratorLoop(builder.load(keyptr), do_break)
builder.branch(bb_body)
builder.position_at_end(bb_end)
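    # A minimal usage sketch for ``set_iterate`` above (``pyapi`` is a
    # PythonAPI instance and ``setobj`` a PyObject* known to be a set):
    # ``loop.value`` is a borrowed reference to the current key and
    # ``loop.do_break()`` emits an early-exit branch.
    #
    #   with pyapi.set_iterate(setobj) as loop:
    #       keyobj = loop.value
    #       ...  # generate code that consumes keyobj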
#
# GIL APIs
#
def gil_ensure(self):
"""
Ensure the GIL is acquired.
The returned value must be consumed by gil_release().
"""
gilptrty = Type.pointer(self.gil_state)
fnty = Type.function(Type.void(), [gilptrty])
fn = self._get_function(fnty, "numba_gil_ensure")
gilptr = cgutils.alloca_once(self.builder, self.gil_state)
self.builder.call(fn, [gilptr])
return gilptr
def gil_release(self, gil):
"""
Release the acquired GIL by gil_ensure().
Must be paired with a gil_ensure().
"""
gilptrty = Type.pointer(self.gil_state)
fnty = Type.function(Type.void(), [gilptrty])
fn = self._get_function(fnty, "numba_gil_release")
return self.builder.call(fn, [gil])
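    # A minimal pairing sketch for ``gil_ensure``/``gil_release`` above
    # (``pyapi`` is a PythonAPI instance): the state returned by ``gil_ensure``
    # must be handed back to ``gil_release``.
    #
    #   gilstate = pyapi.gil_ensure()
    #   ...  # emit calls that require the GIL
    #   pyapi.gil_release(gilstate)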
def save_thread(self):
"""
Release the GIL and return the former thread state
(an opaque non-NULL pointer).
"""
fnty = Type.function(self.voidptr, [])
fn = self._get_function(fnty, name="PyEval_SaveThread")
return self.builder.call(fn, [])
def restore_thread(self, thread_state):
"""
Restore the given thread state by reacquiring the GIL.
"""
fnty = Type.function(Type.void(), [self.voidptr])
fn = self._get_function(fnty, name="PyEval_RestoreThread")
self.builder.call(fn, [thread_state])
#
# Generic object private data (a way of associating an arbitrary void *
# pointer to an arbitrary Python object).
#
def object_get_private_data(self, obj):
fnty = Type.function(self.voidptr, [self.pyobj])
fn = self._get_function(fnty, name="numba_get_pyobject_private_data")
return self.builder.call(fn, (obj,))
def object_set_private_data(self, obj, ptr):
fnty = Type.function(Type.void(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="numba_set_pyobject_private_data")
return self.builder.call(fn, (obj, ptr))
def object_reset_private_data(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="numba_reset_pyobject_private_data")
return self.builder.call(fn, (obj,))
#
# Other APIs (organize them better!)
#
def import_module_noblock(self, modname):
fnty = Type.function(self.pyobj, [self.cstring])
fn = self._get_function(fnty, name="PyImport_ImportModuleNoBlock")
return self.builder.call(fn, [modname])
def call_function_objargs(self, callee, objargs):
fnty = Type.function(self.pyobj, [self.pyobj], var_arg=True)
fn = self._get_function(fnty, name="PyObject_CallFunctionObjArgs")
args = [callee] + list(objargs)
args.append(self.context.get_constant_null(types.pyobject))
return self.builder.call(fn, args)
def call_method(self, callee, method, objargs=()):
cname = self.context.insert_const_string(self.module, method)
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring, self.cstring],
var_arg=True)
fn = self._get_function(fnty, name="PyObject_CallMethod")
fmt = 'O' * len(objargs)
cfmt = self.context.insert_const_string(self.module, fmt)
args = [callee, cname, cfmt]
if objargs:
args.extend(objargs)
args.append(self.context.get_constant_null(types.pyobject))
return self.builder.call(fn, args)
def call(self, callee, args=None, kws=None):
if args is None:
args = self.get_null_object()
if kws is None:
kws = self.get_null_object()
fnty = Type.function(self.pyobj, [self.pyobj] * 3)
fn = self._get_function(fnty, name="PyObject_Call")
return self.builder.call(fn, (callee, args, kws))
def object_istrue(self, obj):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PyObject_IsTrue")
return self.builder.call(fn, [obj])
def object_not(self, obj):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PyObject_Not")
return self.builder.call(fn, [obj])
def object_richcompare(self, lhs, rhs, opstr):
"""
Refer to Python source Include/object.h for macros definition
of the opid.
"""
ops = ['<', '<=', '==', '!=', '>', '>=']
if opstr in ops:
opid = ops.index(opstr)
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj, Type.int()])
fn = self._get_function(fnty, name="PyObject_RichCompare")
lopid = self.context.get_constant(types.int32, opid)
return self.builder.call(fn, (lhs, rhs, lopid))
elif opstr == 'is':
bitflag = self.builder.icmp(lc.ICMP_EQ, lhs, rhs)
return self.bool_from_bool(bitflag)
elif opstr == 'is not':
bitflag = self.builder.icmp(lc.ICMP_NE, lhs, rhs)
return self.bool_from_bool(bitflag)
elif opstr in ('in', 'not in'):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PySequence_Contains")
status = self.builder.call(fn, (rhs, lhs))
negone = self.context.get_constant(types.int32, -1)
is_good = self.builder.icmp(lc.ICMP_NE, status, negone)
# Stack allocate output and initialize to Null
outptr = cgutils.alloca_once_value(self.builder,
Constant.null(self.pyobj))
# If PySequence_Contains returns non-error value
with cgutils.if_likely(self.builder, is_good):
if opstr == 'not in':
status = self.builder.not_(status)
# Store the status as a boolean object
truncated = self.builder.trunc(status, Type.int(1))
self.builder.store(self.bool_from_bool(truncated),
outptr)
return self.builder.load(outptr)
else:
raise NotImplementedError("Unknown operator {op!r}".format(
op=opstr))
def iter_next(self, iterobj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyIter_Next")
return self.builder.call(fn, [iterobj])
def object_getiter(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetIter")
return self.builder.call(fn, [obj])
def object_getattr_string(self, obj, attr):
cstr = self.context.insert_const_string(self.module, attr)
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyObject_GetAttrString")
return self.builder.call(fn, [obj, cstr])
def object_getattr(self, obj, attr):
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetAttr")
return self.builder.call(fn, [obj, attr])
def object_setattr_string(self, obj, attr, val):
cstr = self.context.insert_const_string(self.module, attr)
fnty = Type.function(Type.int(), [self.pyobj, self.cstring, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetAttrString")
return self.builder.call(fn, [obj, cstr, val])
def object_setattr(self, obj, attr, val):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetAttr")
return self.builder.call(fn, [obj, attr, val])
def object_delattr_string(self, obj, attr):
# PyObject_DelAttrString() is actually a C macro calling
# PyObject_SetAttrString() with value == NULL.
return self.object_setattr_string(obj, attr, self.get_null_object())
def object_delattr(self, obj, attr):
# PyObject_DelAttr() is actually a C macro calling
# PyObject_SetAttr() with value == NULL.
return self.object_setattr(obj, attr, self.get_null_object())
def object_getitem(self, obj, key):
"""
Return obj[key]
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetItem")
return self.builder.call(fn, (obj, key))
def object_setitem(self, obj, key, val):
"""
obj[key] = val
"""
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetItem")
return self.builder.call(fn, (obj, key, val))
def object_delitem(self, obj, key):
"""
del obj[key]
"""
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_DelItem")
return self.builder.call(fn, (obj, key))
def string_as_string(self, strobj):
fnty = Type.function(self.cstring, [self.pyobj])
fname = "PyUnicode_AsUTF8"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [strobj])
def string_as_string_and_size(self, strobj):
"""
Returns a tuple of ``(ok, buffer, length)``.
The ``ok`` is i1 value that is set if ok.
The ``buffer`` is a i8* of the output buffer.
The ``length`` is a i32/i64 (py_ssize_t) of the length of the buffer.
"""
p_length = cgutils.alloca_once(self.builder, self.py_ssize_t)
fnty = Type.function(self.cstring, [self.pyobj,
self.py_ssize_t.as_pointer()])
fname = "PyUnicode_AsUTF8AndSize"
fn = self._get_function(fnty, name=fname)
buffer = self.builder.call(fn, [strobj, p_length])
ok = self.builder.icmp_unsigned('!=',
ir.Constant(buffer.type, None),
buffer)
return (ok, buffer, self.builder.load(p_length))
def string_as_string_size_and_kind(self, strobj):
"""
        Returns a tuple of ``(ok, buffer, length, kind, is_ascii, hash)``.
        The ``ok`` is an i1 value that is set if ok.
        The ``buffer`` is an i8* of the output buffer.
        The ``length`` is an i32/i64 (py_ssize_t) of the length of the buffer.
        The ``kind`` is an i32 (int32) of the Unicode kind constant.
        The ``is_ascii`` is an i32 (int32) flag set when the string is ASCII-only.
        The ``hash`` is a long/uint64_t (py_hash_t) of the Unicode constant hash.
"""
p_length = cgutils.alloca_once(self.builder, self.py_ssize_t)
p_kind = cgutils.alloca_once(self.builder, Type.int())
p_ascii = cgutils.alloca_once(self.builder, Type.int())
p_hash = cgutils.alloca_once(self.builder, self.py_hash_t)
fnty = Type.function(self.cstring, [self.pyobj,
self.py_ssize_t.as_pointer(),
Type.int().as_pointer(),
Type.int().as_pointer(),
self.py_hash_t.as_pointer()])
fname = "numba_extract_unicode"
fn = self._get_function(fnty, name=fname)
buffer = self.builder.call(
fn, [strobj, p_length, p_kind, p_ascii, p_hash])
ok = self.builder.icmp_unsigned('!=',
ir.Constant(buffer.type, None),
buffer)
return (ok, buffer, self.builder.load(p_length),
self.builder.load(p_kind), self.builder.load(p_ascii),
self.builder.load(p_hash))
def string_from_string_and_size(self, string, size):
fnty = Type.function(self.pyobj, [self.cstring, self.py_ssize_t])
fname = "PyString_FromStringAndSize"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string, size])
def string_from_string(self, string):
fnty = Type.function(self.pyobj, [self.cstring])
fname = "PyUnicode_FromString"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string])
def string_from_kind_and_data(self, kind, string, size):
fnty = Type.function(self.pyobj, [Type.int(), self.cstring, self.py_ssize_t])
fname = "PyUnicode_FromKindAndData"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [kind, string, size])
def bytes_from_string_and_size(self, string, size):
fnty = Type.function(self.pyobj, [self.cstring, self.py_ssize_t])
fname = "PyBytes_FromStringAndSize"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string, size])
def object_hash(self, obj):
fnty = Type.function(self.py_hash_t, [self.pyobj,])
fname = "PyObject_Hash"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [obj,])
def object_str(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyObject_Str")
return self.builder.call(fn, [obj])
def make_none(self):
obj = self.borrow_none()
self.incref(obj)
return obj
def borrow_none(self):
return self.get_c_object("_Py_NoneStruct")
def sys_write_stdout(self, fmt, *args):
fnty = Type.function(Type.void(), [self.cstring], var_arg=True)
fn = self._get_function(fnty, name="PySys_FormatStdout")
return self.builder.call(fn, (fmt,) + args)
def object_dump(self, obj):
"""
Dump a Python object on C stderr. For debugging purposes.
"""
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="_PyObject_Dump")
return self.builder.call(fn, (obj,))
#
# NRT (Numba runtime) APIs
#
def nrt_adapt_ndarray_to_python(self, aryty, ary, dtypeptr):
assert self.context.enable_nrt, "NRT required"
intty = ir.IntType(32)
fnty = Type.function(self.pyobj,
[self.voidptr, intty, intty, self.pyobj])
fn = self._get_function(fnty, name="NRT_adapt_ndarray_to_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
ndim = self.context.get_constant(types.int32, aryty.ndim)
writable = self.context.get_constant(types.int32, int(aryty.mutable))
aryptr = cgutils.alloca_once_value(self.builder, ary)
return self.builder.call(fn, [self.builder.bitcast(aryptr,
self.voidptr),
ndim, writable, dtypeptr])
def nrt_meminfo_new_from_pyobject(self, data, pyobj):
"""
Allocate a new MemInfo with data payload borrowed from a python
object.
"""
mod = self.builder.module
fnty = ir.FunctionType(
cgutils.voidptr_t,
[cgutils.voidptr_t, cgutils.voidptr_t],
)
fn = mod.get_or_insert_function(
fnty,
name="NRT_meminfo_new_from_pyobject",
)
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [data, pyobj])
def nrt_meminfo_as_pyobject(self, miptr):
mod = self.builder.module
fnty = ir.FunctionType(
self.pyobj,
[cgutils.voidptr_t]
)
fn = mod.get_or_insert_function(
fnty,
name='NRT_meminfo_as_pyobject',
)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [miptr])
def nrt_meminfo_from_pyobject(self, miobj):
mod = self.builder.module
fnty = ir.FunctionType(
cgutils.voidptr_t,
[self.pyobj]
)
fn = mod.get_or_insert_function(
fnty,
name='NRT_meminfo_from_pyobject',
)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [miobj])
def nrt_adapt_ndarray_from_python(self, ary, ptr):
assert self.context.enable_nrt
fnty = Type.function(Type.int(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="NRT_adapt_ndarray_from_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (ary, ptr))
def nrt_adapt_buffer_from_python(self, buf, ptr):
assert self.context.enable_nrt
fnty = Type.function(Type.void(), [Type.pointer(self.py_buffer_t),
self.voidptr])
fn = self._get_function(fnty, name="NRT_adapt_buffer_from_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (buf, ptr))
# ------ utils -----
def _get_function(self, fnty, name):
return self.module.get_or_insert_function(fnty, name=name)
def alloca_obj(self):
return self.builder.alloca(self.pyobj)
def alloca_buffer(self):
"""
Return a pointer to a stack-allocated, zero-initialized Py_buffer.
"""
# Treat the buffer as an opaque array of bytes
ptr = cgutils.alloca_once_value(self.builder,
lc.Constant.null(self.py_buffer_t))
return ptr
@contextlib.contextmanager
def if_object_ok(self, obj):
with cgutils.if_likely(self.builder,
cgutils.is_not_null(self.builder, obj)):
yield
def print_object(self, obj):
strobj = self.object_str(obj)
cstr = self.string_as_string(strobj)
fmt = self.context.insert_const_string(self.module, "%s")
self.sys_write_stdout(fmt, cstr)
self.decref(strobj)
def print_string(self, text):
fmt = self.context.insert_const_string(self.module, text)
self.sys_write_stdout(fmt)
def get_null_object(self):
return Constant.null(self.pyobj)
def return_none(self):
none = self.make_none()
self.builder.ret(none)
def list_pack(self, items):
n = len(items)
seq = self.list_new(self.context.get_constant(types.intp, n))
with self.if_object_ok(seq):
for i in range(n):
idx = self.context.get_constant(types.intp, i)
self.incref(items[i])
self.list_setitem(seq, idx, items[i])
return seq
def unserialize(self, structptr):
"""
        Unserialize some data.  *structptr* should be a pointer to
        a {i8* data, i32 length, i8* hashbuf} structure, as produced by
        serialize_uncached() / serialize_object().
"""
fnty = Type.function(self.pyobj,
(self.voidptr, ir.IntType(32), self.voidptr))
fn = self._get_function(fnty, name="numba_unpickle")
ptr = self.builder.extract_value(self.builder.load(structptr), 0)
n = self.builder.extract_value(self.builder.load(structptr), 1)
hashed = self.builder.extract_value(self.builder.load(structptr), 2)
return self.builder.call(fn, (ptr, n, hashed))
def serialize_uncached(self, obj):
"""
Same as serialize_object(), but don't create a global variable,
simply return a literal {i8* data, i32 length, i8* hashbuf} structure.
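
        How the fields are filled (for reference; mirrors the code below):
            data    -> serialize.dumps(obj) (pickled bytes)
            length  -> number of pickled bytes
            hashbuf -> hashlib.sha1(data).digest(), re-checked by numba_unpickle()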
"""
# First make the array constant
data = serialize.dumps(obj)
assert len(data) < 2**31
name = ".const.pickledata.%s" % (id(obj) if config.DIFF_IR == 0 else "DIFF_IR")
bdata = cgutils.make_bytearray(data)
# Make SHA1 hash on the pickled content
# NOTE: update buffer size in numba_unpickle() when changing the
# hash algorithm.
hashed = cgutils.make_bytearray(hashlib.sha1(data).digest())
arr = self.context.insert_unique_const(self.module, name, bdata)
hasharr = self.context.insert_unique_const(
self.module, f"{name}.sha1", hashed,
)
# Then populate the structure constant
struct = ir.Constant.literal_struct([
arr.bitcast(self.voidptr),
ir.Constant(ir.IntType(32), arr.type.pointee.count),
hasharr.bitcast(self.voidptr),
])
return struct
def serialize_object(self, obj):
"""
        Serialize the given object in the bitcode, and return it
        as a pointer to a {i8* data, i32 length, i8* hashbuf} structure
        constant (suitable for passing to unserialize()).
"""
try:
gv = self.module.__serialized[obj]
except KeyError:
struct = self.serialize_uncached(obj)
name = ".const.picklebuf.%s" % (id(obj) if config.DIFF_IR == 0 else "DIFF_IR")
gv = self.context.insert_unique_const(self.module, name, struct)
# Make the id() (and hence the name) unique while populating the module.
self.module.__serialized[obj] = gv
return gv
def c_api_error(self):
return cgutils.is_not_null(self.builder, self.err_occurred())
def to_native_value(self, typ, obj):
"""
Unbox the Python object as the given Numba type.
A NativeValue instance is returned.
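
        Hypothetical call site (names are illustrative only)::

            native = pyapi.to_native_value(types.int64, someobj)
            value, is_error = native.value, native.is_error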
"""
from numba.core.boxing import unbox_unsupported
impl = _unboxers.lookup(typ.__class__, unbox_unsupported)
c = _UnboxContext(self.context, self.builder, self)
return impl(typ, obj, c)
def from_native_return(self, typ, val, env_manager):
assert not isinstance(typ, types.Optional), "callconv should have " \
"prevented the return of " \
"optional value"
out = self.from_native_value(typ, val, env_manager)
return out
def from_native_value(self, typ, val, env_manager=None):
"""
Box the native value of the given Numba type. A Python object
pointer is returned (NULL if an error occurred).
This method steals any native (NRT) reference embedded in *val*.
"""
from numba.core.boxing import box_unsupported
impl = _boxers.lookup(typ.__class__, box_unsupported)
c = _BoxContext(self.context, self.builder, self, env_manager)
return impl(typ, val, c)
def reflect_native_value(self, typ, val, env_manager=None):
"""
Reflect the native value onto its Python original, if any.
An error bit (as an LLVM value) is returned.
"""
impl = _reflectors.lookup(typ.__class__)
if impl is None:
# Reflection isn't needed for most types
return cgutils.false_bit
is_error = cgutils.alloca_once_value(self.builder, cgutils.false_bit)
c = _ReflectContext(self.context, self.builder, self, env_manager,
is_error)
impl(typ, val, c)
return self.builder.load(c.is_error)
def to_native_generator(self, obj, typ):
"""
Extract the generator structure pointer from a generator *obj*
(a _dynfunc.Generator instance).
"""
gen_ptr_ty = Type.pointer(self.context.get_data_type(typ))
value = self.context.get_generator_state(self.builder, obj, gen_ptr_ty)
return NativeValue(value)
def from_native_generator(self, val, typ, env=None):
"""
Make a Numba generator (a _dynfunc.Generator instance) from a
generator structure pointer *val*.
*env* is an optional _dynfunc.Environment instance to be wrapped
in the generator.
"""
llty = self.context.get_data_type(typ)
assert not llty.is_pointer
gen_struct_size = self.context.get_abi_sizeof(llty)
gendesc = self.context.get_generator_desc(typ)
# This is the PyCFunctionWithKeywords generated by PyCallWrapper
genfnty = Type.function(self.pyobj, [self.pyobj, self.pyobj, self.pyobj])
genfn = self._get_function(genfnty, name=gendesc.llvm_cpython_wrapper_name)
# This is the raw finalizer generated by _lower_generator_finalize_func()
finalizerty = Type.function(Type.void(), [self.voidptr])
if typ.has_finalizer:
finalizer = self._get_function(finalizerty, name=gendesc.llvm_finalizer_name)
else:
finalizer = Constant.null(Type.pointer(finalizerty))
# PyObject *numba_make_generator(state_size, initial_state, nextfunc, finalizer, env)
fnty = Type.function(self.pyobj, [self.py_ssize_t,
self.voidptr,
Type.pointer(genfnty),
Type.pointer(finalizerty),
self.voidptr])
fn = self._get_function(fnty, name="numba_make_generator")
state_size = ir.Constant(self.py_ssize_t, gen_struct_size)
initial_state = self.builder.bitcast(val, self.voidptr)
if env is None:
env = self.get_null_object()
env = self.builder.bitcast(env, self.voidptr)
return self.builder.call(fn,
(state_size, initial_state, genfn, finalizer, env))
def numba_array_adaptor(self, ary, ptr):
assert not self.context.enable_nrt
fnty = Type.function(Type.int(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="numba_adapt_ndarray")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (ary, ptr))
def numba_buffer_adaptor(self, buf, ptr):
fnty = Type.function(Type.void(),
[ir.PointerType(self.py_buffer_t), self.voidptr])
fn = self._get_function(fnty, name="numba_adapt_buffer")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (buf, ptr))
def complex_adaptor(self, cobj, cmplx):
fnty = Type.function(Type.int(), [self.pyobj, cmplx.type])
fn = self._get_function(fnty, name="numba_complex_adaptor")
return self.builder.call(fn, [cobj, cmplx])
def extract_record_data(self, obj, pbuf):
fnty = Type.function(self.voidptr,
[self.pyobj, ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_extract_record_data")
return self.builder.call(fn, [obj, pbuf])
def get_buffer(self, obj, pbuf):
fnty = Type.function(Type.int(),
[self.pyobj, ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_get_buffer")
return self.builder.call(fn, [obj, pbuf])
def release_buffer(self, pbuf):
fnty = Type.function(Type.void(), [ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_release_buffer")
return self.builder.call(fn, [pbuf])
def extract_np_datetime(self, obj):
fnty = Type.function(Type.int(64), [self.pyobj])
fn = self._get_function(fnty, name="numba_extract_np_datetime")
return self.builder.call(fn, [obj])
def extract_np_timedelta(self, obj):
fnty = Type.function(Type.int(64), [self.pyobj])
fn = self._get_function(fnty, name="numba_extract_np_timedelta")
return self.builder.call(fn, [obj])
def create_np_datetime(self, val, unit_code):
unit_code = Constant.int(Type.int(), unit_code)
fnty = Type.function(self.pyobj, [Type.int(64), Type.int()])
fn = self._get_function(fnty, name="numba_create_np_datetime")
return self.builder.call(fn, [val, unit_code])
def create_np_timedelta(self, val, unit_code):
unit_code = Constant.int(Type.int(), unit_code)
fnty = Type.function(self.pyobj, [Type.int(64), Type.int()])
fn = self._get_function(fnty, name="numba_create_np_timedelta")
return self.builder.call(fn, [val, unit_code])
def recreate_record(self, pdata, size, dtype, env_manager):
fnty = Type.function(self.pyobj, [Type.pointer(Type.int(8)),
Type.int(), self.pyobj])
fn = self._get_function(fnty, name="numba_recreate_record")
dtypeaddr = env_manager.read_const(env_manager.add_const(dtype))
return self.builder.call(fn, [pdata, size, dtypeaddr])
def string_from_constant_string(self, string):
cstr = self.context.insert_const_string(self.module, string)
sz = self.context.get_constant(types.intp, len(string))
return self.string_from_string_and_size(cstr, sz)
def call_jit_code(self, func, sig, args):
"""Calls into Numba jitted code and propagate error using the Python
calling convention.
Parameters
----------
func : function
The Python function to be compiled. This function is compiled
in nopython-mode.
sig : numba.typing.Signature
The function signature for *func*.
args : Sequence[llvmlite.binding.Value]
LLVM values to use as arguments.
Returns
-------
(is_error, res) : 2-tuple of llvmlite.binding.Value.
is_error : true iff *func* raised an exception.
res : Returned value from *func* iff *is_error* is false.
If *is_error* is true, this method will adapt the nopython exception
into a Python exception. Caller should return NULL to Python to
indicate an error.
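
        Hypothetical call site (names are illustrative; *sig* must be a Numba
        Signature matching *func*)::

            is_error, res = pyapi.call_jit_code(func, sig, [llvm_arg])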
"""
# Compile *func*
builder = self.builder
cres = self.context.compile_subroutine(builder, func, sig)
got_retty = cres.signature.return_type
retty = sig.return_type
if got_retty != retty:
# This error indicates an error in *func* or the caller of this
# method.
raise errors.LoweringError(
f'mismatching signature {got_retty} != {retty}.\n'
)
# Call into *func*
status, res = self.context.call_internal_no_propagate(
builder, cres.fndesc, sig, args,
)
# Post-call handling for *func*
is_error_ptr = cgutils.alloca_once(builder, cgutils.bool_t, zfill=True)
res_type = self.context.get_value_type(sig.return_type)
res_ptr = cgutils.alloca_once(builder, res_type, zfill=True)
# Handle error and adapt the nopython exception into cpython exception
with builder.if_else(status.is_error) as (has_err, no_err):
with has_err:
builder.store(status.is_error, is_error_ptr)
# Set error state in the Python interpreter
self.context.call_conv.raise_error(builder, self, status)
with no_err:
# Handle returned value
res = imputils.fix_returning_optional(
self.context, builder, sig, status, res,
)
builder.store(res, res_ptr)
is_error = builder.load(is_error_ptr)
res = builder.load(res_ptr)
return is_error, res
class ObjModeUtils:
"""Internal utils for calling objmode dispatcher from within NPM code.
"""
def __init__(self, pyapi):
self.pyapi = pyapi
def load_dispatcher(self, fnty, argtypes):
builder = self.pyapi.builder
tyctx = self.pyapi.context
m = builder.module
# Add a global variable to cache the objmode dispatcher
gv = ir.GlobalVariable(
m, self.pyapi.pyobj,
name=m.get_unique_name("cached_objmode_dispatcher"),
)
gv.initializer = gv.type.pointee(None)
gv.linkage = 'internal'
cached = builder.load(gv)
with builder.if_then(cgutils.is_null(builder, cached)):
if serialize.is_serialiable(fnty.dispatcher):
cls = type(self)
compiler = self.pyapi.unserialize(
self.pyapi.serialize_object(cls._call_objmode_dispatcher)
)
serialized_dispatcher = self.pyapi.serialize_object(
(fnty.dispatcher, tuple(argtypes)),
)
compile_args = self.pyapi.unserialize(serialized_dispatcher)
callee = self.pyapi.call_function_objargs(
compiler, [compile_args],
)
# Clean up
self.pyapi.decref(compiler)
self.pyapi.decref(compile_args)
else:
entry_pt = fnty.dispatcher.compile(tuple(argtypes))
callee = tyctx.add_dynamic_addr(
builder, id(entry_pt), info="with_objectmode",
)
# Incref the dispatcher and cache it
self.pyapi.incref(callee)
builder.store(callee, gv)
callee = builder.load(gv)
return callee
@staticmethod
def _call_objmode_dispatcher(compile_args):
dispatcher, argtypes = compile_args
entrypt = dispatcher.compile(argtypes)
return entrypt
| 40.028365 | 93 | 0.614591 | ["BSD-2-Clause"] | DrTodd13/numba | numba/core/pythonapi.py | 66,327 | Python |
from rest_framework import serializers
from .models import Post, Comment, Friendship, Follow, Server, PostCategory, PostAuthorizedAuthor
# REST API Serializer JSON https://www.youtube.com/watch?v=V4NjlXiu5WI
from users.models import CustomUser
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = CustomUser
fields = '__all__' #['id', 'username', 'password', 'last_login']
# "id", "last_login", "is_superuser", "first_name", "last_name",
# "email", "is_staff", "is_active", "date_joined", "username",
# "password", "admin", "timestamp", "groups", "user_permissions"
class PostSerializer(serializers.ModelSerializer):
class Meta:
model = Post
fields = '__all__'
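# Hypothetical usage sketch for the serializers above (``post`` stands for an
# existing Post instance; kept as comments so module behaviour is unchanged):
#   PostSerializer(post).data               # model instance -> dict for JSON rendering
#   s = PostSerializer(data=request.data)   # incoming payload -> validated data
#   s.is_valid() and s.save()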
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
fields = '__all__'
class PostCategorySerializer(serializers.ModelSerializer):
class Meta:
model = PostCategory
fields = '__all__'
class PostAuthorizedAuthorSerializer(serializers.ModelSerializer):
class Meta:
model = PostAuthorizedAuthor
fields = '__all__'
class FriendshipSerializer(serializers.ModelSerializer):
class Meta:
model = Friendship
fields = '__all__'
class FollowSerializer(serializers.ModelSerializer):
class Meta:
model = Follow
fields = '__all__'
class ServerSerializer(serializers.ModelSerializer):
# id = serializers.ReadOnlyField()
class Meta:
model = Server
        fields = '__all__'
| 30.843137 | 97 | 0.682136 | ["MIT"] | cjlee1/group-project-cmput404 | backend/project404_t8/API/serializers.py | 1,573 | Python |
Version = "5.6.5"
if __name__ == "__main__":
    print(Version)
| 11.166667 | 26 | 0.597015 | ["BSD-3-Clause"] | webpie/webpie | webpie/Version.py | 67 | Python |
import csv
from default_clf import DefaultNSL
from itertools import chain
from time import process_time
import numpy as np
import pandas as pd
NUM_PASSES = 100
NUM_ACC_PASSES = 50
TRAIN_PATH = 'data/KDDTrain+.csv'
TEST_PATH = 'data/KDDTest+.csv'
ATTACKS = {
'normal': 'normal',
'back': 'DoS',
'land': 'DoS',
'neptune': 'DoS',
'pod': 'DoS',
'smurf': 'DoS',
'teardrop': 'DoS',
'mailbomb': 'DoS',
'apache2': 'DoS',
'processtable': 'DoS',
'udpstorm': 'DoS',
'ipsweep': 'Probe',
'nmap': 'Probe',
'portsweep': 'Probe',
'satan': 'Probe',
'mscan': 'Probe',
'saint': 'Probe',
'ftp_write': 'R2L',
'guess_passwd': 'R2L',
'imap': 'R2L',
'multihop': 'R2L',
'phf': 'R2L',
'spy': 'R2L',
'warezclient': 'R2L',
'warezmaster': 'R2L',
'sendmail': 'R2L',
'named': 'R2L',
'snmpgetattack': 'R2L',
'snmpguess': 'R2L',
'xlock': 'R2L',
'xsnoop': 'R2L',
'worm': 'R2L',
'buffer_overflow': 'U2R',
'loadmodule': 'U2R',
'perl': 'U2R',
'rootkit': 'U2R',
'httptunnel': 'U2R',
'ps': 'U2R',
'sqlattack': 'U2R',
'xterm': 'U2R'
}
def get_current_charge():
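    # Read the battery charge counter exposed by Linux sysfs; returns 0 when the
    # file is unavailable (e.g. desktops or non-Linux hosts), making charge deltas 0.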
try:
with open('/sys/class/power_supply/BAT0/charge_now') as f:
return int(f.readline())
except IOError:
print("Cannot find current battery charge.")
return 0
def check_load_training(clf, path):
start = process_time()
clf.load_training_data(path)
end = process_time()
return end - start
def check_load_testing(clf, path):
start = process_time()
clf.load_test_data(path)
end = process_time()
return end - start
def check_training(clf):
start = process_time()
clf.train_clf()
end = process_time()
return end - start
def check_testing_entire_dataset(clf, train=False):
start = process_time()
clf.test_clf(train)
end = process_time()
return end - start
def check_predict_row(clf, row):
start = process_time()
clf.predict(row)
end = process_time()
return end - start
def get_stats(arr, function, *args, **kwargs):
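    # Run *function* NUM_PASSES times, storing each run's CPU time in *arr*, and
    # sample the battery charge before/after the whole loop to estimate energy use.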
charge_start = get_current_charge()
for i in range(NUM_PASSES):
arr[i] = function(*args, **kwargs)
charge_end = get_current_charge()
mean = arr.mean()
std = arr.std()
return [mean, std, (charge_start - charge_end)]
def evaluate_power(clf):
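    # Time each pipeline stage (loading train/test data, fitting, predicting on the
    # full test set and on a single row) and append the statistics to results.csv.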
res = np.empty(shape=(NUM_PASSES, 1))
load_train = get_stats(res, check_load_training, clf, TRAIN_PATH)
print('Loading Training: ', load_train)
load_test = get_stats(res, check_load_testing, clf, TEST_PATH)
print('Loading Testing: ', load_test)
train = get_stats(res, check_training, clf)
print('Training: ', train)
test_dataset = get_stats(res, check_testing_entire_dataset, clf)
print('Testing dataset: ', test_dataset)
row = clf.testing[0].iloc[0].values.reshape(1, -1)
test_row = get_stats(res, check_predict_row, clf, row)
print('Testing one row: ', test_row)
with open('results.csv', 'a', newline='') as csvf:
csv_writer = csv.writer(csvf)
csv_writer.writerow([clf.__class__.__name__, 'Number of Passes:', NUM_PASSES, 'Power'])
csv_writer.writerow(['Function', 'Time (s) Mean', 'Time Std',
'Total Power (microwatt-hour)'])
csv_writer.writerow(['Loading Training Data'] + load_train)
csv_writer.writerow(['Loading Testing Data'] + load_test)
csv_writer.writerow(['Training Classifier'] + train)
csv_writer.writerow(['Testing Dataset'] + test_dataset)
csv_writer.writerow(['Testing One Row'] + test_row)
def evaluate_accuracy(clf):
acc = np.empty(shape=(NUM_ACC_PASSES, 1))
clf.load_training_data(TRAIN_PATH)
clf.load_test_data(TEST_PATH)
cat_labels = clf.testing[1].apply(lambda x: ATTACKS[x])
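    # For every traffic category keep two per-pass counters:
    # index 0 -> rows predicted as an attack, index 1 -> rows predicted as 'normal'.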
cats = {'U2R':[np.zeros(shape=(NUM_ACC_PASSES, 1)), np.zeros(shape=(NUM_ACC_PASSES, 1))],
'DoS':[np.zeros(shape=(NUM_ACC_PASSES, 1)), np.zeros(shape=(NUM_ACC_PASSES, 1))],
'R2L':[np.zeros(shape=(NUM_ACC_PASSES, 1)), np.zeros(shape=(NUM_ACC_PASSES, 1))],
'Probe':[np.zeros(shape=(NUM_ACC_PASSES, 1)), np.zeros(shape=(NUM_ACC_PASSES, 1))],
'normal':[np.zeros(shape=(NUM_ACC_PASSES, 1)), np.zeros(shape=(NUM_ACC_PASSES, 1))]}
for i in range(0, NUM_ACC_PASSES):
clf.train_clf()
preds, acc[i] = clf.test_clf()
for cat, pred in zip(cat_labels, preds):
cats[cat][pred == 'normal'][i] += 1
clf.shuffle_training_data()
conf = calculate_category_accuracy(cats)
mean = acc.mean()
std = acc.std()
write_acc_to_csv([mean, std], cats, conf, clf.__class__.__name__)
return [mean, std]
def calculate_category_accuracy(cats):
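    # Convert the per-category counters into mean/std detection rates (in place) and
    # accumulate TP/TN/FP/FN confusion counts, treating 'normal' as the negative class.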
conf = {'TN':np.zeros(shape=(NUM_ACC_PASSES, 1)), 'TP':np.zeros(shape=(NUM_ACC_PASSES, 1)),
'FN':np.zeros(shape=(NUM_ACC_PASSES, 1)), 'FP':np.zeros(shape=(NUM_ACC_PASSES, 1))}
for key, values in cats.items():
correct = values[0]
wrong = values[1]
if key == 'normal':
correct, wrong = wrong, correct
conf['TN'] += correct
conf['FP'] += wrong
else:
conf['TP'] += correct
conf['FN'] += wrong
avg = correct/(correct+wrong)
cats[key] = [avg.mean(), avg.std()]
return conf
def write_acc_to_csv(acc, cats, conf, name):
with open('results.csv', 'a', newline='') as csvf:
csv_writer = csv.writer(csvf)
csv_writer.writerow([name, 'Number of Passes:', NUM_ACC_PASSES, 'Accuracy'])
csv_writer.writerow(['Statistic', 'Mean', 'STD'])
csv_writer.writerow(['Accuracy'] + acc)
for key, values in cats.items():
csv_writer.writerow([key] + values)
for key, values in conf.items():
csv_writer.writerow([key, values.mean(), values.std()])
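# --- Hypothetical usage sketch (not part of the original script) ---
# Assumes DefaultNSL() can be constructed without arguments and implements the
# load/train/test/predict/shuffle methods used above; adjust to the real class API.
if __name__ == '__main__':
    nsl_clf = DefaultNSL()
    evaluate_power(nsl_clf)            # timing + battery-charge benchmark
    print(evaluate_accuracy(nsl_clf))  # [mean, std] accuracy over NUM_ACC_PASSES runs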
| 30.880208 | 96 | 0.611908 | ["MIT"] | AnomalyIDSBenchmark/MLBenchmark | benchmark.py | 5,929 | Python |
"""
AKShare is an open-source financial data interface library based on Python. It provides
tools covering the whole pipeline from data collection and data cleaning to data download
for the price/volume data, fundamental data and alternative data of financial products such
as stocks, futures, options, funds, bonds and foreign exchange, meeting the data-acquisition
needs of financial data scientists and data-science enthusiasts. Its distinguishing feature
is that the data obtained through AKShare is raw data published by trustworthy sources, so
data scientists can post-process that raw data and draw scientific conclusions from it.
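
A minimal, hypothetical usage sketch (the interface name is taken from the version
history recorded below; availability depends on the installed AKShare version):

    import akshare as ak
    df = ak.stock_zh_a_spot()  # real-time A-share quotes as a pandas DataFrame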
"""
"""
Version history:
0.1.13
更新所有基于 fushare 的接口
0.1.14
更新 requirements.txt 文件
0.1.15
自动安装所需要的 packages
0.1.16
修正部分函数命名
0.1.17
更新版本号自动管理
0.1.18
更新说明文档
0.1.19
修正 cot.py 中请求错误
0.1.20
修正 __doc__
0.1.21
修复 __doc__
0.1.22
修复命名和绘图
0.1.23
修复错误机制
0.1.24
增加奇货可查所有指数数据获取接口
0.1.25
修复 qhck 接口
0.1.26
修复代码格式问题
0.1.27
修复说明格式问题
0.1.28
更新说明文档
0.1.29
规范说明文档格式
0.1.30
规范说明文档格式
0.1.31
规范 cot.py 函数说明
0.1.32
update futures_basis.py
0.1.33
增加奇货可查数据三个接口:
get_qhkc_index, get_qhkc_index_trend, get_qhkc_index_profit_loss
使用方法请 help(get_qhkc_index) 查看
0.1.34
增加奇货可查-资金数据三个接口:
get_qhkc_fund_position_change, get_qhkc_fund_bs, get_qhkc_fund_position
使用方法请 help(get_qhkc_fund_position_change) 查看
0.1.35
增加奇货可查-工具-外盘比价接口:
get_qhkc_tool_foreign
使用方法请 help(get_qhkc_tool_foreign) 查看
0.1.36
增加奇货可查-工具-各地区经济数据接口:
get_qhkc_tool_gdp
使用方法请 help(get_qhkc_tool_gdp) 查看
0.1.37
增加中国银行间市场交易商协会-债券接口
get_bond_bank
使用方法请 help(get_bond_bank) 查看
0.1.38
修正
0.1.39
模块化处理
0.1.40
统一接口函数参数 start --> start_day; end --> end_day
0.1.41
更新大连商品交易所-苯乙烯-EB品种
0.1.42
更新上海期货交易所-上海国际能源交易中心-20号胶-NR品种
更新上海期货交易所-不锈钢-SS品种
0.1.43
修复 example --> test.py 函数调用
0.1.44
修复 example --> daily_run.py 函数调用
0.1.45
修复 akdocker.md 函数接口调用说明和感谢单位
0.1.46
修复 akdocker.md 图片显示
0.1.47
修复 akdocker.md 增加说明部分
0.1.48
更新大连商品交易所-粳米-RR品种
0.1.49
增加智道智科-私募指数数据接口
使用方法请 help(get_zdzk_fund_index) 查看
0.1.50
更新 akdocker.md 文件
0.1.51
更新官方文档: https://akshare.readthedocs.io
0.1.52
增加量化策略和量化平台板块
0.1.53
增加期货品种列表和名词解释
0.1.54
修改 AkShare的初衷, 增加管理期货策略指数
0.1.55
新增 99期货(http://www.99qh.com/d/store.aspx) 库存数据接口
0.1.56
修复 99期货(http://www.99qh.com/d/store.aspx) 库存数据接口
0.1.57
更新 md 文件数据接口
0.1.58
更新 md 文件数据接口
0.1.59
更新 md 文件数据接口
0.1.60
更新 致谢部分, 申明借鉴和引用的 package
0.1.61
更新说明文档
0.1.62
提供英为财情-股票指数-全球股指与期货指数数据接口
https://cn.investing.com/indices/
0.1.63
更新说明文档-致谢英为财情
0.1.64
更新 get_country_index 返回格式为日期索引
0.1.65
更新 get_country_index 返回格式数据开盘, 收盘, 高, 低为浮点型
0.1.66
提供英为财情-股票指数-全球股指与期货指数数据接口
https://cn.investing.com/rates-bonds/
新增 get_country_bond 返回格式数据开盘, 收盘, 高, 低为浮点型
0.1.67
更新说明文档-私募指数数据说明
0.1.68
更新说明文档-私募指数数据说明-增加图片
0.1.69
更新说明文档-债券说明格式调整
0.1.70
更新大商所, 郑商所商品期权数据接口
0.1.71
更新大商所, 郑商所, 上期所商品期权数据接口
0.1.72
修改大商所, 郑商所, 上期所商品期权数据接口
增加函数说明
更新说明文档-期权部分
0.1.73
更新说明文档-期权部分
0.1.74
更新说明文档格式调整
0.1.75
新增外汇接口, 银行间债券市场行情数据接口
0.1.76
更新说明文档
0.1.77
新增全球期货历史数据查询接口
0.1.78
新增全球宏观数据-中国宏观数据
年度、月度CPI数据, 年度M2数据
0.1.79
更新说明文档
0.1.80
更新说明文档-刷新
0.1.81
新增全球宏观数据-中国宏观数据
中国年度PPI数据
中国年度PMI数据
中国年度GDP数据
中国年度财新PMI数据
中国外汇储备数据
中国电力能源数据
中国年度非制造业PMI数据
人民币中间报价汇率
0.1.82
新增全球宏观数据-美国宏观数据
美联储利率决议报告
美国非农就业人数报告
美国失业率报告
美国EIA原油库存报告
0.1.83
更新说明文档
0.1.84
新增全球宏观数据-美国宏观数据
美国初请失业金人数报告
美国核心PCE物价指数年率报告
美国CPI月率报告
美联储劳动力市场状况指数报告
美国ADP就业人数报告
美国国内生产总值(GDP)报告
美国原油产量报告
新增全球宏观数据-欧洲宏观数据
欧洲央行决议报告
新增全球宏观数据-机构宏观数据
全球最大黄金ETF—SPDR Gold Trust持仓报告
全球最大白银ETF--iShares Silver Trust持仓报告
欧佩克报告
0.1.85
新增期货-仓单有效期接口
0.1.86
更新说明文档
0.1.87
新增和讯财经-企业社会责任数据接口
0.1.88
更新说明文档
0.1.89
更新requirements.txt
0.1.90
更新setup.py
0.1.91
新增和讯财经-中国概念股行情及日频历史数据接口
0.1.92
更新说明文档
0.1.93
新增交易法门-套利工具-跨期价差(自由价差)数据接口
0.1.94
新增生意社-商品与期货-现期图数据接口
新增西本新干线-指数数据
0.1.95
新增新浪财经-期货-实时数据接口
0.1.96
修正新浪财经-期货-实时数据接口-返回 current_price 字段为实时数据
0.1.97
修正新浪财经-期货-实时数据接口-返回 current_price 和 ask_price 字段为实时数据
0.1.98
修正版本更新错误
0.1.99
增加自动安装 pillow
0.2.1
增加港股当日(时点)行情数据和历史数据(前复权和后复权因子)
0.2.2
增加美股当日(时点)行情数据和历史数据(前复权因子)
0.2.3
增加金融期权
0.2.4
增加加密货币行情接口
0.2.5
增加 AKShare 接口导图
0.2.6
更新港股数据接口和说明文档
0.2.7
更新 qhkc_web 接口注释和说明文档
0.2.8
更新说明文档
0.2.9
更新A+H股数据实时行情数据和历史行情数据(后复权)
0.2.10
更新说明文档
0.2.11
更新说明文档
0.2.12
增加A股实时行情数据和历史行情数据
0.2.13
统一股票接口命名
0.2.14
统一股票接口命名, 去除 get
0.2.15
增加科创板实时行情数据和历史行情数据
0.2.16
增加银保监分局本级行政处罚数据
0.2.17
更新说明文档
0.2.18
修正银保监分局本级行政处罚数据接口字段命名
0.2.19
增加 Nodejs 安装说明
0.2.20
增加 Realized Library 接口
0.2.21
更新说明文档
0.2.22
更新说明文档
0.2.23
修正银保监分局本级行政处罚数据接口反扒升级-修改完成
0.2.24
增加FF多因子模型数据接口
0.2.25
更新说明文档
0.2.26
修正期货-实时行情: 接口命名, 字段补充及限制访问速度
0.2.27
增加新浪-外盘期货实时行情数据接口
0.2.28
修正新浪-外盘期货实时行情数据引入
更新文档
0.2.29
更新文档
0.2.30
监管-银保监: 反扒措施在变化, 更新接口
修正期货-国内-实时行情接口订阅问题
0.2.31
修正期货-国内-金融期货实时行情接口订阅问题
0.2.32
更新说明文档
0.2.33
更新说明文档-期货-外盘
0.2.34
新增新浪-指数实时行情和历史行情接口
0.2.35
新增新浪-指数和A股实时行情列表获取问题
0.2.36
新增腾讯财经-A股分笔行情历史数据
0.2.37
新增金十数据-实时监控接口
0.2.38
更新说明文档
0.2.39
更新说明文档目录结构
增加专题教程-pandas专题-连载
0.2.40
更新专题板块
0.2.41
更新说明文件
0.2.42
更新mindmap
0.2.43
重构说明文档-模块化处理, 将 github 说明文档和 docs 在线文档分开处理
重构私募指数接口
0.2.44
增加日出和日落模块
0.2.45
增加河北空气指数数据
0.2.46
更新 requirements.txt
0.2.47
添加初始化文件
0.2.48
添加 websocket-client
0.2.49
南华期货-南华商品指数
0.2.50
修正英为财情-指数板块的成交量显示问题
0.2.51
消除部分警告信息
0.2.52
基差数据缺失错误提示修正
0.2.53
统一南华期货-商品指数历史走势-收益率指数
新增南华期货-商品指数历史走势-价格指数
新增南华期货-商品指数历史走势-波动率指数
0.2.54
添加 numpy 依赖
0.2.55
更新已实现波动率的说明文档
统一 ff_crr --> article_ff_crr
0.2.56
新增经济政策不确定性(EPU)数据接口
更新说明文档
修改示例说明
0.2.57
修改 air_hebei 接口, 默认返回全部城市
0.2.58
新增微博指数
0.2.59
增加西本新干线说明文档
0.2.60
新增百度指数
0.2.61
修正河北空气数据代码
0.2.62
新增百度搜索指数
新增百度资讯指数
新增百度媒体指数
0.2.63
更新指数-legend代码
0.2.64
fix pillow>=6.2.0
0.2.65
新增谷歌指数
0.2.66
修正南华指数URL硬编码问题
0.2.67
修正 get_futures_index 函数中上海期货交易所
CU 出现 cuefp 数据导致指数合成异常的问题
0.2.68
降低 Python 版本要求
0.2.69
降低python版本要求到 Python3.7.1
0.2.70
适配 VNPY 使用
0.2.71
交易法门数据接口
0.2.72
申万行业一级指数-实时
0.2.73
更新纯碱期货数据接口
0.2.74
新增AQI空气质量数据接口
0.2.75
新增申万一级指数接口
0.2.76
统一交易法门登录和数据获取接口
0.2.77
清除冗余函数
0.2.78
Python 降级
0.2.79
Python 降级
0.2.80
Python 3.6
0.2.81
html5lib
0.2.82
websockets-8.1
0.2.83
修复 weibo_index 函数日期格式问题
0.2.84
修复 baidu_index 接口
0.2.85
临时修复 baidu_index 接口
0.2.86
lxml 降级
0.2.87
lxml 降级
更新安装时的错误处理
0.2.88
pypinyin 降级
0.2.89
全国空气质量数据数据格式规范为数值型
0.2.90
更新注册仓单的产品参数和异常错误
0.2.91
世界五百强公司排名接口
0.2.92
更新中国债券市场行情数据接口
0.2.93
增加自动测试模型
0.2.94
增加私募基金管理人信息公示接口
0.2.95
增加中国证券投资基金业协会-信息公示
0.2.96
修复交易法门登录验证码
由于交易法门-数据部分权限缘故, 需要注册后方可使用
0.2.97
更新说明文档
0.2.98
增加甲醇期权和PTA期权
0.2.99
更新外汇数据接口, 规范格式
0.3.0
猫眼电影实时票房
0.3.1
更新说明文档
0.3.2
更新说明文档
0.3.3
更新外盘期货行情订阅时, 统一字段名称与网页端一致
0.3.4
新增能源-碳排放权数据
0.3.5
新增世界各大城市生活成本数据
0.3.6
商品现货价格指数
0.3.7
修复百度指数日期问题
0.3.8
新增中国宏观数据接口和文档说明
0.3.9
新增中国宏观杠杆率数据
0.3.10
修改金融期权数据接口
0.3.11
修复实时票房数据接口
0.3.12
新增新浪主力连续接口
0.3.13
新增新浪主力连续列表
0.3.14
中国倒闭公司名单
0.3.15
中国独角兽名单
中国千里马名单
0.3.16
东方财富-机构调研
0.3.17
东方财富网-数据中心-特色数据-机构调研
机构调研统计
机构调研详细
0.3.18
修复自动测试接口
0.3.19
修复融资融券字段名匹配问题
增加东方财富网-数据中心-特色数据-股票质押
0.3.20
东方财富网-数据中心-特色数据-股权质押
东方财富网-数据中心-特色数据-股权质押-股权质押市场概况: http://data.eastmoney.com/gpzy/marketProfile.aspx
东方财富网-数据中心-特色数据-股权质押-上市公司质押比例: http://data.eastmoney.com/gpzy/pledgeRatio.aspx
东方财富网-数据中心-特色数据-股权质押-重要股东股权质押明细: http://data.eastmoney.com/gpzy/pledgeDetail.aspx
东方财富网-数据中心-特色数据-股权质押-质押机构分布统计-证券公司: http://data.eastmoney.com/gpzy/distributeStatistics.aspx
东方财富网-数据中心-特色数据-股权质押-质押机构分布统计-银行: http://data.eastmoney.com/gpzy/distributeStatistics.aspx
东方财富网-数据中心-特色数据-股权质押-行业数据: http://data.eastmoney.com/gpzy/industryData.aspx
0.3.21
东方财富网-数据中心-特色数据-商誉
东方财富网-数据中心-特色数据-商誉-A股商誉市场概况: http://data.eastmoney.com/sy/scgk.html
东方财富网-数据中心-特色数据-商誉-商誉减值预期明细: http://data.eastmoney.com/sy/yqlist.html
东方财富网-数据中心-特色数据-商誉-个股商誉减值明细: http://data.eastmoney.com/sy/jzlist.html
东方财富网-数据中心-特色数据-商誉-个股商誉明细: http://data.eastmoney.com/sy/list.html
东方财富网-数据中心-特色数据-商誉-行业商誉: http://data.eastmoney.com/sy/hylist.html
0.3.22
期货规则-交易日历数据表
更新2020交易日历数据
0.3.23
东方财富网-数据中心-特色数据-股票账户统计: http://data.eastmoney.com/cjsj/gpkhsj.html
0.3.24
移除-交易法门系列老函数
因为交易法门网站需要会员登录后访问数据
0.3.25
增加-交易法门-工具-套利分析接口
增加-交易法门-工具-交易规则接口
0.3.26
增加-交易法门-数据-农产品-豆油
增加-交易法门-数据-黑色系-焦煤
增加-交易法门-工具-持仓分析-期货分析
增加-交易法门-工具-持仓分析-持仓分析
0.3.27
交易法门-说明文档
0.3.28
增加-股票指数-股票指数成份股接口
0.3.29
增加-股票指数-股票指数成份股接口-代码注释
0.3.30
增加-义乌小商品指数
0.3.31
修复-银保监分局本级行政处罚数据接口
接口重命名为: bank_fjcf_table_detail
0.3.32
新增-中国电煤价格指数
0.3.33
修复-银保监分局本级行政处罚数据接口-20200108新增字段后适应
0.3.34
增加-交易法门-工具-期限分析-基差日报
增加-交易法门-工具-期限分析-基差分析
增加-交易法门-工具-期限分析-期限结构
增加-交易法门-工具-期限分析-价格季节性
0.3.35
更新说明文档
0.3.36
# 交易法门-工具-仓单分析
增加-交易法门-工具-仓单分析-仓单日报
增加-交易法门-工具-仓单分析-仓单查询
增加-交易法门-工具-仓单分析-虚实盘比查询
# 交易法门-工具-资讯汇总
增加-交易法门-工具-资讯汇总-研报查询
增加-交易法门-工具-资讯汇总-交易日历
# 交易法门-工具-资金分析
增加-交易法门-工具-资金分析-资金流向
0.3.37
更新说明文档
0.3.38
修改-交易法门-工具-资金分析-资金流向函数的字段和说明文档
0.3.39
金十数据中心-经济指标-央行利率-主要央行利率
美联储利率决议报告
欧洲央行决议报告
新西兰联储决议报告
中国央行决议报告
瑞士央行决议报告
英国央行决议报告
澳洲联储决议报告
日本央行决议报告
俄罗斯央行决议报告
印度央行决议报告
巴西央行决议报告
macro_euro_gdp_yoy # 金十数据中心-经济指标-欧元区-国民经济运行状况-经济状况-欧元区季度GDP年率报告
macro_euro_cpi_mom # 金十数据中心-经济指标-欧元区-国民经济运行状况-物价水平-欧元区CPI月率报告
macro_euro_cpi_yoy # 金十数据中心-经济指标-欧元区-国民经济运行状况-物价水平-欧元区CPI年率报告
macro_euro_ppi_mom # 金十数据中心-经济指标-欧元区-国民经济运行状况-物价水平-欧元区PPI月率报告
macro_euro_retail_sales_mom # 金十数据中心-经济指标-欧元区-国民经济运行状况-物价水平-欧元区零售销售月率报告
macro_euro_employment_change_qoq # 金十数据中心-经济指标-欧元区-国民经济运行状况-劳动力市场-欧元区季调后就业人数季率报告
macro_euro_unemployment_rate_mom # 金十数据中心-经济指标-欧元区-国民经济运行状况-劳动力市场-欧元区失业率报告
macro_euro_trade_balance # 金十数据中心-经济指标-欧元区-贸易状况-欧元区未季调贸易帐报告
macro_euro_current_account_mom # 金十数据中心-经济指标-欧元区-贸易状况-欧元区经常帐报告
macro_euro_industrial_production_mom # 金十数据中心-经济指标-欧元区-产业指标-欧元区工业产出月率报告
macro_euro_manufacturing_pmi # 金十数据中心-经济指标-欧元区-产业指标-欧元区制造业PMI初值报告
macro_euro_services_pmi # 金十数据中心-经济指标-欧元区-产业指标-欧元区服务业PMI终值报告
macro_euro_zew_economic_sentiment # 金十数据中心-经济指标-欧元区-领先指标-欧元区ZEW经济景气指数报告
macro_euro_sentix_investor_confidence # 金十数据中心-经济指标-欧元区-领先指标-欧元区Sentix投资者信心指数报告
0.3.40
修复-欧洲央行决议报告
0.3.41
增加-东方财富网-经济数据-银行间拆借利率
0.3.42
# 中国
macro_china_gdp_yearly # 金十数据中心-经济指标-中国-国民经济运行状况-经济状况-中国GDP年率报告
macro_china_cpi_yearly # 金十数据中心-经济指标-中国-国民经济运行状况-物价水平-中国CPI年率报告
macro_china_cpi_monthly # 金十数据中心-经济指标-中国-国民经济运行状况-物价水平-中国CPI月率报告
macro_china_ppi_yearly # 金十数据中心-经济指标-中国-国民经济运行状况-物价水平-中国PPI年率报告
macro_china_exports_yoy # 金十数据中心-经济指标-中国-贸易状况-以美元计算出口年率报告
macro_china_imports_yoy # 金十数据中心-经济指标-中国-贸易状况-以美元计算进口年率
macro_china_trade_balance # 金十数据中心-经济指标-中国-贸易状况-以美元计算贸易帐(亿美元)
macro_china_industrial_production_yoy # 金十数据中心-经济指标-中国-产业指标-规模以上工业增加值年率
macro_china_pmi_yearly # 金十数据中心-经济指标-中国-产业指标-官方制造业PMI
macro_china_cx_pmi_yearly # 金十数据中心-经济指标-中国-产业指标-财新制造业PMI终值
macro_china_cx_services_pmi_yearly # 金十数据中心-经济指标-中国-产业指标-财新服务业PMI
macro_china_non_man_pmi # 金十数据中心-经济指标-中国-产业指标-中国官方非制造业PMI
macro_china_fx_reserves_yearly # 金十数据中心-经济指标-中国-金融指标-外汇储备(亿美元)
macro_china_m2_yearly # 金十数据中心-经济指标-中国-金融指标-M2货币供应年率
macro_china_shibor_all # 金十数据中心-经济指标-中国-金融指标-上海银行业同业拆借报告
macro_china_hk_market_info # 金十数据中心-经济指标-中国-金融指标-人民币香港银行同业拆息
macro_china_daily_energy # 金十数据中心-经济指标-中国-其他-中国日度沿海六大电库存数据
macro_china_rmb # 金十数据中心-经济指标-中国-其他-中国人民币汇率中间价报告
macro_china_market_margin_sz # 金十数据中心-经济指标-中国-其他-深圳融资融券报告
macro_china_market_margin_sh # 金十数据中心-经济指标-中国-其他-上海融资融券报告
macro_china_au_report # 金十数据中心-经济指标-中国-其他-上海黄金交易所报告
macro_china_ctci # 发改委-中国电煤价格指数-全国综合电煤价格指数
macro_china_ctci_detail # 发改委-中国电煤价格指数-各价区电煤价格指数
macro_china_ctci_detail_hist # 发改委-中国电煤价格指数-历史电煤价格指数
macro_china_money_supply # 中国货币供应量
# 美国
macro_usa_gdp_monthly # 金十数据中心-经济指标-美国-经济状况-美国GDP
macro_usa_cpi_monthly # 金十数据中心-经济指标-美国-物价水平-美国CPI月率报告
macro_usa_core_cpi_monthly # 金十数据中心-经济指标-美国-物价水平-美国核心CPI月率报告
macro_usa_personal_spending # 金十数据中心-经济指标-美国-物价水平-美国个人支出月率报告
macro_usa_retail_sales # 金十数据中心-经济指标-美国-物价水平-美国零售销售月率报告
macro_usa_import_price # 金十数据中心-经济指标-美国-物价水平-美国进口物价指数报告
macro_usa_export_price # 金十数据中心-经济指标-美国-物价水平-美国出口价格指数报告
macro_usa_lmci # 金十数据中心-经济指标-美国-劳动力市场-LMCI
macro_usa_unemployment_rate # 金十数据中心-经济指标-美国-劳动力市场-失业率-美国失业率报告
macro_usa_job_cuts # 金十数据中心-经济指标-美国-劳动力市场-失业率-美国挑战者企业裁员人数报告
macro_usa_non_farm # 金十数据中心-经济指标-美国-劳动力市场-就业人口-美国非农就业人数报告
macro_usa_adp_employment # 金十数据中心-经济指标-美国-劳动力市场-就业人口-美国ADP就业人数报告
macro_usa_core_pce_price # 金十数据中心-经济指标-美国-劳动力市场-消费者收入与支出-美国核心PCE物价指数年率报告
macro_usa_real_consumer_spending # 金十数据中心-经济指标-美国-劳动力市场-消费者收入与支出-美国实际个人消费支出季率初值报告
macro_usa_trade_balance # 金十数据中心-经济指标-美国-贸易状况-美国贸易帐报告
macro_usa_current_account # 金十数据中心-经济指标-美国-贸易状况-美国经常帐报告
macro_usa_rig_count # 金十数据中心-经济指标-美国-产业指标-制造业-贝克休斯钻井报告
# 金十数据中心-经济指标-美国-产业指标-制造业-美国个人支出月率报告
macro_usa_ppi # 金十数据中心-经济指标-美国-产业指标-制造业-美国生产者物价指数(PPI)报告
macro_usa_core_ppi # 金十数据中心-经济指标-美国-产业指标-制造业-美国核心生产者物价指数(PPI)报告
macro_usa_api_crude_stock # 金十数据中心-经济指标-美国-产业指标-制造业-美国API原油库存报告
macro_usa_pmi # 金十数据中心-经济指标-美国-产业指标-制造业-美国Markit制造业PMI初值报告
macro_usa_ism_pmi # 金十数据中心-经济指标-美国-产业指标-制造业-美国ISM制造业PMI报告
macro_usa_nahb_house_market_index # 金十数据中心-经济指标-美国-产业指标-房地产-美国NAHB房产市场指数报告
macro_usa_house_starts # 金十数据中心-经济指标-美国-产业指标-房地产-美国新屋开工总数年化报告
macro_usa_new_home_sales # 金十数据中心-经济指标-美国-产业指标-房地产-美国新屋销售总数年化报告
macro_usa_building_permits # 金十数据中心-经济指标-美国-产业指标-房地产-美国营建许可总数报告
macro_usa_exist_home_sales # 金十数据中心-经济指标-美国-产业指标-房地产-美国成屋销售总数年化报告
macro_usa_house_price_index # 金十数据中心-经济指标-美国-产业指标-房地产-美国FHFA房价指数月率报告
macro_usa_spcs20 # 金十数据中心-经济指标-美国-产业指标-房地产-美国S&P/CS20座大城市房价指数年率报告
macro_usa_pending_home_sales # 金十数据中心-经济指标-美国-产业指标-房地产-美国成屋签约销售指数月率报告
macro_usa_cb_consumer_confidence # 金十数据中心-经济指标-美国-领先指标-美国谘商会消费者信心指数报告
macro_usa_nfib_small_business # 金十数据中心-经济指标-美国-领先指标-美国NFIB小型企业信心指数报告
macro_usa_michigan_consumer_sentiment # 金十数据中心-经济指标-美国-领先指标-美国密歇根大学消费者信心指数初值报告
macro_usa_eia_crude_rate # 金十数据中心-经济指标-美国-其他-美国EIA原油库存报告
macro_usa_initial_jobless # 金十数据中心-经济指标-美国-其他-美国初请失业金人数报告
macro_usa_crude_inner # 金十数据中心-经济指标-美国-其他-美国原油产量报告
0.3.43
增加-交易法门-数据-黑色系-焦煤
0.3.44
更新宏观数据
macro_cons_gold_volume # 全球最大黄金ETF—SPDR Gold Trust持仓报告
macro_cons_gold_change # 全球最大黄金ETF—SPDR Gold Trust持仓报告
macro_cons_gold_amount # 全球最大黄金ETF—SPDR Gold Trust持仓报告
macro_cons_silver_volume # 全球最大白银ETF--iShares Silver Trust持仓报告
macro_cons_silver_change # 全球最大白银ETF--iShares Silver Trust持仓报告
macro_cons_silver_amount # 全球最大白银ETF--iShares Silver Trust持仓报告
macro_cons_opec_month # 欧佩克报告-月度
0.3.45
增加中国证券投资基金业协会-信息公示
# 中国证券投资基金业协会-信息公示-会员信息
amac_member_info # 中国证券投资基金业协会-信息公示-会员信息-会员机构综合查询
# 中国证券投资基金业协会-信息公示-从业人员信息
amac_person_org_list # 中国证券投资基金业协会-信息公示-从业人员信息-基金从业人员资格注册信息
# 中国证券投资基金业协会-信息公示-私募基金管理人公示
amac_manager_info # 中国证券投资基金业协会-信息公示-私募基金管理人公示-私募基金管理人综合查询
amac_manager_classify_info # 中国证券投资基金业协会-信息公示-私募基金管理人公示-私募基金管理人分类公示
amac_member_sub_info # 中国证券投资基金业协会-信息公示-私募基金管理人公示-证券公司私募基金子公司管理人信息公示
# 中国证券投资基金业协会-信息公示-基金产品
amac_fund_info # 中国证券投资基金业协会-信息公示-基金产品-私募基金管理人基金产品
amac_securities_info # 中国证券投资基金业协会-信息公示-基金产品-证券公司集合资管产品公示
amac_aoin_info # 中国证券投资基金业协会-信息公示-基金产品-证券公司直投基金
amac_fund_sub_info # 中国证券投资基金业协会-信息公示-基金产品公示-证券公司私募投资基金
amac_fund_account_info # 中国证券投资基金业协会-信息公示-基金产品公示-基金公司及子公司集合资管产品公示
amac_fund_abs # 中国证券投资基金业协会-信息公示-基金产品公示-资产支持专项计划
amac_futures_info # 中国证券投资基金业协会-信息公示-基金产品公示-期货公司集合资管产品公示
# 中国证券投资基金业协会-信息公示-诚信信息
amac_manager_cancelled_info # 中国证券投资基金业协会-信息公示-诚信信息-已注销私募基金管理人名单
0.3.46
更新-商品期权-菜籽粕期权接口
修复 get_sector_futures 字段名问题
0.3.47
增加-商品期权-郑州商品交易所-期权-历史数据
0.3.48
修复 macro_cons_opec_month 接口数据更新问题
0.3.49
新增-交易法门-工具-仓单分析-虚实盘比日报接口
0.3.50
更新-说明文档
0.3.51
修复 macro_cons_opec_month 接口数据更新问题, 统一数据接口跟网页端统一
修复-百度指数-由用户输入cookie来访问数据及说明文档
0.3.52
新增-英为财情-外汇-货币对历史数据
0.3.53
修复-macro_usa_rig_count-接口返回数据
修复-rate_interbank-文档注释
0.3.54
新增-事件接口
新增-事件接口新型冠状病毒-网易
新增-事件接口新型冠状病毒-丁香园
0.3.55
更新-事件接口新型冠状病毒
0.3.56
更新-事件接口新型冠状病毒-全国疫情趋势图
0.3.57
更新-事件接口新型冠状病毒-分省地区
一些细节修复
0.3.58
新增-财富排行榜(英文版)
0.3.59
新增-currency_name_code-接口
0.3.60
修复-财富排行榜(英文版)-索引乱序问题
0.3.61
修复-事件接口新型冠状病毒-hospital-接口
0.3.62
修复-20200203交易日问题
0.3.63
修复-事件接口新型冠状病毒-网易接口
0.3.64
修复-事件接口新型冠状病毒-丁香园接口
0.3.65
修复-calendar.json 问题, 感谢 fxt0706
0.3.66
修复-epu_index-加载问题
0.3.67
修复-option_commodity-json数据加载问题
0.3.68
更名函数 movie_board -> box_office_spot
0.3.69
新增-epidemic_baidu
百度-新型冠状病毒肺炎-疫情实时大数据报告
0.3.70
修复-epidemic_dxy-字段问题
0.3.71
修复-epidemic_dxy-具体省份字段问题
0.3.72
新增-百度迁徙地图接口
0.3.73
修复文字表述
0.3.74
修复-epidemic_163-数据更新问题
0.3.75
修复-epidemic_dxy-图片显示问题
0.3.76
新增-stock_zh_index_daily_tx-补充新浪指数的数据缺失问题
0.3.77
修复-epidemic_163-数据更新问题
0.3.78
新增-bond_china_yield-中国债券信息网-国债及其他债券收益率曲线
0.3.79
修改-bond_china_yield-参数
0.3.80
新增-基金数据接口
0.3.81
新增-基金数据接口-净值
0.3.82
新增-小区查询
新增-相同行程查询
0.3.83
新增-交易法门-工具-套利分析-FullCarry
修改-交易法门-工具-期限分析-基差分析
0.3.84
新增-货币对-投机情绪报告
0.3.85
修复-epidemic_area_detail-增加下载进度提示
0.3.86
修复-epidemic_dxy-完善图片获取
0.3.87
新增-债券质押式回购成交明细数据
新增-细化到地市的疫情历史数据20200123至今
0.3.88
新增-交易法门-工具-持仓分析-持仓季节性
修复-epidemic_163
0.3.89
新增-epidemic_163-数据说明接口
0.3.90
修复-epidemic_dxy
0.3.91
修复-get_receipt-MA数值问题
0.3.92
新增-奇货可查接口测试
0.3.93
新增-奇货可查接口测试-代码补全
0.3.94
修复-epidemic_dxy
0.3.95
新增-债券-沪深债券
新增-债券-沪深可转债
0.3.96
修复-baidu_search_index-异常
0.3.97
新增-特许经营数据
0.3.98
修复-get_receipt-MA数值问题条件判断
0.3.99
修复-air_hebei-代码格式
0.4.1
修复-pandas-版本降级
0.4.2
修复-epidemic_baidu
0.4.3
新增-慈善中国
0.4.4
新增-epidemic_history-疫情所有历史数据
0.4.5
完善-慈善中国-类型注解
0.4.6
修复-charity_china_report
0.4.7
新增-测试接口
0.4.8
修复-epidemic_hist_all
修复-epidemic_hist_city
修复-epidemic_hist_province
0.4.9
新增-option_sina_cffex_hs300_list
新增-option_sina_cffex_hs300_spot
新增-option_sina_cffex_hs300_daily
新增-option_sina_sse_list
新增-option_sina_sse_expire_day
新增-option_sina_sse_codes
新增-option_sina_sse_spot_price
新增-option_sina_sse_underlying_spot_price
新增-option_sina_sse_greeks
新增-option_sina_sse_minute
新增-option_sina_sse_daily
0.4.10
修复-金十数据websocket接口
0.4.11
新增-交易法门-工具-资金分析-资金流向
新增-交易法门-工具-资金分析-沉淀资金
新增-交易法门-工具-资金分析-资金季节性
新增-交易法门-工具-资金分析-成交排名
0.4.12
新增-微博舆情报告
0.4.13
新增-Python3.8.1支持
0.4.14
修复-get_receipt-CZCE问题
0.4.15
修复-hf_subscribe_exchange_symbol-在Linux Python 3.8.1 报错问题
0.4.16
修复-get_js_dc_current
0.4.17
新增-知识图谱
0.4.18: fix: use tqdm replace print hints
0.4.19: fix: use tqdm replace print hints in energy_carbon.py and charity_china.py
0.4.20: add: jyfm_tools_position_structure and jyfm_tools_symbol_handbook
0.4.21: fix: macro_cons_opec_month print hints
0.4.22: fix: add tqdm desc
0.4.23: fix: add tqdm stock_zh_a_spot desc
0.4.24: fix: add get_us_stock_name to get the u.s. stock name
0.4.25: fix: upload setup.py file and set automate release and deploy
0.4.26: fix: bond_spot_quote and docs
0.4.27: test: automate test
0.4.28: test: automate test
0.4.29: feats: add currency interface
0.4.30: fix: futures_roll_yield.py/get_roll_yield: CUefp error
0.4.31: format: format currency.py
0.4.32: fix: china_bond.py
0.4.33: add: jyfm_tools_futures_arbitrage_matrix for jyfm futures
0.4.34: fix: get_czce_rank_table history-20171228 format
0.4.35: fix: get_czce_rank_table history-20071228 format
0.4.36: fix: macro_cons_opec_month
0.4.37: add: get_ine_daily to fetch SC and NR data
0.4.38: add: futures_sgx_daily to fetch futures data from sgx
0.4.39: refactor: covid.py/covid_19_163 interface
0.4.40: refactor: covid.py interface
0.4.41: fix: cot.py get_rank_sum_daily interface
0.4.42: add: wdbank.py test
0.4.43: add: wdbank.py dependencies
0.4.44: add: tool github
0.4.45: add: fund_public file and docs
0.4.46: add: macro_china_lpr
0.4.47: add: stock_em_analyst
0.4.48: add: stock_em_comment
0.4.49: add: stock_em_hsgt
0.4.50: fix: stock_em_sy_yq_list
0.4.51: add: stock_tfp_em
0.4.52: fix: covid.py
0.4.53: fix: futures_hq_sina.py
0.4.54: add: futures_foreign
0.4.55: fix: macro_constitute.py
0.4.56: add: index_vix
0.4.57: fix: covid-19; desc: delete pic show
0.4.58: add: qhkc api
0.4.59: add: jyfm_tools
0.4.60: fix: covid_19_dxy and cot.py
0.4.61: fix: cot.py dict's keys use strip
0.4.62: fix: add PG into cons.py map_dict
0.4.63: add: energy_oil to add energy_oil_hist and energy_oil_detail
0.4.64: add: futures_em_spot_stock
0.4.65: add: futures_global_commodity_name_url_map
0.4.66: fix: fund_em.py timezone transfer
0.4.67: fix: covid covid_19_area_detail
0.4.68: fix: marco_usa
0.4.69: add: futures_cfmmc
0.4.70: add: covid_19 CSSE 数据接口
0.4.71: add: argus
0.4.72: add: stock_zh_tick_163
0.4.73: add: stock_zh_tick_tx_js
0.4.74: fix: stock_zh_tick_163 return tips
0.4.75: fix: nh_index
0.4.76: add: fred_md
0.4.77: fix: get_dce_option_daily
0.4.78: add: internal_flow_history
0.4.79: add: stock_em_dxsyl
0.4.80: fix: covid and docs
0.4.81: add: stock_em_yjyg and stock_em_yysj
0.4.82: fix: futures_xgx_index
0.4.83: fix: fortune_500.py
0.4.84: fix: a and kcb stock return format
0.4.85: fix: a and kcb stock field
0.4.86: add: hf_sp_500
0.4.87: fix: jinshi data update
0.4.88: fix: macro_china
0.4.89: fix: macro_other
0.4.90: fix: stock_zh_a and stock_zh_kcb return adjusted stock price
0.4.91: add: futures_inventory_em
0.4.92: fix: adjust hk_stock_sina, us_stock_sina
0.4.93: fix: air_quality
0.4.94: fix: air_quality path
0.4.95: add: js file
0.4.96: fix: format air interface
0.4.97: fix: interbank_rate_em.py add need_page parameter to control update content
0.4.98: add: mplfinance package
0.4.99: add: fund_em
0.5.1: fix: add PG to futures list
0.5.2: fix: air_zhenqi.py rename air_city_dict to air_city_list
0.5.3: add: add two fields into covid_163
0.5.4: fix: fix request_fun timeout and error type
0.5.5: fix: fund_em_graded_fund_daily return fields
0.5.6: fix: stock_us_sina.py rename columns
0.5.7: fix: import akshare only load functions
0.5.8: add: macro_china_money_supply
0.5.9: add: macro_china_new_house_price, macro_china_enterprise_boom_index, macro_china_national_tax_receipts
0.5.10: fix: zh_stock_ah_tx
0.5.11: fix: fund_em return fields
0.5.12: fix: add date to fund_em daily function
0.5.13: add: stock_fund
0.5.14: add: stock_market_fund_flow, stock_sector_fund_flow, stock_individual_fund_flow_rank
0.5.15: fix: baidu_index
0.5.16: add: fund_em_value_estimation
0.5.17: fix: delete macro_euro zero value
0.5.18: add: stock_financial_abstract, stock_financial_analysis_indicator
0.5.19: add: stock_add_stock, stock_ipo_info, stock_history_dividend_detail, stock_history_dividend
0.5.20: add: stock_restricted_shares, stock_circulate_stock_holder
0.5.21: add: futures_dce_position_rank
0.5.22: fix: fix futures_dce_position_rank return format
0.5.23: add: stock_sector_spot, stock_sector_detail
0.5.24: fix: futures_dce_position_rank
0.5.25: fix: futures_dce_position_rank return fields
0.5.26: add: stock_info
0.5.27: add: stock_em_hsgt_hold_stock
0.5.28: add: stock_fund_stock_holder, stock_main_stock_holder
0.5.29: fix: stock_em_sy
0.5.30: fix: air_zhenqi.py
0.5.31: fix: add futures_dce_position_rank_other to fix futures_dce_position_rank at 20160104
0.5.32: fix: futures_dce_position_rank_other return format
0.5.33: add: zh_bond_cov_sina and set pandas version
0.5.34: fix: set pandas version > 0.25
0.5.35: add: bond_cov_comparison and bond_zh_cov
0.5.36: fix: stock_info_sz_name_code return code format
0.5.37: add: stock_hold
0.5.38: fix: futures_dce_position_rank_other exchange symbol and variety
0.5.39: add: stock_recommend
0.5.40: fix: stock_recommend output format
0.5.41: fix: deprecated requests-html module
0.5.42: fix: reformat investing interface
0.5.43: fix: qhck interface
0.5.44: add: LME holding and stock report
0.5.45: fix: transform the data type of stock_zh_a_spot output
0.5.46: add: CFTC holding and stock
0.5.47: fix: fix index_investing_global interface
0.5.48: fix: fix stock_info_a_code_name interface
0.5.49: fix: fix stock_zh_a_daily interface
0.5.50: fix: fix get_roll_yield_bar interface
0.5.51: add: stock_summary
0.5.52: fix: fix get_roll_yield_bar interface
0.5.53: add: add watch_jinshi_quotes interface
0.5.54: add: add stock_js_price interface
0.5.55: add: add futures_czce_warehouse_receipt interface
0.5.56: add: add futures_dce_warehouse_receipt, futures_shfe_warehouse_receipt interface
0.5.57: fix: fix macro data interface
0.5.58: add: add stock_em_qsjy interface
0.5.59: fix: fix fund interface
0.5.60: fix: add index_bloomberg_billionaires interface
0.5.61: fix: fix futures_rule interface
0.5.62: add: add stock_a_pe, stock_a_pb interface
0.5.63: add: add stock_a_lg_indicator interface
0.5.64: add: add stock_a_high_low_statistics interface
0.5.65: add: add stock_a_below_net_asset_statistics interface
0.5.66: fix: fix stock_zh_a_daily default return unadjusted data
0.5.67: fix: fix R and MATLAB compatibility issues
0.5.68: add: add option_commodity_sina interface
0.5.69: fix: fix option_commodity_sina interface
0.5.70: merge: merge #4048
0.5.71: add: add tool_trade_date_hist interface
0.5.72: add: add fund_etf_category_sina, fund_etf_hist_sina interface
0.5.73: add: add stock_report_disclosure interface
0.5.74: add: add stock_zh_a_minute interface
0.5.75: add: add futures_zh_minute_sina interface
0.5.76: add: add option_sina_finance_minute interface
0.5.77: fix: fix currency_hist interface return data format
0.5.78: add: add hold field in futures_zh_minute_sina interface
0.5.79: add: add stock_report_fund_hold interface
0.5.80: fix: fix PG to futures cons file
0.5.81: add: add stock_zh_index_hist_csindex interface
0.5.82: fix: fix LU to futures cons file
0.5.83: fix: fix qhkc broker_positions_process interface
0.5.84: fix: fix tool_trade_date_hist_sina interface and update calendar.json
0.5.85: add: add index_stock_hist interface
0.5.86: fix: fix code format
0.5.87: fix: fix cot interface
0.5.88: fix: fix stock_em_account interface
0.5.89: add: add macro_china_new_financial_credit interface
0.5.90: add: add stock_sina_lhb interface
0.5.91: fix: fix covid for python3.8
0.5.92: fix: fix futures_daily_bar interface
0.5.93: add: add macro_china_fx_gold interface
0.5.94: add: add stock_zh_index_daily_em, bond_cov_jsl interface
0.5.95: fix: fix get_dce_option_daily interface
0.5.96: add: add stock_em_hsgt_hist interface
0.5.97: fix: fix remove mplfinance package in requirements.txt
0.5.98: add: add stock_hk_eniu_indicator interface
0.5.99: fix: fix stock_zh_ah_daily interface
0.6.1: fix: fix stock_zh_ah_daily interface set default value
0.6.2: fix: fix stock_zh_a_minute interface and add adjust parameter
0.6.3: fix: fix stock_zh_a_minute interface
0.6.4: add: add macro_china interface
0.6.5: add: add macro_china_wbck interface
0.6.6: fix: fix macro_china_wbck interface
0.6.7: add: add index_stock_cons_sina interface
0.6.8: fix: fix option_commodity interface
0.6.9: fix: fix stock_em_gpzy_pledge_ratio interface
0.6.10: add: add macro_china_hb, macro_china_gksccz, macro_china_bond_public interface
0.6.11: fix: fix python version should be 3.7 later
0.6.12: fix: fix stock_em_gpzy_distribute_statistics_company interface
0.6.13: add: add stock_us_fundamental interface
0.6.14: fix: fix stock_us_fundamental interface
0.6.15: fix: fix macro_china_market_margin_sh interface
0.6.16: fix: fix stock_us_daily time period and adjust for specific stock
0.6.17: fix: fix stock_js_weibo_report interface
0.6.18: fix: fix get_shfe_option_daily interface column name
0.6.19: fix: fix stock_hk_daily interface to process non-dividend stock
0.6.20: fix: fix covid_baidu interface
0.6.21: fix: fix futures_hf_spot interface
0.6.22: fix: fix stock_zh_index_daily_tx interface
0.6.23: fix: fix currency_hist interface
0.6.24: fix: fix stock_zh_kcb_spot interface
0.6.25: add: add stock_register_kcb interface
0.6.26: add: add stock_em_sy_list interface
0.6.27: fix: fix stock_sector_detail interface
0.6.28: add: add stock_register_cyb interface
0.6.29: fix: fix stock_zh_a_daily interface
0.6.30: add: add energy interface
0.6.31: fix: fix energy interface
0.6.32: fix: fix docs interface
0.6.33: fix: fix get_roll_yield_bar interface
0.6.34: fix: fix currency_investing and futures_inventory_em interface and add index_stock_cons_csindex interface
0.6.35: fix: fix get_futures_daily interface
0.6.36: fix: fix stock_info_a_code_name interface
0.6.37: fix: fix stock_sector_detail interface
0.6.38: fix: fix get_futures_daily interface
0.6.39: add: add stock_em_xgsglb interface
0.6.40: add: add stock_zh_a_new interface
0.6.41: fix: fix get_ine_daily interface
0.6.42: add: add bond_futures_deliverable_coupons interface
0.6.43: fix: fix bond_futures_deliverable_coupons interface
0.6.44: add: add futures_comex_inventory interface
0.6.45: add: add macro_china_xfzxx interface
0.6.46: add: add macro_china_reserve_requirement_ratio interface
0.6.47: fix: fix franchise_china interface
0.6.48: fix: fix get_rank_sum interface
0.6.49: fix: fix get_dce_rank_table interface
0.6.50: add: add macro_china_hgjck, macro_china_consumer_goods_retail interface
0.6.51: fix: fix macro_china_hgjck interface
0.6.52: add: add macro_china_society_electricity interface
0.6.53: add: add macro_china_society_traffic_volume interface
0.6.54: add: add macro_china_postal_telecommunicational interface
0.6.55: add: add macro_china_international_tourism_fx interface
0.6.56: add: add macro_china_swap_rate interface
0.6.57: fix: fix stock_sina_lhb_detail_daily interface
0.6.58: add: add bond_china_close_return interface
0.6.59: add: add macro_china_passenger_load_factor interface
0.6.60: fix: fix stock_sina_lhb_ggtj interface
0.6.61: fix: fix option_czce_hist interface
0.6.62: fix: fix sunrise_daily interface
0.6.63: fix: fix get_roll_yield_bar interface
0.6.64: add: add macro_china_insurance interface
0.6.65: add: add macro_china_supply_of_money interface
0.6.66: add: add support for python 3.9.0
0.6.67: add: add macro_china_foreign_exchange_gold interface
0.6.68: add: add macro_china_retail_price_index interface
0.6.69: fix: fix box_office_spot interface
0.6.70: fix: fix bond_investing_global interface
0.6.71: fix: fix nh_return_index interface
0.6.72: fix: fix get_receipt interface
0.6.73: add: add news_cctv interface
0.6.74: fix: fix macro and acm interface
0.6.75: add: add movie_boxoffice interface
0.6.76: fix: fix remove execjs dependence
0.6.77: add: add macro_china_real_estate interface
0.6.78: fix: fix movie_boxoffice interface
0.6.79: fix: split movie_boxoffice to single interface
0.6.80: fix: movie_boxoffice interface
0.6.81: fix: fix stock_report_fund_hold interface
0.6.82: fix: fix stock_em_comment interface
0.6.83: add: add crypto_hist and crypto_name_map interface
0.6.84: fix: fix crypto_hist interface
0.6.85: fix: fix stock_a_pb and stock_a_pe interface
0.6.86: fix: fix stock_zh_a_minute interface
0.6.87: fix: remove email interface
0.6.88: fix: fix get_dce_rank_table interface
0.6.89: fix: fix get_dce_rank_table interface
0.6.90: add: add fund_em_rank interface
0.6.91: fix: fix get_futures_daily interface
0.6.92: add: add repo_rate_hist interface
0.6.93: fix: fix stock_report_fund_hold interface
0.6.94: fix: fix docs interface
0.6.95: fix: fix macro_china_market_margin_sh interface
0.6.96: fix: fix stock_zh_a_daily interface
0.6.97: add: add stock_em_hsgt_board_rank interface
0.6.98: fix: fix fortune_rank interface
0.6.99: add: add forbes_rank interface
0.7.1: fix: fix futures_dce_position_rank interface
0.7.2: add: add xincaifu_rank interface
0.7.3: add: add hurun_rank interface
0.7.4: fix: fix hurun_rank interface
0.7.5: add: add currency_pair_map interface
0.7.6: fix: fix stock_em_jgdy_detail interface
0.7.7: fix: fix stock_info interface
0.7.8: fix: fix bond_cov_jsl interface
0.7.9: fix: fix stock_em_jgdy_detail interface
0.7.10: fix: fix match_main_contract interface
0.7.11: fix: fix stock_em_analyst_rank and stock_em_analyst_detail interface
0.7.12: add: add stock_zh_a_cdr_daily interface
0.7.13: fix: fix stock_zh_a_cdr_daily and stock_zh_a_daily interface
0.7.14: fix: fix get_receipt interface
0.7.15: add: add futures_contract_detail interface
0.7.16: fix: fix futures_zh_spot interface
0.7.17: del: del zdzk interface
0.7.18: fix: fix stock_zh_a_daily interface
0.7.19: fix: fix stock_zh_a_daily interface
0.7.20: fix: fix stock_em_jgdy_tj interface
0.7.21: fix: fix zh_stock_kcb_report interface
0.7.22: fix: fix zh_stock_kcb_report interface
0.7.23: fix: fix fund_em_open_fund_info interface
0.7.24: fix: fix futures_spot_price_daily interface
0.7.25: add: add option_current_em interface
0.7.26: fix: fix option_current_em interface
0.7.27: add: add js_news interface
0.7.28: fix: fix js_news interface
0.7.29: fix: fix macro_china_market_margin_sh interface
0.7.30: add: add nlp_answer interface
0.7.31: fix: fix index_sw interface
0.7.32: add: add index_cni interface
0.7.33: add: add more index_cni interface
0.7.34: add: add stock_dzjy_sctj interface
0.7.35: add: add stock_dzjy_mrmx interface
0.7.36: add: add stock_dzjy_mrtj interface
0.7.37: add: add stock_dzjy_hygtj interface
0.7.38: add: add stock_dzjy_hyyybtj interface
0.7.39: add: add stock_dzjy_yybph interface
0.7.40: fix: fix js_news interface
0.7.41: add: add stock_em_yzxdr interface
0.7.42: fix: fix fund_em_etf_fund_daily interface
0.7.43: fix: fix match_main_contract interface
0.7.44: fix: fix stock_hk_daily interface
0.7.45: fix: fix stock_em_yzxdr interface
0.7.46: fix: fix option_czce_hist interface
0.7.47: fix: fix bond_zh_cov interface
0.7.48: fix: fix futures_dce_position_rank interface
0.7.49: fix: fix stock_us_zh_spot interface
0.7.50: fix: fix stock_em_hsgt_stock_statistics interface
0.7.51: fix: fix stock_us_daily interface
0.7.52: fix: fix stock_sector_fund_flow_rank interface
0.7.53: fix: fix stock_em_yzxdr interface
0.7.54: add: add stock_a_code_to_symbol interface
0.7.55: add: add stock_news_em interface
0.7.56: fix: fix stock_news_em interface
0.7.57: fix: fix xlrd support
0.7.58: fix: fix stock_zh_a_tick_tx_js support
0.7.59: fix: fix read_excel support
0.7.60: fix: fix fund_em_open_fund_daily interface
0.7.61: fix: fix calendar.json interface
0.7.62: fix: fix QQ group interface
0.7.63: add: add bond_summary_sse interface
0.7.64: fix: fix macro_cons_gold_volume interface
0.7.65: fix: fix fund_em_value_estimation interface
0.7.66: fix: fix fund_em_value_estimation interface
0.7.67: fix: fix get_dce_daily interface
0.7.68: fix: fix stock_zh_index_spot interface
0.7.69: fix: fix covid_19 interface
0.7.70: fix: fix get_dce_rank_table interface
0.7.71: fix: fix stock_us_daily interface
0.7.72: fix: fix get_ine_daily interface
0.7.73: add: add macro_china_money_supply interface
0.7.74: fix: fix stock_zh_a_minute interface
0.7.75: add: add bond_cash_summary_sse interface
0.7.76: fix: fix get_rank_sum_daily interface
0.7.77: fix: fix get_inventory_data interface
0.7.78: fix: fix futures_inventory_99 interface
0.7.79: fix: fix stock_a_below_net_asset_statistics interface
0.7.80: add: add bank_rank_banker interface
0.7.81: add: add macro_china_stock_market_cap interface
0.7.82: fix: fix macro_china_stock_market_cap interface
0.7.83: fix: fix stock_news_em interface
0.7.84: fix: fix covid_19_dxy interface
0.7.85: add: add futures_spot_price_previous interface
0.7.86: add: add fund_em_hk_rank interface
0.7.87: add: add fund_em_lcx_rank interface
0.7.88: fix: fix bond_repo_zh_tick interface
0.7.89: fix: fix stock_hk_daily interface
0.7.90: fix: fix stock_em_gpzy_pledge_ratio interface
0.7.91: fix: fix stock_report_disclosure interface
0.7.92: add: add fund_em_hk_fund_hist interface
0.7.93: add: add fund_portfolio_hold_em interface
0.7.94: fix: fix futures_spot_price_previous interface
0.7.95: add: add covid_19_trace interface
0.7.96: fix: fix bond_spot_quote interface
0.7.97: fix: fix bond_spot_deal interface
0.7.98: fix: fix stock_report_fund_hold interface
0.7.99: fix: fix stock_zh_a_daily interface
0.8.1: add: add stock_report_fund_hold_detail interface
0.8.2: fix: fix option_finance_board interface
0.8.3: fix: fix stock_zh_a_daily interface
0.8.4: fix: fix option interface
0.8.5: fix: fix bond_investing_global interface
0.8.6: add: add macro_china_shrzgm interface
0.8.7: add: add stock_zh_a_tick_163_now interface
0.8.8: fix: fix add PK to CZCE
0.8.9: add: add futures delivery and spot interface
0.8.10: fix: fix fund_portfolio_hold_em interface
0.8.11: add: add futures_to_spot_dce interface
0.8.12: add: add futures_delivery_shfe interface
0.8.13: fix: fix stock_us_daily interface
0.8.14: fix: fix fund_em_open_fund_rank interface
0.8.15: fix: fix chinese_to_english interface
0.8.16: fix: fix stock_a_pe interface
0.8.17: add: add stock_financial_report_sina interface
0.8.18: fix: fix futures_spot_price_daily interface
0.8.19: add: add stock_margin_sse interface
0.8.20: add: add stock_margin_detail_sse interface
0.8.21: fix: fix stock_szse_summary interface
0.8.22: fix: fix stock_zh_a_daily interface
0.8.23: fix: fix covid_19_dxy interface
0.8.24: fix: fix fund_em_value_estimation interface
0.8.25: fix: fix stock_zh_index_daily_tx interface
0.8.26: fix: fix stock_hk_daily interface
0.8.27: fix: fix get_dce_rank_table interface
0.8.28: fix: fix stock_em_analyst_rank interface
0.8.29: add: add fund_rating interface
0.8.30: add: add fund_manager interface
0.8.31: fix: fix stock_zh_a_minute interface
0.8.32: fix: fix get_dce_rank_table interface
0.8.33: add: add stock_profit_forecast interface
0.8.34: fix: fix index_investing_global interface
0.8.35: add: add bond_zh_us_rate interface
0.8.36: add: add stock_em_fhps interface
0.8.37: add: add stock_em_yjkb interface
0.8.38: fix: fix get_czce_daily interface
0.8.39: add: add stock_board_concept_cons_ths interface
0.8.40: fix: fix stock_board_concept_cons_ths interface
0.8.41: fix: fix energy_carbon_bj interface
0.8.42: fix: fix stock_zh_a_daily interface
0.8.43: fix: fix stock_em_yjyg interface
0.8.44: fix: fix stock_em_comment interface
0.8.45: add: add stock_sse_deal_daily interface
0.8.46: fix: fix stock_board_concept_cons_ths interface
0.8.47: add: add stock_board_concept_info_ths interface
0.8.48: fix: fix fund_rating_sh fund_rating_zs fund_rating_ja interface
0.8.49: add: add stock_em_yjbb interface
0.8.50: fix: fix stock_zh_index_spot interface
0.8.51: fix: fix stock_zh_a_spot interface
0.8.52: add: add stock_em_zcfz, stock_em_lrb, stock_em_xjll interface
0.8.53: fix: fix stock_em_zcfz interface
0.8.54: fix: fix stock_register_kcb interface
0.8.55: add: add stock_ipo_declare interface
0.8.56: fix: fix index_bloomberg_billionaires interface
0.8.57: fix: fix hurun_rank interface
0.8.58: add: add hurun_rank interface
0.8.59: fix: fix get_sector_futures interface
0.8.60: fix: fix currency_hist interface
0.8.61: fix: fix stock_em_hsgt_hold_stock interface
0.8.62: fix: fix stock_zh_a_tick_163 interface
0.8.63: fix: fix futures_zh_daily_sina interface
0.8.64: fix: fix futures_inventory_em interface
0.8.65: fix: fix futures_hq_spot_df interface
0.8.66: fix: fix currency_hist interface
0.8.67: fix: fix requirements.txt interface
0.8.68: fix: fix bond_investing_global interface
0.8.69: fix: fix stock_board_concept_cons_ths interface
0.8.70: add: add stock_board_concept_index_ths interface
0.8.71: fix: fix remove obor fold
0.8.72: fix: fix stock_board_concept_index_ths interface
0.8.73: add: add stock_board_industry_index_ths interface
0.8.74: fix: fix test interface
0.8.75: fix: fix stock_board_industry_index_ths interface
0.8.76: add: add stock_notice_report interface
0.8.77: fix: fix rate_interbank interface
0.8.78: fix: fix stock_board_concept_index_ths interface
0.8.79: add: add stock_lh_yyb_most, stock_lh_yyb_capital, stock_lh_yyb_control interface
0.8.80: fix: fix stock_em_yjkb interface
0.8.81: add: add crypto_bitcoin_hold_report interface
0.8.82: fix: fix energy_carbon_hb interface
0.8.83: fix: fix get_czce_daily interface
0.8.84: fix: fix amac_fund_abs interface
0.8.85: fix: fix rename amac_person_org_list to amac_person_fund_org_list interface
0.8.86: add: add amac_person_bond_org_list interface
0.8.87: add: add stock_fund_flow_concept interface
0.8.88: add: add stock_fund_flow_industry interface
0.8.89: add: add stock_fund_flow_individual interface
0.8.90: add: add stock_fund_flow_big_deal interface
0.8.91: add: add stock_em_ggcg interface
0.8.92: fix: fix stock_zh_a_daily interface
0.8.93: fix: fix bond_spot_deal interface
0.8.94: fix: fix stock_us_daily interface
0.8.95: add: add fund_em_new_found interface
0.8.96: fix: fix get_czce_rank_table interface
0.8.97: add: add stock_wc_hot_top interface
0.8.98: add: add index_kq interface
0.8.99: fix: fix stock_individual_fund_flow_rank interface
0.9.1: fix: fix stock_profit_forecast interface
0.9.2: fix: fix get_futures_daily interface
0.9.3: fix: fix get_futures_daily interface
0.9.4: fix: fix get_shfe_daily interface
0.9.5: add: add stock_wc_hot_rank interface
0.9.6: fix: fix stock_wc_hot_rank interface
0.9.7: fix: fix stock_wc_hot_rank interface
0.9.8: fix: fix forbes_rank interface
0.9.9: fix: fix stock_a_below_net_asset_statistics interface
0.9.10: fix: fix stock_wc_hot_rank interface
0.9.11: add: add drewry_wci_index interface
0.9.12: fix: fix bond_investing_global interface
0.9.13: fix: fix currency_hist interface
0.9.14: fix: fix futures_global_commodity_hist interface
0.9.15: add: add index_kq_fashion interface
0.9.16: add: add index_eri interface
0.9.17: fix: fix futures_global_commodity_hist interface
0.9.18: fix: fix stock_em_dxsyl interface
0.9.19: add: add stock_market_activity_legu interface
0.9.20: fix: fix stock_individual_fund_flow_rank interface
0.9.21: add: add index_cflp_price interface
0.9.22: add: add index_cflp_volume interface
0.9.23: fix: fix index_cflp_volume interface
0.9.24: fix: fix stock_info_sz_name_code interface
0.9.25: add: add car_gasgoo_sale_rank interface
0.9.26: fix: fix stock_hk_daily interface
0.9.27: fix: fix stock_report_fund_hold interface
0.9.28: add: add stock_average_position_legu interface
0.9.29: add: add stock_em_qbzf interface
0.9.30: add: add stock_em_pg interface
0.9.31: fix: fix index_investing_global interface
0.9.32: fix: fix bond_investing_global interface
0.9.33: add: add marco_china_hk interface
0.9.34: fix: fix get_futures_daily interface
0.9.35: fix: fix stock_zh_a_daily interface
0.9.36: fix: fix stock_zh_a_daily hfq and qfq interface
0.9.37: fix: fix stock_wc_hot_rank interface
0.9.38: add: add stock_em_zt_pool interface
0.9.39: fix: fix stock_us_daily interface
0.9.40: fix: fix bond_cov_comparison interface
0.9.41: fix: fix stock_em_zt_pool_previous interface
0.9.42: add: add stock_em_zt_pool_strong interface
0.9.43: fix: fix stock_em_zt_pool_strong interface
0.9.44: fix: fix stock_em_zt_pool_sub_new interface
0.9.45: fix: fix stock_em_zt_pool interface
0.9.46: fix: fix spot_goods interface
0.9.47: fix: fix futures_comex_inventory interface
0.9.48: fix: fix stock_em_zcfz interface
0.9.49: fix: fix stock_hk_daily interface
0.9.50: fix: fix futures_spot_stock interface
0.9.51: fix: fix stock_hk_daily interface
0.9.52: fix: remove internal_flow_history interface
0.9.53: add: add stock_zh_a_alerts_cls interface
0.9.54: fix: fix bond_zh_us_rate interface
0.9.55: fix: fix index_vix interface
0.9.56: fix: fix macro_fx_sentiment interface
0.9.57: fix: fix stock_zh_a_alerts_cls interface
0.9.58: add: add stock_staq_net_stop interface
0.9.59: fix: fix covid_19_baidu interface
0.9.60: fix: fix currency_convert interface
0.9.61: fix: fix stock_info_sz_name_code interface
0.9.62: add: add stock_zh_a_gdhs interface
0.9.63: fix: fix stock_zh_a_gdhs interface
0.9.64: add: add futures_sina_hold_pos interface
0.9.65: fix: fix bond_zh_us_rate interface
0.9.66: fix: fix set urllib3==1.25.11
0.9.67: fix: fix stock_em_hsgt_hold_stock interface
0.9.68: fix: fix stock_zh_a_tick_tx interface
0.9.69: add: add currency_boc_sina interface
0.9.70: add: add stock_zh_a_hist interface
0.9.71: fix: fix stock_zh_a_hist interface
0.9.72: fix: fix stock_zh_a_hist interface
0.9.73: fix: fix stock_zh_a_tick_tx_js interface
0.9.74: add: add stock_changes_em interface
0.9.75: add: add stock_hk_spot_em, stock_hk_hist interface
0.9.76: add: add stock_us_spot_em, stock_us_hist interface
0.9.77: fix: fix stock_us_hist interface
0.9.78: fix: fix rename python file name interface
0.9.79: add: add crypto_bitcoin_cme interface
0.9.80: fix: fix futures_display_main_sina interface
0.9.81: add: add crypto_crix interface
0.9.82: fix: fix crypto_crix interface
0.9.83: fix: fix crypto_crix interface
0.9.84: fix: fix rename futures_hq_spot to futures_foreign_commodity_realtime interface
0.9.85: fix: fix rate_interbank interface
0.9.86: add: add fund_em_aum interface
0.9.87: fix: fix death_company interface
0.9.88: fix: fix stock_financial_analysis_indicator interface
0.9.89: fix: fix fund_manager interface
0.9.90: fix: fix stock_a_below_net_asset_statistics interface
0.9.91: fix: fix stock_em_yjbb interface
0.9.92: fix: fix stock_tfp_em interface
0.9.93: fix: fix stock_zh_a_gdhs interface
0.9.94: add: add macro_china_qyspjg, macro_china_fdi interface
0.9.95: fix: fix stock_board_concept_index_ths interface
0.9.96: fix: fix stock_info_sz_name_code interface
0.9.97: fix: fix urllib3 version at 1.25.8
0.9.98: fix: fix js_news interface
0.9.99: fix: fix news_cctv interface
1.0.1: add: add macro_usa_phs interface
1.0.2: fix: fix macro_usa_phs interface
1.0.3: add: add macro_germany interface
1.0.4: fix: fix macro_china interface
1.0.5: add: add macro_china_gyzjz interface
1.0.6: fix: fix get_receipt interface
1.0.7: fix: fix get_ine_daily interface
1.0.8: fix: fix macro_china_cpi interface
1.0.9: fix: fix stock_zh_a_gdhs interface
1.0.10: fix: fix stock_zh_a_spot_em interface
1.0.11: fix: fix stock_board_industry_name_ths interface
1.0.12: fix: fix macro_china_money_supply interface
1.0.13: fix: fix rename stock_board_concept_index_ths to stock_board_concept_hist_ths interface
1.0.14: add: add stock_board_concept_cons_em and stock_board_concept_hist_em interface
1.0.15: fix: fix stock_hk_hist interface
1.0.16: fix: fix tool_trade_date_hist_sina interface
1.0.17: fix: fix calendar.json interface
1.0.18: fix: fix reformat macro_china_national_tax_receipts, macro_china_hgjck, macro_china_stock_market_cap interface
1.0.19: fix: fix marco_china_hk interface
1.0.20: fix: fix bond_zh_hs_cov_daily interface
1.0.21: fix: fix charity_china interface
1.0.22: fix: fix stock_em_xgsglb interface
1.0.23: fix: fix stock_em_dxsyl interface
1.0.24: fix: fix stock_board_concept_hist_em interface
1.0.25: fix: fix get_receipt interface
1.0.26: add: add energy_carbon_domestic interface
1.0.27: fix: fix get_roll_yield_bar interface
1.0.28: add: add covid_19_baidu interface
1.0.29: fix: fix covid_19_baidu interface
1.0.30: fix: fix option_czce_hist interface
1.0.31: fix: fix futures_foreign_commodity_realtime interface
1.0.32: fix: fix covid_19_baidu interface
1.0.33: fix: fix bond_china_close_return interface
1.0.34: fix: fix bond_china_close_return interface
1.0.35: fix: fix bond_cov_jsl interface
1.0.36: fix: fix stock_em_hsgt_north_net_flow_in interface
1.0.37: add: add macro_swiss interface
1.0.38: add: add macro_japan interface
1.0.39: add: add macro_uk interface
1.0.40: add: add stock_szse_margin interface
1.0.41: add: add macro_australia interface
1.0.42: fix: fix index_stock_hist interface
1.0.43: fix: fix stock_margin_detail_szse interface
1.0.44: fix: fix stock_margin_detail_szse interface
1.0.45: fix: fix option_dce_daily interface and rename interface in option_commodity
1.0.46: add: add futures_pig_info interface
1.0.47: fix: fix futures_pig_info interface
1.0.48: add: add macro_canada interface
1.0.49: fix: fix stock_individual_fund_flow interface
1.0.50: fix: fix stock_em_jgdy_tj interface
1.0.51: add: add sport_olympic_hist interface
1.0.52: add: add stock_financial_hk interface
1.0.53: fix: fix tool_trade_date_hist_sina interface
1.0.54: fix: fix macro_china_gdp_yearly interface
1.0.55: fix: fix macro_china_freight_index interface
1.0.56: add: add stock_a_ttm_lyr interface
1.0.57: add: add stock_a_all_pb interface
1.0.58: add: add futures_pig_rank interface
1.0.59: fix: fix futures_zh_daily_sina interface
1.0.60: fix: fix futures_main_sina interface
1.0.61: fix: fix stock_a_all_pb interface
1.0.62: add: add futures_egg_price interface
1.0.63: fix: fix remove jyfm interface
1.0.64: fix: fix rename zh_stock_kcb_report to stock_zh_kcb_report_em interface
1.0.65: fix: fix stock_em_gpzy_pledge_ratio_detail interface
1.0.66: fix: fix macro_cons_opec_month interface
1.0.67: fix: fix futures_sgx_daily interface
1.0.68: fix: remove agoyal_stock_return interface
1.0.69: fix: remove bank_rank_banker interface
1.0.70: fix: remove watch_jinshi_quotes interface
1.0.71: fix: remove watch_argus interface
1.0.72: fix: fix amac_fund_abs interface
1.0.73: add: add bond_cash_summary_sse interface
1.0.74: fix: fix bond_zh_hs_cov_spot interface
1.0.75: fix: fix bond_futures_deliverable_coupons interface
1.0.76: fix: fix stock_financial_hk_analysis_indicator_em interface
1.0.77: fix: fix macro_china_m2_yearly interface
1.0.78: add: add reits_realtime_em, reits_info_jsl interface
1.0.79: fix: fix news_cctv interface
1.0.80: add: add stock_zh_a_hist_min_em, stock_zh_a_hist_pre_min_em interface
1.0.81: add: add stock_us_hist_min_em, stock_hk_hist_min_em interface
1.0.82: fix: fix bond_zh_cov interface
1.0.83: fix: fix macro_china interface
1.0.84: add: add bond_zh_cov_info interface
1.0.85: fix: fix stock_report_fund_hold interface
1.0.86: fix: fix stock_em_zt_pool_dtgc interface
1.0.87: fix: fix macro_china_swap_rate interface
1.0.88: fix: fix stock_zh_a_hist_min_em interface
1.0.89: fix: fix stock_hk_hist_min_em interface
1.0.90: fix: fix stock_us_hist_min_em interface
1.0.91: fix: fix stock_zh_a_hist_min_em interface
1.0.92: fix: fix stock_zh_a_hist interface
1.0.93: fix: fix stock_hk_hist_min_em interface
1.0.94: fix: fix stock_zh_a_new interface
1.0.95: fix: fix stock_zh_a_daily interface
1.0.96: add: add stock_zh_a_st_em interface
1.0.97: fix: fix futures_spot_stock interface
1.0.98: add: add stock_zh_a_new_em interface
1.0.99: fix: fix stock_wc_hot_rank interface
1.1.1: add: add index_investing_global_from_url interface
1.1.2: add: add stock_us_pink_spot_em interface
1.1.3: add: add stock_us_famous_spot_em interface
1.1.4: fix: fix stock_average_position_legu interface
1.1.5: add: add stock_rank_forecast_cninfo interface
1.1.6: fix: fix futures_zh_minute_sina interface
1.1.7: fix: fix covid_19_trace interface
1.1.8: add: add stock_industry_pe_ratio_cninfo interface
1.1.9: fix: fix stock_js_price interface
1.1.10: fix: fix stock_em_hsgt_hold_stock interface
1.1.11: fix: fix stock_fund_flow_concept interface
1.1.12: fix: fix stock_fund_flow_industry interface
1.1.13: add: add stock_dividents_cninfo interface
1.1.14: fix: fix stock_fund_flow_concept interface
1.1.15: add: add stock_new_gh_cninfo interface
1.1.16: fix: fix stock_em_jgdy_detail interface
1.1.17: fix: fix stock_em_jgdy_tj interface
1.1.18: fix: fix stock_fund_flow_concept and stock_fund_flow_industry interface
1.1.19: add: add stock_new_ipo_cninfo interface
1.1.20: fix: fix stock_a_pe interface
1.1.21 fix: fix setuptools==57.5.0 package
1.1.22 fix: fix remove demjson package
1.1.23 fix: fix update urllib3 package
1.1.24 fix: fix email address
1.1.25 add: add stock_hold_num_cninfo interface
1.1.26 fix: fix stock_fund_flow_concept interface
1.1.27 add: add stock_hold_control_cninfo interface
1.1.28 fix: fix move project to AKFamily interface
1.1.29 fix: fix urllib3>=1.25.8 package
1.1.30 fix: fix stock_zh_index_hist_csindex interface
1.1.31 add: add stock_hold_management_detail_cninfo interface
1.1.32 add: add sw_index_representation_spot interface
1.1.33 fix: fix sw_index_xxx interface
1.1.34 fix: fix drewry_wci_index interface
1.1.35 fix: fix fund_etf_category_sina interface
1.1.36 fix: fix sw_index_daily_indicator interface
1.1.37 fix: fix drewry_wci_index interface
1.1.38 add: add futures_comm_info interface
1.1.39 fix: fix futures_comm_info interface
1.1.40 fix: fix remove covid_19_history interface
1.1.41 add: add stock_zh_b_sina interface
1.1.42 fix: fix stock_zh_a_minute interface
1.1.43 add: add stock_cg_guarantee_cninfo interface
1.1.44 fix: fix stock_zh_index_daily interface
1.1.45 fix: fix stock_zh_index_daily_tx interface
1.1.46 fix: fix remove watch_jinshi_fx interface
1.1.47 fix: fix stock_em_jgdy_tj and stock_em_jgdy_detail interface
1.1.48 fix: fix rename fund_em_portfolio_hold to fund_portfolio_hold_em interface
1.1.49 fix: fix stock_em_jgdy_tj and stock_em_jgdy_detail interface
1.1.50 add: add stock_cg_lawsuit_cninfo interface
1.1.51 fix: fix stock_wc_hot_rank interface
1.1.52 add: add stock_cg_equity_mortgage_cninfo interface
1.1.53 fix: fix index_cni_detail_hist_adjust interface
1.1.54 fix: fix stock_board_concept_hist_ths interface
1.1.55 fix: fix stock_sina_lhb_ggtj and stock_sina_lhb_jgzz interface
1.1.56 add: add fund_em_aum_hist interface
1.1.57 fix: fix stock_sina_lhb_ggtj and stock_sina_lhb_jgzz interface
1.1.58 add: add bond_treasure_issue_cninfo interface
1.1.59 add: add bond_local_government_issue_cninfo interface
1.1.60 add: add bond_corporate_issue_cninfo interface
1.1.61 add: add bond_cov_issue_cninfo interface
1.1.62 fix: fix bond_zh_us_rate interface
1.1.63 add: add bond_cov_stock_issue_cninfo interface
1.1.64 add: add fund_report_stock_cninfo interface
1.1.65 fix: fix stock_notice_report interface
1.1.66 add: add fund_report_industry_allocation_cninfo interface
1.1.67 fix: fix stock_zh_index_hist_csindex interface
1.1.68 fix: fix index_stock_cons_csindex interface
1.1.69 add: add fund_scale_open_sina interface
1.1.70 add: add fund_scale_close_sina interface
1.1.71 add: add fund_scale_structured_sina interface
1.1.72 add: add fund_report_asset_allocation_cninfo interface
1.1.73 add: add stock_zh_index_value_csindex interface
1.1.74 fix: fix fund_em_etf_fund_info interface
1.1.75 add: add index_value_hist_funddb interface
1.1.76 fix: fix amac_fund_info interface
1.1.77 fix: fix stock_zh_a_tick_163_now interface
1.1.78 add: add stock_hsgt_individual_em interface
1.1.79 fix: fix stock_em_jgdy_tj interface
1.1.80 add: add support for Python 3.10 interface
1.1.81 add: add stock_hsgt_individual_detail_em interface
1.1.82 fix: fix stock_tfp_em interface
1. rename stock_em_tfp to stock_tfp_em
2. reformat output data type
1.1.83 add: add stock_ipo_benefit_ths interface
1.1.84 fix: fix stock_board_industry_index_ths interface
1. add start_date and end_date parameters
1.1.85 fix: fix stock_em_hsgt_stock_statistics interface
1.1.86 fix: fix stock_em_hsgt_stock_statistics interface
1.1.87 fix: fix stock_em_hsgt_hist interface
1.1.88 fix: fix stock_sector_spot interface
1.1.89 fix: fix stock_sector_detail interface
1.1.90 fix: fix stock_board_concept_name_ths interface
1.1.91 fix: fix stock_hsgt_individual_detail_em interface
1.1.92 add: add stock_rank_cxg_ths interface
1.1.93 add: add stock_rank_cxd_ths interface
1.1.94 fix: fix fund_portfolio_hold_em interface
1.1.95 fix: fix stock_board_concept_hist_ths interface
1.1.96 add: add bond_zh_hs_cov_min interface
1.1.97 add: add stock_rank_lxsz_ths interface
1.1.98 add: add stock_rank_lxxd_ths interface
1.1.99 add: add stock_rank_cxfl_ths interface
1.2.1 add: add stock_rank_cxsl_ths interface
1.2.2 fix: fix zh_subscribe_exchange_symbol interface
1.2.3 add: add stock_rank_xstp_ths interface
1.2.4 fix: fix fund_portfolio_hold_em interface
1.2.5 fix: fix index_stock_hist interface
1.2.6 add: add stock_rank_xxtp_ths interface
1.2.7 add: add stock_rank_ljqd_ths interface
1.2.8 add: add stock_rank_ljqs_ths interface
1.2.9 fix: fix stock_zh_a_gdhs interface
1.2.10 fix: fix bond_zh_hs_daily interface
1.2.11 add: add stock_zh_a_gdhs_detail_em interface
1.2.12 fix: fix stock_zh_a_gdhs interface
1.2.13 add: add stock_rank_xzjp_ths interface
1.2.14 add: add sw_index_second_spot interface
1.2.15 fix: fix stock_board_industry_name_ths interface
1.2.16 add: add stock_board_cons_ths interface
1.2.17 fix: fix amac_fund_info interface
1.2.18 fix: fix amac interface
1.2.19 fix: fix amac cons.py interface
1.2.20 fix: fix stock_zh_a_spot_em interface
1.2.21 fix: fix stock_zh_a_hist interface
1.2.22 fix: fix amac_fund_info interface
1.2.23 add: add video_tv interface
1.2.24 fix: fix car_gasgoo_sale_rank interface
1.2.25 fix: fix amac_manager_classify_info interface
1.2.26 fix: fix amac interface
1.2.27 add: add online_value_artist interface
1.2.28 add: add club_rank_game interface
1.2.29 add: add player_rank_game interface
1.2.30 add: add business_value_artist interface
1.2.31 fix: fix stock_em_zt_pool interface
1.2.32 add: add video_variety_show interface
1.2.33 add: add fund_fh_em interface
"""
__version__ = "1.2.33"
__author__ = "Albert King"
import sys
if sys.version_info < (3, 7):
print(f"AKShare {__version__} requires Python 3.7+ and 64 bit OS")
sys.exit(1)
del sys
"""
天天基金网-基金数据-分红送配
"""
from akshare.fund.fund_fhsp_em import fund_cf_em, fund_fh_rank_em, fund_fh_em
"""
中国电竞价值排行榜
"""
from akshare.other.other_game import club_rank_game, player_rank_game
"""
艺恩-艺人
"""
from akshare.movie.artist_yien import online_value_artist, business_value_artist
"""
艺恩-视频放映
"""
from akshare.movie.video_yien import video_variety_show, video_tv
"""
同花顺-数据中心-技术选股
"""
from akshare.stock_feature.stock_technology_ths import (
stock_rank_cxg_ths,
stock_rank_cxd_ths,
stock_rank_lxsz_ths,
stock_rank_lxxd_ths,
stock_rank_cxfl_ths,
stock_rank_cxsl_ths,
stock_rank_xstp_ths,
stock_rank_xxtp_ths,
stock_rank_ljqd_ths,
stock_rank_ljqs_ths,
stock_rank_xzjp_ths,
)
"""
沪深港通持股
"""
from akshare.stock_feature.stock_em_hsgt import (
stock_hsgt_individual_em,
stock_hsgt_individual_detail_em,
)
"""
指数估值
"""
from akshare.index.zh_stock_index_csindex import (
index_value_hist_funddb,
index_value_name_funddb,
)
"""
基金规模
"""
from akshare.fund.fund_scale_sina import (
fund_scale_open_sina,
fund_scale_close_sina,
fund_scale_structured_sina,
)
"""
巨潮资讯-数据中心-专题统计-基金报表
"""
from akshare.fund.fund_report_cninfo import (
fund_report_stock_cninfo,
fund_report_industry_allocation_cninfo,
fund_report_asset_allocation_cninfo,
)
"""
巨潮资讯-数据中心-专题统计-债券报表-债券发行
"""
from akshare.bond.bond_issue_cninfo import (
bond_treasure_issue_cninfo,
bond_local_government_issue_cninfo,
bond_corporate_issue_cninfo,
bond_cov_issue_cninfo,
bond_cov_stock_issue_cninfo,
)
"""
巨潮资讯-数据中心-专题统计-公司治理-股权质押
"""
from akshare.stock.stock_cg_equity_mortgage import stock_cg_equity_mortgage_cninfo
"""
巨潮资讯-数据中心-专题统计-公司治理-公司诉讼
"""
from akshare.stock.stock_cg_lawsuit import stock_cg_lawsuit_cninfo
"""
巨潮资讯-数据中心-专题统计-公司治理-对外担保
"""
from akshare.stock.stock_cg_guarantee import stock_cg_guarantee_cninfo
"""
B 股
"""
from akshare.stock.stock_zh_b_sina import (
stock_zh_b_spot,
stock_zh_b_daily,
stock_zh_b_minute,
)
"""
期货手续费
"""
from akshare.futures.futures_comm_qihuo import futures_comm_info
"""
实际控制人持股变动
"""
from akshare.stock.stock_hold_control_cninfo import (
stock_hold_control_cninfo,
stock_hold_management_detail_cninfo,
)
"""
股东人数及持股集中度
"""
from akshare.stock.stock_hold_num_cninfo import stock_hold_num_cninfo
"""
新股过会
"""
from akshare.stock.stock_new_cninfo import stock_new_gh_cninfo, stock_new_ipo_cninfo
"""
个股分红
"""
from akshare.stock.stock_dividents_cninfo import stock_dividents_cninfo
"""
行业市盈率
"""
from akshare.stock.stock_industry_pe_cninfo import stock_industry_pe_ratio_cninfo
"""
投资评级
"""
from akshare.stock.stock_rank_forecast import stock_rank_forecast_cninfo
"""
美股-知名美股
"""
from akshare.stock.stock_us_famous import stock_us_famous_spot_em
"""
美股-粉单市场
"""
from akshare.stock.stock_us_pink import stock_us_pink_spot_em
"""
REITs
"""
from akshare.reits.reits_basic import reits_info_jsl, reits_realtime_em
"""
鸡蛋价格数据
"""
from akshare.futures_derivative.futures_egg import (
futures_egg_price_yearly,
futures_egg_price_area,
futures_egg_price,
)
"""
全部 A 股-等权重市盈率、中位数市盈率
全部 A 股-等权重、中位数市净率
"""
from akshare.stock_feature.stock_ttm_lyr import stock_a_ttm_lyr
from akshare.stock_feature.stock_all_pb import stock_a_all_pb
"""
奥运奖牌
"""
from akshare.sport.sport_olympic import sport_olympic_hist
"""
宏观-加拿大
"""
from akshare.economic.macro_canada import (
macro_canada_cpi_monthly,
macro_canada_core_cpi_monthly,
macro_canada_bank_rate,
macro_canada_core_cpi_yearly,
macro_canada_cpi_yearly,
macro_canada_gdp_monthly,
macro_canada_new_house_rate,
macro_canada_retail_rate_monthly,
macro_canada_trade,
macro_canada_unemployment_rate,
)
"""
猪肉价格信息
"""
from akshare.futures_derivative.futures_pig import futures_pig_info, futures_pig_rank
"""
宏观-澳大利亚
"""
from akshare.economic.macro_australia import (
macro_australia_bank_rate,
macro_australia_unemployment_rate,
macro_australia_trade,
macro_australia_cpi_quarterly,
macro_australia_cpi_yearly,
macro_australia_ppi_quarterly,
macro_australia_retail_rate_monthly,
)
"""
融资融券-深圳
"""
from akshare.stock_feature.stock_szse_margin import (
stock_margin_underlying_info_szse,
stock_margin_detail_szse,
stock_margin_szse,
)
"""
英国-宏观
"""
from akshare.economic.macro_uk import (
macro_uk_gdp_yearly,
macro_uk_gdp_quarterly,
macro_uk_retail_yearly,
macro_uk_rightmove_monthly,
macro_uk_rightmove_yearly,
macro_uk_unemployment_rate,
macro_uk_halifax_monthly,
macro_uk_bank_rate,
macro_uk_core_cpi_monthly,
macro_uk_core_cpi_yearly,
macro_uk_cpi_monthly,
macro_uk_cpi_yearly,
macro_uk_halifax_yearly,
macro_uk_retail_monthly,
macro_uk_trade,
)
"""
日本-宏观
"""
from akshare.economic.macro_japan import (
macro_japan_bank_rate,
macro_japan_core_cpi_yearly,
macro_japan_cpi_yearly,
macro_japan_head_indicator,
macro_japan_unemployment_rate,
)
"""
瑞士-宏观
"""
from akshare.economic.macro_swiss import (
macro_swiss_trade,
macro_swiss_svme,
macro_swiss_cpi_yearly,
macro_swiss_gbd_yearly,
macro_swiss_gbd_bank_rate,
macro_swiss_gdp_quarterly,
)
"""
东方财富-概念板块
"""
from akshare.stock.stock_board_concept_em import (
stock_board_concept_cons_em,
stock_board_concept_hist_em,
stock_board_concept_name_em,
)
"""
德国-经济指标
"""
from akshare.economic.macro_germany import (
macro_germany_gdp,
macro_germany_ifo,
macro_germany_cpi_monthly,
macro_germany_retail_sale_monthly,
macro_germany_trade_adjusted,
macro_germany_retail_sale_yearly,
macro_germany_cpi_yearly,
macro_germany_zew,
)
"""
基金规模和规模趋势
"""
from akshare.fund.fund_em_aum import fund_em_aum, fund_em_aum_trend, fund_em_aum_hist
"""
CRIX 数据
"""
from akshare.crypto.crypto_crix import crypto_crix
"""
CME 比特币成交量
"""
from akshare.crypto.crypto_bitcoin_cme import crypto_bitcoin_cme
"""
盘口异动
"""
from akshare.stock_feature.stock_pankou import stock_changes_em
"""
A 股东方财富
"""
from akshare.stock_feature.stock_em_hist import (
stock_zh_a_spot_em,
stock_zh_a_hist,
stock_hk_spot_em,
stock_hk_hist,
stock_us_spot_em,
stock_us_hist,
stock_zh_a_hist_min_em,
stock_zh_a_hist_pre_min_em,
stock_hk_hist_min_em,
stock_us_hist_min_em,
stock_zh_b_spot_em,
)
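# A minimal usage sketch for the EastMoney A-share interfaces imported above
# (illustrative only -- the symbol, date range and parameter values are
# assumptions; consult the akshare documentation for the authoritative signatures):
#
#   import akshare as ak
#   spot_df = ak.stock_zh_a_spot_em()  # realtime quotes for all A shares
#   hist_df = ak.stock_zh_a_hist(symbol="000001", period="daily",
#                                start_date="20210301", end_date="20211231",
#                                adjust="")  # unadjusted daily history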
"""
中行人民币牌价历史数据查询
"""
from akshare.currency.currency_sina_china_bank import currency_boc_sina
"""
期货持仓
"""
from akshare.futures_derivative.futures_sina_cot import futures_sina_hold_pos
"""
股东户数
"""
from akshare.stock_feature.stock_gdhs import stock_zh_a_gdhs, stock_zh_a_gdhs_detail_em
"""
两网及退市
"""
from akshare.stock.stock_stop import stock_staq_net_stop
"""
每日快讯数据
"""
from akshare.stock_feature.stock_cls_alerts import stock_zh_a_alerts_cls
"""
涨停板行情
"""
from akshare.stock_feature.stock_em_ztb import (
stock_em_zt_pool,
stock_em_zt_pool_previous,
stock_em_zt_pool_dtgc,
stock_em_zt_pool_zbgc,
stock_em_zt_pool_strong,
stock_em_zt_pool_sub_new,
)
"""
中国-香港-宏观
"""
from akshare.economic.macro_china_hk import (
marco_china_hk_cpi,
marco_china_hk_cpi_ratio,
marco_china_hk_trade_diff_ratio,
marco_china_hk_gbp_ratio,
marco_china_hk_building_amount,
marco_china_hk_building_volume,
marco_china_hk_gbp,
marco_china_hk_ppi,
marco_china_hk_rate_of_unemployment,
)
"""
增发和配股
"""
from akshare.stock_feature.stock_zf_pg import stock_em_qbzf, stock_em_pg
"""
平均持仓
"""
from akshare.stock_feature.stock_legu_average_position import (
stock_average_position_legu,
)
"""
汽车销量
"""
from akshare.other.other_car import car_gasgoo_sale_rank, car_cpca_energy_sale
"""
中国公路物流运价、运量指数
"""
from akshare.index.index_cflp import index_cflp_price, index_cflp_volume
"""
赚钱效应分析
"""
from akshare.stock_feature.stock_legu_market import stock_market_activity_legu
"""
浙江省排污权交易指数
"""
from akshare.index.index_eri import index_eri
"""
Drewry 集装箱指数
"""
from akshare.index.drewry_index import drewry_wci_index
"""
柯桥指数
"""
from akshare.index.index_kq_fz import index_kq_fz
from akshare.index.index_kq_ss import index_kq_fashion
"""
问财-热门股票
"""
from akshare.stock_feature.stock_wencai import stock_wc_hot_rank
"""
新发基金
"""
from akshare.fund.fund_em_init import fund_em_new_found
"""
高管持股
"""
from akshare.stock_feature.stock_em_gdzjc import stock_em_ggcg
"""
同花顺-数据中心-资金流向-概念资金流
"""
from akshare.stock_feature.stock_fund_flow import (
stock_fund_flow_concept,
stock_fund_flow_industry,
stock_fund_flow_big_deal,
stock_fund_flow_individual,
)
"""
比特币持仓
"""
from akshare.crypto.crypto_hold import crypto_bitcoin_hold_report
"""
证券交易营业部排行
"""
from akshare.stock_feature.stock_lh_yybpm import (
stock_lh_yyb_capital,
stock_lh_yyb_most,
stock_lh_yyb_control,
)
"""
沪深 A 股公告
"""
from akshare.stock_fundamental.stock_notice import stock_notice_report
"""
首发企业申报
"""
from akshare.stock_fundamental.stock_ipo_declare import stock_ipo_declare
"""
三大报表
"""
from akshare.stock_feature.stock_em_report import (
stock_em_zcfz,
stock_em_lrb,
stock_em_xjll,
)
"""
业绩报告
"""
from akshare.stock_feature.stock_em_yjbb import stock_em_yjbb
"""
同花顺-行业板块
"""
from akshare.stock_feature.stock_board_industry_ths import (
stock_board_industry_cons_ths,
stock_board_industry_name_ths,
stock_board_industry_info_ths,
stock_board_industry_index_ths,
stock_ipo_benefit_ths,
)
"""
同花顺-概念板块
"""
from akshare.stock_feature.stock_board_concept_ths import (
stock_board_concept_cons_ths,
stock_board_concept_name_ths,
stock_board_concept_info_ths,
stock_board_concept_hist_ths,
stock_board_cons_ths,
)
"""
分红配送
"""
from akshare.stock_feature.stock_em_fhps import stock_em_fhps
"""
中美国债收益率
"""
from akshare.bond.bond_em import bond_zh_us_rate
"""
盈利预测
"""
from akshare.stock_fundamental.stock_profit_forecast import stock_profit_forecast
"""
基金经理
"""
from akshare.fund.fund_manager import fund_manager
"""
基金评级
"""
from akshare.fund.fund_rating import (
fund_rating_sh,
fund_rating_zs,
fund_rating_ja,
fund_rating_all,
)
"""
融资融券数据
"""
from akshare.stock_feature.stock_sse_margin import (
stock_margin_detail_sse,
stock_margin_sse,
)
"""
期货交割和期转现
"""
from akshare.futures.futures_to_spot import (
futures_to_spot_czce,
futures_to_spot_shfe,
futures_to_spot_dce,
futures_delivery_dce,
futures_delivery_shfe,
futures_delivery_czce,
futures_delivery_match_dce,
futures_delivery_match_czce,
)
"""
基金持仓
"""
from akshare.fund.fund_em_portfolio import fund_portfolio_hold_em
"""
债券概览
"""
from akshare.bond.bond_summary import bond_deal_summary_sse, bond_cash_summary_sse
"""
新闻-个股新闻
"""
from akshare.news.news_stock import stock_news_em
"""
股票数据-一致行动人
"""
from akshare.stock_feature.stock_em_yzxdr import stock_em_yzxdr
"""
大宗交易
"""
from akshare.stock.stock_dzjy import (
stock_dzjy_sctj,
stock_dzjy_mrmx,
stock_dzjy_mrtj,
stock_dzjy_hygtj,
stock_dzjy_yybph,
stock_dzjy_hyyybtj,
)
"""
国证指数
"""
from akshare.index.index_cni import (
index_cni_hist,
index_cni_all,
index_cni_detail,
index_cni_detail_hist,
index_cni_detail_hist_adjust,
)
"""
金十数据-新闻资讯
"""
from akshare.ws.js_ws_news import js_news
"""
东方财富-期权
"""
from akshare.option.option_em import option_current_em
"""
科创板报告
"""
from akshare.stock.stock_zh_kcb_report import stock_zh_kcb_report_em
"""
期货合约详情
"""
from akshare.futures.futures_contract_detail import futures_contract_detail
"""
胡润排行榜
"""
from akshare.fortune.fortune_hurun import hurun_rank
"""
新财富富豪榜
"""
from akshare.fortune.fortune_xincaifu_500 import xincaifu_rank
"""
福布斯中国榜单
"""
from akshare.fortune.fortune_forbes_500 import forbes_rank
"""
回购定盘利率
"""
from akshare.rate.repo_rate import repo_rate_hist
"""
公募基金排行
"""
from akshare.fund.fund_em_rank import (
fund_em_exchange_rank,
fund_em_money_rank,
fund_em_open_fund_rank,
fund_em_hk_rank,
fund_em_lcx_rank,
)
"""
英为财情-加密货币
"""
from akshare.crypto.crypto_hist_investing import crypto_hist, crypto_name_map
"""
电影票房
"""
from akshare.movie.movie_yien import (
movie_boxoffice_cinema_daily,
movie_boxoffice_cinema_weekly,
movie_boxoffice_weekly,
movie_boxoffice_daily,
movie_boxoffice_monthly,
movie_boxoffice_realtime,
movie_boxoffice_yearly,
movie_boxoffice_yearly_first_week,
)
"""
新闻联播文字稿
"""
from akshare.news.news_cctv import news_cctv
"""
债券收盘收益率曲线历史数据
"""
from akshare.bond.bond_china_money import (
bond_china_close_return,
bond_china_close_return_map,
)
"""
COMEX黄金-白银库存
"""
from akshare.futures.futures_comex import futures_comex_inventory
"""
国债期货可交割券相关指标
"""
from akshare.bond.bond_futures import bond_futures_deliverable_coupons
"""
A 股-特别标的
"""
from akshare.stock.stock_zh_a_special import (
stock_zh_a_new,
stock_zh_a_st_em,
stock_zh_a_new_em,
stock_zh_a_stop_em,
)
"""
东方财富-注册制审核
"""
from akshare.stock_fundamental.stock_register import (
stock_register_kcb,
stock_register_cyb,
stock_register_db,
)
"""
新浪财经-龙虎榜
"""
from akshare.stock_feature.stock_sina_lhb import (
stock_sina_lhb_detail_daily,
stock_sina_lhb_ggtj,
stock_sina_lhb_jgmx,
stock_sina_lhb_jgzz,
stock_sina_lhb_yytj,
)
"""
中证指数
"""
from akshare.index.zh_stock_index_csindex import (
stock_zh_index_hist_csindex,
stock_zh_index_value_csindex,
)
"""
股票基金持仓数据
"""
from akshare.stock.stock_fund_hold import (
stock_report_fund_hold,
stock_report_fund_hold_detail,
)
"""
期货分钟数据
"""
from akshare.futures.futures_zh_sina import (
futures_zh_minute_sina,
futures_zh_daily_sina,
)
"""
股票财务报告预约披露
"""
from akshare.stock_feature.stock_cninfo_yjyg import stock_report_disclosure
"""
基金行情
"""
from akshare.fund.fund_etf import fund_etf_hist_sina, fund_etf_category_sina
"""
交易日历
"""
from akshare.tool.trade_date_hist import tool_trade_date_hist_sina
"""
commodity option
"""
from akshare.option.option_commodity_sina import (
option_sina_commodity_contract_list,
option_sina_commodity_dict,
option_sina_commodity_hist,
)
"""
A 股PE和PB
"""
from akshare.stock_feature.stock_a_pb import stock_a_pb
from akshare.stock_feature.stock_a_pe import stock_a_pe
from akshare.stock_feature.stock_a_indicator import (
stock_a_lg_indicator,
stock_hk_eniu_indicator,
)
from akshare.stock_feature.stock_a_high_low import stock_a_high_low_statistics
from akshare.stock_feature.stock_a_below_net_asset_statistics import (
stock_a_below_net_asset_statistics,
)
"""
彭博亿万富豪指数
"""
from akshare.fortune.fortune_bloomberg import index_bloomberg_billionaires
"""
stock-券商业绩月报
"""
from akshare.stock_feature.stock_em_qsjy import stock_em_qsjy
"""
futures-warehouse-receipt
"""
from akshare.futures.futures_warehouse_receipt import (
futures_czce_warehouse_receipt,
futures_dce_warehouse_receipt,
futures_shfe_warehouse_receipt,
)
"""
stock-js
"""
from akshare.stock.stock_js_us import stock_js_price
"""
stock-summary
"""
from akshare.stock.stock_summary import (
stock_sse_summary,
stock_szse_summary,
stock_sse_deal_daily,
)
"""
股票-机构推荐池
"""
from akshare.stock_fundamental.stock_recommend import (
stock_institute_recommend,
stock_institute_recommend_detail,
)
"""
股票-机构持股
"""
from akshare.stock_fundamental.stock_hold import (
stock_institute_hold_detail,
stock_institute_hold,
)
"""
stock-info
"""
from akshare.stock.stock_info import (
stock_info_sh_delist,
stock_info_sz_delist,
stock_info_a_code_name,
stock_info_sh_name_code,
stock_info_sz_name_code,
stock_info_sz_change_name,
stock_info_change_name,
)
"""
stock-sector
"""
from akshare.stock.stock_industry import stock_sector_spot, stock_sector_detail
"""
stock-fundamental
"""
from akshare.stock_fundamental.stock_finance import (
stock_financial_abstract,
stock_financial_report_sina,
stock_financial_analysis_indicator,
stock_add_stock,
stock_ipo_info,
stock_history_dividend_detail,
stock_history_dividend,
stock_circulate_stock_holder,
stock_restricted_shares,
stock_fund_stock_holder,
stock_main_stock_holder,
)
"""
stock-HK-fundamental
"""
from akshare.stock_fundamental.stock_finance_hk import (
stock_financial_hk_analysis_indicator_em,
stock_financial_hk_report_em,
)
"""
stock_fund
"""
from akshare.stock.stock_fund import (
stock_individual_fund_flow,
stock_market_fund_flow,
stock_sector_fund_flow_rank,
stock_individual_fund_flow_rank,
)
"""
air-quality
"""
from akshare.air.air_zhenqi import (
air_quality_hist,
air_quality_rank,
air_quality_watch_point,
air_city_list,
)
"""
hf
"""
from akshare.hf.hf_sp500 import hf_sp_500
"""
stock_em_yjyg
"""
from akshare.stock_feature.stock_em_yjyg import (
stock_em_yjyg,
stock_em_yysj,
stock_em_yjkb,
)
"""
stock
"""
from akshare.stock_feature.stock_em_dxsyl import stock_em_dxsyl, stock_em_xgsglb
"""
article
"""
from akshare.article.fred_md import fred_md, fred_qd
"""
covid_19 CSSE
"""
from akshare.event.covid import (
covid_19_csse_daily,
covid_19_csse_global_confirmed,
covid_19_csse_global_death,
covid_19_csse_global_recovered,
covid_19_csse_us_death,
covid_19_csse_us_confirmed,
)
"""
futures_cfmmc
"""
from akshare.futures.futures_cfmmc import futures_index_cscidx_map, futures_index_cscidx
"""
futures_em_spot_stock
"""
from akshare.futures.futures_em_spot_stock import futures_spot_stock
"""
energy_oil
"""
from akshare.energy.energy_oil import energy_oil_detail, energy_oil_hist
"""
index-vix
"""
from akshare.economic.macro_other import index_vix
"""
futures-foreign
"""
from akshare.futures.futures_foreign import futures_foreign_detail, futures_foreign_hist
"""
stock-em-tfp
"""
from akshare.stock_feature.stock_em_tfp import stock_tfp_em
"""
stock-em-hsgt
"""
from akshare.stock_feature.stock_em_hsgt import (
stock_em_hsgt_north_acc_flow_in,
stock_em_hsgt_north_cash,
stock_em_hsgt_north_net_flow_in,
stock_em_hsgt_south_acc_flow_in,
stock_em_hsgt_south_cash,
stock_em_hsgt_south_net_flow_in,
stock_em_hsgt_hold_stock,
stock_em_hsgt_hist,
stock_em_hsgt_institution_statistics,
stock_em_hsgt_stock_statistics,
stock_em_hsgt_board_rank,
)
"""
stock-em-comment
"""
from akshare.stock_feature.stock_em_comment import stock_em_comment
"""
stock-em-analyst
"""
from akshare.stock_feature.stock_em_analyst import (
stock_em_analyst_detail,
stock_em_analyst_rank,
)
"""
tool-github
"""
from akshare.tool.tool_github import tool_github_star_list, tool_github_email_address
"""
sgx futures data
"""
from akshare.futures.futures_sgx_daily import futures_sgx_daily
"""
currency interface
"""
from akshare.currency.currency import (
currency_convert,
currency_currencies,
currency_history,
currency_latest,
currency_time_series,
)
"""
知识图谱
"""
from akshare.nlp.nlp_interface import nlp_ownthink, nlp_answer
"""
微博舆情报告
"""
from akshare.stock.stock_weibo_nlp import stock_js_weibo_nlp_time, stock_js_weibo_report
"""
金融期权-新浪
"""
from akshare.option.option_finance_sina import (
option_sina_cffex_hs300_list,
option_sina_cffex_hs300_spot,
option_sina_cffex_hs300_daily,
option_sina_sse_list,
option_sina_sse_expire_day,
option_sina_sse_codes,
option_sina_sse_spot_price,
option_sina_sse_underlying_spot_price,
option_sina_sse_greeks,
option_sina_sse_minute,
option_sina_sse_daily,
option_sina_finance_minute,
)
"""
中国-慈善
"""
from akshare.charity.charity_china import (
charity_china_organization,
charity_china_plan,
charity_china_platform,
charity_china_progress,
charity_china_report,
charity_china_trust,
)
"""
中国-特许经营数据
"""
from akshare.event.franchise import franchise_china
"""
债券-沪深债券
"""
from akshare.bond.bond_zh_sina import bond_zh_hs_daily, bond_zh_hs_spot
from akshare.bond.bond_zh_cov_sina import (
bond_zh_hs_cov_daily,
bond_zh_hs_cov_spot,
bond_cov_comparison,
bond_zh_cov,
bond_zh_cov_info,
bond_zh_hs_cov_min,
)
from akshare.bond.bond_convert import bond_cov_jsl
"""
for pro api
"""
from akshare.pro.data_pro import pro_api
"""
for pro api token set
"""
from akshare.utils.token_process import set_token
"""
债券质押式回购成交明细数据
"""
from akshare.bond.china_repo import bond_repo_zh_tick
"""
新型肺炎
"""
from akshare.event.covid import (
covid_19_trip,
covid_19_trace,
)
"""
基金数据接口
"""
from akshare.fund.fund_em import (
fund_em_open_fund_daily,
fund_em_open_fund_info,
fund_em_etf_fund_daily,
fund_em_etf_fund_info,
fund_em_financial_fund_daily,
fund_em_financial_fund_info,
fund_em_fund_name,
fund_em_graded_fund_daily,
fund_em_graded_fund_info,
fund_em_money_fund_daily,
fund_em_money_fund_info,
fund_em_value_estimation,
fund_em_hk_fund_hist,
)
"""
百度迁徙地图接口
"""
from akshare.event.covid import (
migration_area_baidu,
migration_scale_baidu,
)
"""
新增-事件接口新型冠状病毒接口
"""
from akshare.event.covid import (
covid_19_163,
covid_19_dxy,
covid_19_baidu,
covid_19_hist_city,
covid_19_hist_province,
)
"""
英为财情-外汇-货币对历史数据
"""
from akshare.fx.currency_investing import (
currency_hist,
currency_name_code,
currency_pair_map,
)
"""
商品期权-郑州商品交易所-期权-历史数据
"""
from akshare.option.option_czce import option_czce_hist
"""
宏观-经济数据-银行间拆借利率
"""
from akshare.interest_rate.interbank_rate_em import rate_interbank
"""
东方财富网-经济数据-银行间拆借利率
"""
from akshare.interest_rate.interbank_rate_em import rate_interbank
"""
金十数据中心-外汇情绪
"""
from akshare.economic.macro_other import macro_fx_sentiment
"""
金十数据中心-经济指标-欧元区
"""
from akshare.economic.macro_euro import (
macro_euro_gdp_yoy,
macro_euro_cpi_mom,
macro_euro_cpi_yoy,
macro_euro_current_account_mom,
macro_euro_employment_change_qoq,
macro_euro_industrial_production_mom,
macro_euro_manufacturing_pmi,
macro_euro_ppi_mom,
macro_euro_retail_sales_mom,
macro_euro_sentix_investor_confidence,
macro_euro_services_pmi,
macro_euro_trade_balance,
macro_euro_unemployment_rate_mom,
macro_euro_zew_economic_sentiment,
macro_euro_lme_holding,
macro_euro_lme_stock,
)
"""
金十数据中心-经济指标-央行利率-主要央行利率
"""
from akshare.economic.macro_bank import (
macro_bank_australia_interest_rate,
macro_bank_brazil_interest_rate,
macro_bank_china_interest_rate,
macro_bank_english_interest_rate,
macro_bank_euro_interest_rate,
macro_bank_india_interest_rate,
macro_bank_japan_interest_rate,
macro_bank_newzealand_interest_rate,
macro_bank_russia_interest_rate,
macro_bank_switzerland_interest_rate,
macro_bank_usa_interest_rate,
)
"""
义乌小商品指数
"""
from akshare.index.index_yw import index_yw
"""
股票指数-股票指数-成份股
"""
from akshare.index.index_cons import (
index_stock_info,
index_stock_cons,
index_stock_hist,
index_stock_cons_sina,
index_stock_cons_csindex,
stock_a_code_to_symbol,
)
"""
东方财富-股票账户
"""
from akshare.stock_feature.stock_em_account import stock_em_account
"""
期货规则
"""
from akshare.futures.futures_rule import futures_rule
"""
东方财富-商誉专题
"""
from akshare.stock_feature.stock_em_sy import (
stock_em_sy_profile,
stock_em_sy_yq_list,
stock_em_sy_jz_list,
stock_em_sy_list,
stock_em_sy_hy_list,
)
"""
东方财富-股票质押
"""
from akshare.stock_feature.stock_em_gpzy import (
stock_em_gpzy_pledge_ratio,
stock_em_gpzy_profile,
stock_em_gpzy_distribute_statistics_bank,
stock_em_gpzy_distribute_statistics_company,
stock_em_gpzy_industry_data,
stock_em_gpzy_pledge_ratio_detail,
)
"""
东方财富-机构调研
"""
from akshare.stock_feature.stock_em_jgdy import stock_em_jgdy_tj, stock_em_jgdy_detail
"""
IT桔子
"""
from akshare.fortune.fortune_it_juzi import (
death_company,
maxima_company,
nicorn_company,
)
"""
新浪主力连续接口
"""
from akshare.futures_derivative.sina_futures_index import (
futures_main_sina,
futures_display_main_sina,
)
"""
中国宏观杠杆率数据
"""
from akshare.economic.marco_cnbs import macro_cnbs
"""
大宗商品-现货价格指数
"""
from akshare.index.index_spot import spot_goods
"""
成本-世界各大城市生活成本
"""
from akshare.cost.cost_living import cost_living
"""
能源-碳排放权
"""
from akshare.energy.energy_carbon import (
energy_carbon_domestic,
energy_carbon_bj,
energy_carbon_eu,
energy_carbon_gz,
energy_carbon_hb,
energy_carbon_sz,
)
"""
中国证券投资基金业协会-信息公示
"""
from akshare.fund.fund_amac import (
amac_manager_info,
amac_member_info,
amac_member_sub_info,
amac_aoin_info,
amac_fund_account_info,
amac_fund_info,
amac_fund_sub_info,
amac_futures_info,
amac_manager_cancelled_info,
amac_securities_info,
amac_fund_abs,
amac_manager_classify_info,
amac_person_fund_org_list,
amac_person_bond_org_list,
)
"""
世界五百强公司排名接口
"""
from akshare.fortune.fortune_500 import fortune_rank, fortune_rank_eng
"""
申万行业一级
"""
from akshare.index.index_sw import (
sw_index_representation_spot,
sw_index_spot,
sw_index_second_spot,
sw_index_cons,
sw_index_daily,
sw_index_daily_indicator,
)
"""
谷歌指数
"""
from akshare.index.index_google import google_index
"""
百度指数
"""
from akshare.index.index_baidu import (
baidu_search_index,
baidu_info_index,
baidu_media_index,
)
"""
微博指数
"""
from akshare.index.index_weibo import weibo_index
"""
经济政策不确定性指数
"""
from akshare.article.epu_index import article_epu_index
"""
南华期货-南华指数
"""
from akshare.futures_derivative.nh_index_return import (
nh_return_index,
get_nh_list_table,
)
from akshare.futures_derivative.nh_index_price import nh_price_index
from akshare.futures_derivative.nh_index_volatility import nh_volatility_index
"""
空气-河北
"""
from akshare.air.air_hebei import air_quality_hebei
"""
timeanddate-日出和日落
"""
from akshare.air.time_and_date import sunrise_daily, sunrise_monthly
"""
新浪-指数实时行情和历史行情
"""
from akshare.stock.stock_zh_a_tick_tx_163 import (
stock_zh_a_tick_tx,
stock_zh_a_tick_tx_js,
stock_zh_a_tick_163,
stock_zh_a_tick_163_now,
)
"""
新浪-指数实时行情和历史行情
"""
from akshare.index.zh_stock_index_sina import (
stock_zh_index_daily,
stock_zh_index_spot,
stock_zh_index_daily_tx,
stock_zh_index_daily_em,
)
"""
外盘期货实时行情
"""
from akshare.futures.futures_hq_sina import (
futures_foreign_commodity_realtime,
futures_foreign_commodity_subscribe_exchange_symbol,
)
"""
FF多因子数据接口
"""
from akshare.article.ff_factor import article_ff_crr
"""
Realized Library 接口
"""
from akshare.article.risk_rv import (
article_oman_rv,
article_oman_rv_short,
article_rlab_rv,
)
"""
银保监分局本级行政处罚数据
"""
from akshare.bank.bank_cbirc_2020 import bank_fjcf_table_detail
"""
科创板股票
"""
from akshare.stock.stock_zh_kcb_sina import stock_zh_kcb_spot, stock_zh_kcb_daily
"""
A股
"""
from akshare.stock.stock_zh_a_sina import (
stock_zh_a_spot,
stock_zh_a_daily,
stock_zh_a_minute,
stock_zh_a_cdr_daily,
)
"""
A+H股
"""
from akshare.stock.stock_zh_ah_tx import (
stock_zh_ah_spot,
stock_zh_ah_daily,
stock_zh_ah_name,
)
"""
加密货币
"""
from akshare.economic.macro_other import crypto_js_spot
"""
金融期权
"""
from akshare.option.option_finance import (
option_finance_board,
option_finance_underlying,
)
"""
新浪-美股实时行情数据和历史行情数据(前复权)
"""
from akshare.stock.stock_us_sina import (
stock_us_daily,
stock_us_spot,
get_us_stock_name,
stock_us_fundamental,
)
"""
新浪-港股实时行情数据和历史数据(前复权和后复权因子)
"""
from akshare.stock.stock_hk_sina import stock_hk_daily, stock_hk_spot
"""
新浪-期货实时数据
"""
from akshare.futures.futures_zh_sina import futures_zh_spot, match_main_contract
"""
西本新干线-指数数据
"""
from akshare.futures_derivative.futures_xgx import _get_code_pic, futures_xgx_index
"""
生意社-商品与期货-现期图数据
"""
from akshare.futures_derivative.sys_spot_futures import (
get_sys_spot_futures,
get_sys_spot_futures_dict,
)
"""
和讯财经-行情及历史数据
"""
from akshare.stock.stock_us_zh_hx import stock_us_zh_spot, stock_us_zh_daily
"""
和讯财经-企业社会责任
"""
from akshare.stock.stock_zh_zrbg_hx import stock_zh_a_scr_report
"""
全球宏观-机构宏观
"""
from akshare.economic.macro_constitute import (
macro_cons_gold_amount,
macro_cons_gold_change,
macro_cons_gold_volume,
macro_cons_opec_month,
macro_cons_silver_amount,
macro_cons_silver_change,
macro_cons_silver_volume,
)
"""
全球宏观-美国宏观
"""
from akshare.economic.macro_usa import (
macro_usa_eia_crude_rate,
macro_usa_non_farm,
macro_usa_unemployment_rate,
macro_usa_adp_employment,
macro_usa_core_pce_price,
macro_usa_cpi_monthly,
macro_usa_crude_inner,
macro_usa_gdp_monthly,
macro_usa_initial_jobless,
macro_usa_lmci,
macro_usa_api_crude_stock,
macro_usa_building_permits,
macro_usa_business_inventories,
macro_usa_cb_consumer_confidence,
macro_usa_core_cpi_monthly,
macro_usa_core_ppi,
macro_usa_current_account,
macro_usa_durable_goods_orders,
macro_usa_trade_balance,
macro_usa_spcs20,
macro_usa_services_pmi,
macro_usa_rig_count,
macro_usa_retail_sales,
macro_usa_real_consumer_spending,
macro_usa_ppi,
macro_usa_pmi,
macro_usa_personal_spending,
macro_usa_pending_home_sales,
macro_usa_nfib_small_business,
macro_usa_new_home_sales,
macro_usa_nahb_house_market_index,
macro_usa_michigan_consumer_sentiment,
macro_usa_exist_home_sales,
macro_usa_export_price,
macro_usa_factory_orders,
macro_usa_house_price_index,
macro_usa_house_starts,
macro_usa_import_price,
macro_usa_industrial_production,
macro_usa_ism_non_pmi,
macro_usa_ism_pmi,
macro_usa_job_cuts,
macro_usa_cftc_nc_holding,
macro_usa_cftc_c_holding,
macro_usa_cftc_merchant_currency_holding,
macro_usa_cftc_merchant_goods_holding,
macro_usa_phs,
)
"""
全球宏观-中国宏观
"""
from akshare.economic.macro_china import (
macro_china_cpi_monthly,
macro_china_cpi_yearly,
macro_china_m2_yearly,
macro_china_fx_reserves_yearly,
macro_china_cx_pmi_yearly,
macro_china_pmi_yearly,
macro_china_daily_energy,
macro_china_non_man_pmi,
macro_china_rmb,
macro_china_gdp_yearly,
macro_china_shrzgm,
macro_china_ppi_yearly,
macro_china_cx_services_pmi_yearly,
macro_china_market_margin_sh,
macro_china_market_margin_sz,
macro_china_au_report,
macro_china_ctci_detail,
macro_china_ctci_detail_hist,
macro_china_ctci,
macro_china_exports_yoy,
macro_china_hk_market_info,
macro_china_imports_yoy,
macro_china_trade_balance,
macro_china_shibor_all,
macro_china_industrial_production_yoy,
macro_china_gyzjz,
macro_china_lpr,
macro_china_new_house_price,
macro_china_enterprise_boom_index,
macro_china_national_tax_receipts,
macro_china_new_financial_credit,
macro_china_fx_gold,
macro_china_money_supply,
macro_china_stock_market_cap,
macro_china_cpi,
macro_china_gdp,
macro_china_ppi,
macro_china_pmi,
macro_china_gdzctz,
macro_china_hgjck,
macro_china_czsr,
macro_china_whxd,
macro_china_wbck,
macro_china_bond_public,
macro_china_gksccz,
macro_china_hb,
macro_china_xfzxx,
macro_china_reserve_requirement_ratio,
macro_china_consumer_goods_retail,
macro_china_society_electricity,
macro_china_society_traffic_volume,
macro_china_postal_telecommunicational,
macro_china_international_tourism_fx,
macro_china_passenger_load_factor,
macro_china_freight_index,
macro_china_central_bank_balance,
macro_china_insurance,
macro_china_supply_of_money,
macro_china_swap_rate,
macro_china_foreign_exchange_gold,
macro_china_retail_price_index,
macro_china_real_estate,
macro_china_qyspjg,
macro_china_fdi,
)
"""
全球期货
"""
from akshare.futures.futures_international import (
futures_global_commodity_hist,
futures_global_commodity_name_url_map,
)
"""
外汇
"""
from akshare.fx.fx_quote import fx_pair_quote, fx_spot_quote, fx_swap_quote
"""
债券行情
"""
from akshare.bond.china_bond import bond_spot_quote, bond_spot_deal, bond_china_yield
"""
商品期权
"""
from akshare.option.option_commodity import (
option_dce_daily,
option_czce_daily,
option_shfe_daily,
)
"""
英为财情-债券
"""
from akshare.bond.bond_investing import (
bond_investing_global,
bond_investing_global_country_name_url,
)
"""
英为财情-指数
"""
from akshare.index.index_investing import (
index_investing_global,
index_investing_global_country_name_url,
index_investing_global_from_url,
)
"""
99期货-期货库存数据
"""
from akshare.futures.futures_inventory import futures_inventory_99
"""
东方财富-期货库存数据
"""
from akshare.futures.futures_inventory_em import futures_inventory_em
"""
中国银行间市场交易商协会
"""
from akshare.bond.bond_bank import get_bond_bank
"""
奇货可查-工具模块
"""
from akshare.qhkc_web.qhkc_tool import qhkc_tool_foreign, qhkc_tool_gdp
"""
奇货可查-指数模块
"""
from akshare.qhkc_web.qhkc_index import (
get_qhkc_index,
get_qhkc_index_trend,
get_qhkc_index_profit_loss,
)
"""
奇货可查-资金模块
"""
from akshare.qhkc_web.qhkc_fund import (
get_qhkc_fund_money_change,
get_qhkc_fund_bs,
get_qhkc_fund_position,
)
"""
大宗商品现货价格及基差
"""
from akshare.futures.futures_basis import (
futures_spot_price_daily,
futures_spot_price,
futures_spot_price_previous,
)
"""
期货持仓成交排名数据
"""
from akshare.futures.cot import (
get_rank_sum_daily,
get_rank_sum,
get_shfe_rank_table,
get_czce_rank_table,
get_dce_rank_table,
get_cffex_rank_table,
futures_dce_position_rank,
futures_dce_position_rank_other,
)
"""
大宗商品仓单数据
"""
from akshare.futures.receipt import get_receipt
"""
大宗商品展期收益率数据
"""
from akshare.futures.futures_roll_yield import get_roll_yield_bar, get_roll_yield
"""
交易所日线行情数据
"""
from akshare.futures.futures_daily_bar import (
get_cffex_daily,
get_czce_daily,
get_shfe_v_wap,
get_shfe_daily,
get_dce_daily,
get_futures_daily,
)
| 25.871698 | 118 | 0.79617 | ["MIT"] | LoveRabbit007/akshare | akshare/__init__.py | 106,616 | Python |
from typing import Dict
from flask_babel import _
from anyway.backend_constants import InjurySeverity
from anyway.infographics_dictionaries import segment_dictionary
from anyway.models import InvolvedMarkerView
from anyway.request_params import RequestParams
from anyway.widgets.suburban_widgets.sub_urban_widget import SubUrbanWidget
from anyway.widgets.widget import register
from anyway.widgets.widget_utils import (
get_accidents_stats,
gen_entity_labels,
get_injured_filters,
format_2_level_items,
sort_and_fill_gaps_for_stacked_bar,
)
@register
class InjuredCountByAccidentYearWidget(SubUrbanWidget):
name: str = "injured_count_by_accident_year"
def __init__(self, request_params: RequestParams):
super().__init__(request_params, type(self).name)
self.rank = 9
self.information = (
"Fatal, severe and light injured count in the specified years, split by injury severity"
)
def generate_items(self) -> None:
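        # Two-step aggregation: first count injured persons per (accident_year,
        # injury_severity) from the involved-persons view, then pad any missing
        # year/severity combination with zeros so that every year in the requested
        # range gets a bar segment for each severity level.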
res1 = get_accidents_stats(
table_obj=InvolvedMarkerView,
filters=get_injured_filters(self.request_params.location_info),
group_by=("accident_year", "injury_severity"),
count="injury_severity",
start_time=self.request_params.start_time,
end_time=self.request_params.end_time,
)
res2 = sort_and_fill_gaps_for_stacked_bar(
res1,
range(self.request_params.start_time.year, self.request_params.end_time.year + 1),
{
InjurySeverity.KILLED.value: 0,
InjurySeverity.SEVERE_INJURED.value: 0,
InjurySeverity.LIGHT_INJURED.value: 0,
},
)
self.items = format_2_level_items(res2, None, InjurySeverity)
@staticmethod
def localize_items(request_params: RequestParams, items: Dict) -> Dict:
items["data"]["text"] = {
"title": _("Number of injured in accidents, per year, split by severity")
+ f" - {segment_dictionary[request_params.location_info['road_segment_name']]}",
"labels_map": gen_entity_labels(InjurySeverity),
}
return items
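# Note: the bare _() call below appears to exist only so that babel's message
# extraction picks up the widget's ``information`` string for translation;
# calling it here has no other runtime effect.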
_("Fatal, severe and light injured count in the specified years, split by injury severity")
| 37.870968 | 101 | 0.677172 | ["MIT"] | BusinessLanguage/anyway | anyway/widgets/suburban_widgets/injured_count_by_accident_year_widget.py | 2,348 | Python |
import pytest
from ml_api.api.app import create_app
from ml_api.api.config import TestingConfig
# Fixtures provide an easy way to set up and tear down test resources.
@pytest.fixture
def app():
app = create_app(config_object=TestingConfig)
with app.app_context():
yield app
@pytest.fixture
def flask_test_client(app):
with app.test_client() as test_client:
yield test_client
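
# A minimal usage sketch for these fixtures. The "/health" endpoint is hypothetical;
# substitute a route actually exposed by ml_api.api.app:
#
# def test_health_endpoint(flask_test_client):
#     response = flask_test_client.get("/health")
#     assert response.status_code == 200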
| 22.111111 | 61 | 0.753769 | [
"MIT"
] | iameminmammadov/bigmart | packages/ml_api/tests/conftest.py | 398 | Python |
import scipy, copy
import SloppyCell.Utility
load = SloppyCell.Utility.load
save = SloppyCell.Utility.save
import SloppyCell.ReactionNetworks.Dynamics as Dynamics
try:
import SloppyCell.Plotting as Plotting
except ImportError:
pass
def setup(paramfile,calcobject,senstrajfile,jtjfile) :
""" Set up the quantities necessary to run the optimal design
algorithms. NOTE: This function needs to be called first
before any of the optimal design functions can be called.
paramfile: the name of a pickled file containing the
best fit parameters in KeyedList format
calcobject: the calculation object for which we are doing the
optimal design. (Note that in general, may be searching a
design over many different calculations, but here we only
consider one. Thus, we set design_sentraj equal to senstraj)
senstrajfile: the name of the file containing the pickled
sensitivity trajectory for the calculation, calcobject,
for the set of parameters in paramfile.
jtjfile: the name of the file containing the pickled Fisher
Information Matrix (J^t J) for the current set of data and
for the parameters in paramfile.
NOTE: The derivatives computed for J^tJ need to be with respect
to the *log* of the parameters
"""
import OptDesign as v
v.curp = load(paramfile)
v.jtj = load(jtjfile)
v.clc = calcobject
v.senstraj = load(senstrajfile)
v.design_senstraj = v.senstraj
v.p_names_ordered = v.curp.keys()
v.jtjdict = {}
for pindex1,pname1 in enumerate(v.p_names_ordered) :
for pindex2,pname2 in enumerate(v.p_names_ordered) :
v.jtjdict[(pname1,pname2)] = v.jtj[pindex1][pindex2]
v.ovvarnames = v.clc.optimizableVars.keys()
v.jtjtrunc = scipy.zeros((len(v.ovvarnames),len(v.ovvarnames)),scipy.float_)
# The number of optimizable variables for the calculation we are
# considering might be less than the number of parameters for the
# whole model. We are only working with this calculation so we
# need to trim down the J^t J (Fisher information) matrix
# accordingly
for pindex1,pname1 in enumerate(v.ovvarnames) :
for pindex2,pname2 in enumerate(v.ovvarnames) :
v.jtjtrunc[pindex1][pindex2] = v.jtjdict[(pname1,pname2)]
def make_sens_traj(calcobject,params,times,senstrajfilename):
""" Make the sensitivity trajectory for the calculation
calcoject (same as in setup(...) above).
params: parameters as a KeyedList, sensitivity traj is
calculated at these parameters (should be same as in paramfile
in setup(...) above)
times: the timepoints in the sensitivity trajectory (1-d array)
senstrajfilename: the file to save the sensitivity trajectory to
Note that if times is very finely spaced, the
sensitivity trajectory will need a lot of storage space """
senstraj = Dynamics.integrate_sensitivity(calcobject, times, params, 1.0e-6)
save(senstraj,senstrajfilename)
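# A minimal usage sketch of the call order described in the docstrings above. The
# file names, the calculation object `net`, and the chemical names are placeholders:
#
#   times = scipy.linspace(0.0, 100.0, 201)
#   make_sens_traj(net, load('bestfit.params'), times, 'senstraj.bp')
#   setup('bestfit.params', net, 'senstraj.bp', 'jtj.bp')
#   best_change, best_chem, best_time = design_over_chems(
#       ['hidden_chem'], ['measurable_chem'], logprior=scipy.log(1000.0))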
def design_over_chems(chemnames,designchemnames,logprior=1.0e20) :
"""
chemnames = list of unmeasurable chemicals
designchemnames = list of measurable chemicals
logprior = prior on params, e.g. logprior = log(1000.0) means
parameter standard deviation will be less than a factor of 1000.0
Out of the list chemnames, find the best chemical and
best time point, that most reduces the integrated variance
over designchemnames """
times = design_senstraj.timepoints
trunc_times = [times[i] for i in scipy.arange(0,len(times),1)]
best_change = 0.0 # the change should always be negative
best_chem = "None"
best_time = "None"
for dchemname in designchemnames :
print "On design chemical ", dchemname
for t in trunc_times :
sensvect_design = get_sens_vect(dchemname,t)
# NOTE: assuming a 10% error on the measurement --- use 10% of the
# maximum value in the trajectory
maxval = max(design_senstraj.get_var_traj(dchemname)) + 1.0
sensvect_design = sensvect_design/(.1*maxval)
intvar_change = integrated_var_change(chemnames,sensvect_design,logprior)
tot_change = 0.0
for id in chemnames :
tot_change = tot_change + intvar_change[id]
if tot_change < best_change :
best_change = tot_change
best_chem = dchemname
best_time = t
return best_change, best_chem, best_time
def design_over_single_variance(sensvect,designchemnames,logprior=1.0e20) :
"""
sensvect : a sensitivity vector (length = # of params) of
unmeasurable quantity of interest
designchemnames : list of measurable chemicals
sensvect could be the sensitivity of a single chemical at a
single timepoint; then can use method get_sens_vect (see elsewhere
in this file) to compute this sensitivity vector. In that
case we are designing over the species variance at that single point
"""
times = senstraj.timepoints
trunc_times = [times[i] for i in scipy.arange(0,len(times),5)]
best_change = 0.0 # the change should always be negative
best_chem = "None"
best_time = "None"
for dchemname in designchemnames :
for t in trunc_times :
sensvect_design = get_sens_vect(dchemname,t)
var_change = single_variance_change(sensvect,sensvect_design,logprior)
if var_change < best_change :
best_change = var_change
best_chem = dchemname
best_time = t
return best_change, best_chem, best_time
def variances(chemnames,logprior=1.0e20) :
""" chemnames : list of chemical names for which the
variance at all timepoints will be computed
logprior : prior on parameters. logprior = log(1000.0)
means params allowed to vary by about a factor of 1000.0
return values :
times: times of the trajectory
bestfit: a dictionary of best fit trajectories (keys are entries in chemnames)
var: a dictionary of variances (keys are entries in chemnames)
"""
#senstraj = load('EndogenousEGFR3T3sensNoPriors')
times = senstraj.timepoints
jtjinv = scipy.linalg.inv(jtjtrunc+1.0/logprior**2*scipy.eye(
len(jtjtrunc),len(jtjtrunc)))
var = {}
bestfit = {}
optvarkeys = clc.optimizableVars.keys()
first = optvarkeys[0]
last = optvarkeys[-1]
for name in chemnames :
var[name] = []
bestfit[name] = []
chemindex = senstraj.key_column.get(name)
index1sens = senstraj.key_column.get((name,first))
index2sens = senstraj.key_column.get((name,last))
sensarray_this_chem = copy.copy(senstraj.values[:,index1sens:(index2sens+1)])
# Turn sensitivities into sensitivities with respect to log parameters
for j, pname in enumerate(ovvarnames) :
sensarray_this_chem[:,j] = sensarray_this_chem[:,j]*curp.get(pname)
tmp = scipy.dot(sensarray_this_chem,jtjinv)
for i in range(len(tmp[:,0])) :
var[name].append(scipy.dot(tmp[i,:],sensarray_this_chem[i,:]))
bestfit[name] = senstraj.values[:,chemindex]
var[name] = scipy.asarray(var[name])
return times, bestfit, var
def variances_log_chems(chemnames,logprior=1.0e20) :
""" Same as above except the variances are now on the
logs of the chemicals trajectories.
"""
#senstraj = load('EndogenousEGFR3T3sensNoPriors')
times = senstraj.timepoints
jtjinv = scipy.linalg.inv(jtjtrunc+1.0/logprior**2*scipy.eye(
len(jtjtrunc),len(jtjtrunc)))
var = {}
bestfit = {}
optvarkeys = clc.optimizableVars.keys()
first = optvarkeys[0]
last = optvarkeys[-1]
for name in chemnames :
var[name] = []
bestfit[name] = []
chemindex = senstraj.key_column.get(name)
index1sens = senstraj.key_column.get((name,first))
index2sens = senstraj.key_column.get((name,last))
sensarray_this_chem = copy.copy(senstraj.values[:,index1sens:(index2sens+1)])
traj_this_chem = copy.copy(senstraj.values[:,chemindex])
for j, pname in enumerate(ovvarnames) :
sensarray_this_chem[:,j] = sensarray_this_chem[:,j]*curp.get(pname)
# need to scale each row by 1/chemvalue to mimic a derivative w.r.t.
# log chemicals. Add a small value to chemvalue to avoid divide by zero
for i in range(len(times)) :
sensarray_this_chem[i,:] = sensarray_this_chem[i,:]/(traj_this_chem[i]+1.0e-6)
tmp = scipy.dot(sensarray_this_chem,jtjinv)
for i in range(len(tmp[:,0])) :
var[name].append(scipy.dot(tmp[i,:],sensarray_this_chem[i,:]))
bestfit[name] = senstraj.values[:,chemindex]
var[name] = scipy.asarray(var[name])
return times,bestfit,var
def single_variance(sensvect,logprior=1.0e20) :
""" Get the variance for a single function of parameters
that has a sensitivity vector sensvect. Useful for looking at
variances in parameter combinations, or simple functions of
parameters. Note that if we are concerned with ratios and
products of parameters, it's often best to consider sensvect
as a sensitivity w.r.t. log parameters """
jtjinv = scipy.linalg.inv(jtjtrunc+1.0/logprior**2*scipy.eye(
len(jtjtrunc),len(jtjtrunc)))
tmp = scipy.dot(jtjinv,sensvect)
var = scipy.dot(sensvect,tmp)
return var
def variance_change(chemnames,sensvect_design,logprior=1.0e20) :
"""
chemnames : list of chemical names at which we will look
at variance
sensvect_design : the sensitivity vector (one by no. params array) at
the new design point.
returns : (times, varchange)
the times and the change in variances at those times (should
be negative) for each of the chemicals in chemnames, after the
addition of the new timepoint. varchange is a dictionary
indexed by entries in chemnames.
"""
times = senstraj.timepoints
n = len(jtjtrunc)
jtjinv = scipy.linalg.inv(jtjtrunc+1.0/logprior**2*scipy.eye(n,n))
#sensvect_design = scipy.resize(sensvect_design,(n,1))
jtjinv_design = scipy.dot(jtjinv,sensvect_design)
#jtjinv_design = scipy.resize(jtjinv_design,(n,1)) # want a column vector
denominator = 1.0 + scipy.dot(sensvect_design,jtjinv_design)
varchange = {}
optvarkeys = clc.optimizableVars.keys()
first = optvarkeys[0]
last = optvarkeys[-1]
for name in chemnames :
varchange[name] = []
chemindex = senstraj.key_column.get(name)
index1sens = senstraj.key_column.get((name,first))
index2sens = senstraj.key_column.get((name,last))
sensarray_this_chem = copy.copy(senstraj.values[:,index1sens:(index2sens+1)])
for j, pname in enumerate(ovvarnames) :
sensarray_this_chem[:,j] = sensarray_this_chem[:,j]*curp.get(pname)
product = scipy.dot(sensarray_this_chem,jtjinv_design)
# this product is a number of timepoints by one vector, we need to
# square each element for the final formula
varchange[name] = -scipy.asarray(product**2/denominator)
return times, varchange
def single_variance_change(sensvect,sensvect_design,logprior=1.0e20) :
"""
sensvect : given a single function f(p) of parameters, this is the
derivative w.r.t. each of the parameters (in log parameters). For
ratios or products of rate constants, f(p) is a linear function
sensvect_design : the sensitivity vector of the new point in the
design you wish to add
returns: the variance change of the quantity f(p), given the
addition of the new data point, with sensitivity vector sensvect_design.
"""
n = len(jtjtrunc)
jtjinv = scipy.linalg.inv(jtjtrunc+1.0/logprior**2*scipy.eye(n,n))
jtjinv_design = scipy.dot(jtjinv,sensvect_design)
denominator = 1.0 + scipy.dot(sensvect_design,jtjinv_design)
product = scipy.dot(sensvect,jtjinv_design)
return -product**2/denominator
def get_sens_vect(chemname,time) :
""" get a sensitivity vector for a chemical "chemname" at a
time, time """
tindex = design_senstraj._get_time_index(time,1.0e-4)
optvarkeys = clc.optimizableVars.keys()
first = optvarkeys[0]
last = optvarkeys[-1]
index1sens = design_senstraj.key_column.get((chemname,first))
index2sens = design_senstraj.key_column.get((chemname,last))
sens_vect = copy.copy(
design_senstraj.values[tindex,index1sens:(index2sens+1)])
for j, pname in enumerate(ovvarnames) :
sens_vect[j] = sens_vect[j]*curp.get(pname)
return sens_vect
def get_sens_array(chemname) :
""" get an array of sens_vects for all the times the chemical is defined
and convert to log sensitivities """
optvarkeys = clc.optimizableVars.keys()
first = optvarkeys[0]
last = optvarkeys[-1]
chemindex = design_senstraj.key_column.get(chemname)
index1sens = design_senstraj.key_column.get((chemname,first))
index2sens = design_senstraj.key_column.get((chemname,last))
sensarray_this_chem = copy.copy(
design_senstraj.values[:,index1sens:(index2sens+1)])
for j, pname in enumerate(ovvarnames) :
sensarray_this_chem[:,j] = sensarray_this_chem[:,j]*curp.get(pname)
return sensarray_this_chem
def integrated_var_change(chemnames,sensvect_design,logprior=1.0e20) :
times, varchange = variance_change(chemnames,sensvect_design,logprior)
int_varchange = {}
for name in varchange.keys() :
int_varchange[name] = scipy.integrate.simps(varchange[name],times)
return int_varchange
def var_change_weighted(weights,chemnames,sensarray_design,logprior=1.0e20) :
""" This is similar to var_change except now we pass in a sensarray
instead of sensvect --- this is a matrix of sensvects aligned rowwise.
Row i will be multiplied by sqrt(weights[i]) where sum(weights)=1 and
each weight is a number between zero and one. We will return the
change in variance for all the chemicals in chemnames """
# we use the formula (Sherman-Woodbury-Morrison)
# (A+UV^t)^(-1) = A^(-1) - A^(-1)*U*(I + V^T*A^(-1)*U)^(-1)*V^t*A^(-1)
# where U = V and V^t = W^(1/2)*sensarray_design
times = senstraj.timepoints
ntimes = len(times)
k,n = sensarray_design.shape
jtjinv = scipy.linalg.inv(jtjtrunc+1.0/logprior**2*scipy.eye(n,n))
Vt = scipy.zeros((k,n),scipy.float_)
for i in range(k) :
Vt[i,:] = scipy.sqrt(weights[i])*sensarray_design[i,:]
design_jtjinv = scipy.dot(Vt,jtjinv)
#jtjinv_design = scipy.resize(jtjinv_design,(n,1)) # want a column vector
denominator = scipy.eye(k,k) + \
scipy.dot(design_jtjinv,scipy.transpose(Vt))
inv_denom = scipy.linalg.inv(denominator)
varchange = {}
optvarkeys = clc.optimizableVars.keys()
first = optvarkeys[0]
last = optvarkeys[-1]
for name in chemnames :
varchange[name] = []
chemindex = senstraj.key_column.get(name)
index1sens = senstraj.key_column.get((name,first))
index2sens = senstraj.key_column.get((name,last))
sensarray_this_chem = copy.copy(senstraj.values[:,index1sens:(index2sens+1)])
for j, pname in enumerate(ovvarnames) :
sensarray_this_chem[:,j] = sensarray_this_chem[:,j]*curp.get(pname)
product = scipy.dot(design_jtjinv,
scipy.transpose(sensarray_this_chem))
# each column vector of this matrix has to be dotted through the
# denominator matrix --- each column is a different time point
for j in range(ntimes) :
quadprod = scipy.dot(product[:,j],inv_denom)
quadprod = scipy.dot(quadprod,product[:,j])
varchange[name].append(-quadprod)
varchange[name] = scipy.asarray(varchange[name])
return times, varchange
def integrated_var_change_weighted(weights,chemnames,sensarray_design,logprior=1.0e20) :
times, varchange = var_change_weighted(weights,chemnames,sensarray_design,
logprior)
intvarchange = {}
for name in varchange.keys() :
intvarchange[name] = scipy.integrate.simps(varchange[name],times)
return intvarchange
def weight_cost(weights,chemnames,sensarray_design,logprior=1.0e20) :
""" For this cost function we're going to assume unconstrained
variables are being passed in, so we need to convert them to
a range between 0 and 1. The sum of the weights should also = 1 """
weights0to1 = weights_trans(weights)
# now weights lie between 0 and 1
weights0to1 = weights0to1/scipy.sum(weights0to1) # this makes sure
# weights sum up to 1.
intvarchange = integrated_var_change_weighted(weights0to1,chemnames,
sensarray_design,logprior)
cost = 0.0
for n in intvarchange.keys() :
cost = cost + intvarchange[n]
return cost
def weights_trans(weights) :
wtrans = (scipy.sin(weights)+1.0)/2.0
return wtrans
def weights_inv_trans(transweights) :
w = scipy.arcsin(2.0*transweights-1.0)
return w
def minimize_weight_cost(weights,chemnames,sensarray_design,logprior=1.0e20) :
"""
weights : a vector of positive numbers with length the same as the number of
rows of sensarray_design. The weights should sum to 1
chemnames: a list of unmeasurable chemical names over which we wish
to design experiments
sensarray_design: an array of sensitivities of measurable chemicals
or just an array of sensitivity vectors, each row a different
sensitivity vector
logprior : prior on parameters. logprior = log(1000.0) allows parameters
to fluctuate by a factor of 1000 """
weights_trans = scipy.arcsin(2.0*weights-1.0)
# maxiter may need to be increased if convergence is not apparent
# or if the number of weights is increased
w = scipy.optimize.fmin(weight_cost,weights_trans,maxiter = 10000,
args=(chemnames,sensarray_design,logprior))
woptnotnormed = (scipy.sin(w)+1.0)/2.0
wopt = woptnotnormed/scipy.sum(woptnotnormed)
return woptnotnormed,wopt
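# Sketch of a weighted-design run (the chemical names are placeholders; the initial
# weights are uniform and sum to one, as the docstring above requires):
#
#   sensarray = get_sens_array('measurable_chem')
#   w0 = scipy.ones(sensarray.shape[0]) / sensarray.shape[0]
#   woptnotnormed, wopt = minimize_weight_cost(w0, ['hidden_chem'], sensarray,
#                                              logprior=scipy.log(1000.0))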
def plot_variances(chemnames,logprior,scale=1.0,return_var = False) :
"""
chemnames: list of chemical names
logprior: prior on params. logprior = log(1000.0) means parameters
allowed to fluctuate by a factor of 1000 """
times, bestfit, var = variances(chemnames,logprior)
for key in bestfit.keys() :
Plotting.figure()
Plotting.plot(times,bestfit[key]/scale)
Plotting.hold(True)
Plotting.plot(times,bestfit[key]/scale + scipy.sqrt(var[key])/scale,'r--')
Plotting.plot(times,bestfit[key]/scale - scipy.sqrt(var[key])/scale,'r--')
Plotting.title(key,fontsize=16)
Plotting.xlabel('time (minutes)',fontsize=16)
Plotting.ylabel('number of molecules',fontsize=16)
xtics = Plotting.gca().get_xticklabels()
ytics = Plotting.gca().get_yticklabels()
Plotting.setp(xtics,size=16)
Plotting.setp(ytics,size=16)
#Plotting.axis([0.0,40.0,-.01,1.2e4])
Plotting.show()
if return_var :
return times, bestfit, var
def plot_variances_log_chems(chemnames,logprior) :
"""
chemnames: list of chemical names
logprior: prior on params
Plots the standard deviation of the chemicals when the variance
is computed using logs of the chemical trajectories. This
makes sure the final plots do not have best_fit+-stddev that
do not become negative """
times, bestfit, var = variances_log_chems(chemnames,logprior)
for key in bestfit.keys() :
Plotting.figure()
Plotting.plot(times,bestfit[key])
Plotting.hold(True)
Plotting.plot(times,bestfit[key]*scipy.exp(scipy.sqrt(var[key])),'r-')
Plotting.plot(times,bestfit[key]*scipy.exp(-scipy.sqrt(var[key])),'r-')
Plotting.title(key,fontsize=14)
Plotting.xlabel('time')
Plotting.ylabel('arb. units')
#Plotting.axis([0.0,40.0,-.01,1.2e4])
Plotting.show()
def plot_variance_newpoint(chemnames,sensvect_design,logprior=1.0e20,
return_data = True) :
"""
chemnames: list of chemical names
sensvect_design: a sensivity vector of a quantity that is
measurable
This will plot the old and new variances of the chemicals in
chemnames, given a new measurement that has sensitivity vector
sensvect_design
"""
times,bestfit,var = variances(chemnames,logprior)
times,varchange = variance_change(chemnames,sensvect_design,logprior)
for key in bestfit.keys() :
Plotting.figure()
Plotting.plot(times,bestfit[key])
Plotting.hold(True)
Plotting.plot(times,bestfit[key] + scipy.sqrt(var[key]),'r-')
Plotting.plot(times,bestfit[key] - scipy.sqrt(var[key]),'r-')
Plotting.plot(times,bestfit[key] + scipy.sqrt(var[key]+varchange[key]),'k--')
Plotting.plot(times,bestfit[key] - scipy.sqrt(var[key]+varchange[key]),'k--')
Plotting.title(key,fontsize=14)
Plotting.xlabel('time')
Plotting.ylabel('arb. units')
Plotting.axis([0.0,40.0,-.01,1.2e4])
Plotting.show()
if return_data :
newvar = {}
for ky in var.keys() :
newvar[ky] = var[key] + varchange[key]
return times,bestfit,newvar
def plot_variance_newweights(weights,chemnames,sensarray_design,logprior=1.0e20,scale=1.0,return_data = True) :
"""
weights : a proposed set of weights for each of the row vectors in
sensarray_design
chemnames : a list of chemicals for which we will plot the variance
logprior : as before
This will plot the old and new variances on chemnames, similar to
above.
NOTE: the weights that are passed in do not necessarily have to sum to
one. e.g. if the weights are normalized such that max(weights) = 1, then
by scaling all the weights by 1/sigma, you are then assuming that
the most accurate measurement has an error of size sigma. sigma for
example could be 20% of the maximum value of a trajectory.
"""
times,bestfit,var = variances(chemnames,logprior)
times,varchange = var_change_weighted(weights,chemnames,sensarray_design,logprior)
for key in bestfit.keys() :
Plotting.figure()
Plotting.plot(times,scale*bestfit[key])
Plotting.hold(True)
Plotting.plot(times,scale*bestfit[key] + scale*scipy.sqrt(var[key]),'r-')
Plotting.plot(times,scale*bestfit[key] - scale*scipy.sqrt(var[key]),'r-')
Plotting.plot(times,scale*bestfit[key] + scale*scipy.sqrt(var[key]+varchange[key]),'k--')
Plotting.plot(times,scale*bestfit[key] - scale*scipy.sqrt(var[key]+varchange[key]),'k--')
Plotting.title(key,fontsize=14)
Plotting.xlabel('time')
Plotting.ylabel('arb. units')
Plotting.axis([0.0,40.0,-.01,1.2e4])
Plotting.show()
if return_data :
newvar = {}
for ky in var.keys() :
newvar[ky] = var[key] + varchange[key]
return times,bestfit,newvar
def plot_variances_subplot(chemnames,logprior) :
times, bestfit, var = variances(chemnames,logprior)
nallplots = len(chemnames)
# 9 at a time
nfigs = nallplots/9 # integer division -- no fractional part
for figno in range(1,nfigs+1) :
Plotting.figure()
for i in range(0,9) :
Plotting.subplot(3,3,i+1)
chemind = i+(figno-1)*9
Plotting.plot(times,bestfit[chemnames[chemind]])
Plotting.hold(True)
Plotting.plot(times,bestfit[chemnames[chemind]]
+ scipy.sqrt(var[chemnames[chemind]]),'r-')
Plotting.plot(times,bestfit[chemnames[chemind]]
- scipy.sqrt(var[chemnames[chemind]]),'r-')
yt = Plotting.yticks()
Plotting.axis([0,100.0,yt[0],yt[-1]])
Plotting.title(chemnames[chemind])
Plotting.xlabel('time')
Plotting.ylabel('arb. units')
xt = Plotting.xticks()
Plotting.xticks([xt[0],xt[-1]])
Plotting.savefig('./figs/variance_wt_'+i.__str__()+'.ps')
Plotting.show()
#def fix_sf():
# make sure scale factors get computed --- easiest way is
# to compute the cost
# print "cost is ", m.cost(curp)
# sfs = m.internalVars['scaleFactors']
# for exptname in sfs.keys() :
# fixeddict = sfs[exptname]
# m.exptColl[exptname].set_fixed_sf(fixeddict)
# just check
# print "cost is now", m.cost(curp)
def reduce_size(array,skipsize) :
""" reduce_size takes an array of dimension m,n and
returns an array with every skipsize row sampled.
"""
size = array.shape
newsize = len(scipy.arange(0,size[0],skipsize))
if len(size) == 1 : # a vector
newvect = scipy.zeros((newsize,),scipy.float_)
for iind,i in enumerate(scipy.arange(0,size[0],skipsize)) :
newvect[iind] = array[i]
return newvect
elif len(size) == 2 : # an array
newarray = scipy.zeros((newsize,size[1]),scipy.float_)
for iind,i in enumerate(scipy.arange(0,size[0],skipsize)) :
newarray[iind] = array[i]
return newarray
| 43.487267 | 111 | 0.666979 | [
"BSD-3-Clause"
] | jurquiza/SloppyCellUrquiza2019 | SloppyCell/ReactionNetworks/OptDesign.py | 25,614 | Python |
# from tensorflow.contrib.training import HParams
# from aukit.audio_io import Dict2Obj
from dotmap import DotMap
import json
class Dict2Obj(DotMap):
"""
    Fixes the DotMap bug where get on a missing key creates a new DotMap object.
    Dict2Obj's get method behaves the same as dict.get.
"""
def __getitem__(self, k):
if k not in self._map:
return None
else:
return self._map[k]
def parse(self, json_string):
if json_string.strip():
_hp = json.loads(json_string)
for k, v in _hp.items():
self[k] = v
return self
one = 64
# Default hyperparameters
hparams = Dict2Obj(dict(
encoder_path=r"../models/encoder/saved_models/ge2e_pretrained.pt",
# Comma-separated list of cleaners to run on text prior to training and eval. For non-English
# text, you may want to use "basic_cleaners" or "transliteration_cleaners".
cleaners="chinese_cleaners",
center=True,
# If you only have 1 GPU or want to use only one GPU, please set num_gpus=0 and specify the
# GPU idx on run. example:
# expample 1 GPU of index 2 (train on "/gpu2" only): CUDA_VISIBLE_DEVICES=2 python train.py
# --model="Tacotron" --hparams="tacotron_gpu_start_idx=2"
# If you want to train on multiple GPUs, simply specify the number of GPUs available,
# and the idx of the first GPU to use. example:
# example 4 GPUs starting from index 0 (train on "/gpu0"->"/gpu3"): python train.py
# --model="Tacotron" --hparams="tacotron_num_gpus=4, tacotron_gpu_start_idx=0"
# The hparams arguments can be directly modified on this hparams.py file instead of being
# specified on run if preferred!
# If one wants to train both Tacotron and WaveNet in parallel (provided WaveNet will be
# trained on True mel spectrograms), one needs to specify different GPU idxes.
# example Tacotron+WaveNet on a machine with 4 or plus GPUs. Two GPUs for each model:
# CUDA_VISIBLE_DEVICES=0,1 python train.py --model="Tacotron"
# --hparams="tacotron_gpu_start_idx=0, tacotron_num_gpus=2"
# Cuda_VISIBLE_DEVICES=2,3 python train.py --model="WaveNet"
# --hparams="wavenet_gpu_start_idx=2; wavenet_num_gpus=2"
# IMPORTANT NOTE: If using N GPUs, please multiply the tacotron_batch_size by N below in the
# hparams! (tacotron_batch_size = 32 * N)
# Never use lower batch size than 32 on a single GPU!
# Same applies for Wavenet: wavenet_batch_size = 8 * N (wavenet_batch_size can be smaller than
# 8 if GPU is having OOM, minimum 2)
# Please also apply the synthesis batch size modification likewise. (if N GPUs are used for
# synthesis, minimal batch size must be N, minimum of 1 sample per GPU)
# We did not add an automatic multi-GPU batch size computation to avoid confusion in the
# user"s mind and to provide more control to the user for
# resources related decisions.
# Acknowledgement:
# Many thanks to @MlWoo for his awesome work on multi-GPU Tacotron which showed to work a
# little faster than the original
# pipeline for a single GPU as well. Great work!
# Hardware setup: Default supposes user has only one GPU: "/gpu:0" (Tacotron only for now!
# WaveNet does not support multi GPU yet, WIP)
# Synthesis also uses the following hardware parameters for multi-GPU parallel synthesis.
tacotron_gpu_start_idx=0, # idx of the first GPU to be used for Tacotron training.
tacotron_num_gpus=1, # Determines the number of gpus in use for Tacotron training.
split_on_cpu=True,
# Determines whether to split data on CPU or on first GPU. This is automatically True when
# more than 1 GPU is used.
###########################################################################################################################################
# Audio
# Audio parameters are the most important parameters to tune when using this work on your
# personal data. Below are the beginner steps to adapt
# this work to your personal data:
# 1- Determine my data sample rate: First you need to determine your audio sample_rate (how
# many samples are in a second of audio). This can be done using sox: "sox --i <filename>"
# (For this small tuto, I will consider 24kHz (24000 Hz), and defaults are 22050Hz,
# so there are plenty of examples to refer to)
# 2- set sample_rate parameter to your data correct sample rate
# 3- Fix win_size and and hop_size accordingly: (Supposing you will follow our advice: 50ms
# window_size, and 12.5ms frame_shift(hop_size))
# a- win_size = 0.05 * sample_rate. In the tuto example, 0.05 * 24000 = 1200
# b- hop_size = 0.25 * win_size. Also equal to 0.0125 * sample_rate. In the tuto
# example, 0.25 * 1200 = 0.0125 * 24000 = 300 (Can set frame_shift_ms=12.5 instead)
# 4- Fix n_fft, num_freq and upsample_scales parameters accordingly.
# a- n_fft can be either equal to win_size or the first power of 2 that comes after
# win_size. I usually recommend using the latter
# to be more consistent with signal processing friends. No big difference to be seen
# however. For the tuto example: n_fft = 2048 = 2**11
# b- num_freq = (n_fft / 2) + 1. For the tuto example: num_freq = 2048 / 2 + 1 = 1024 +
# 1 = 1025.
# c- For WaveNet, upsample_scales products must be equal to hop_size. For the tuto
# example: upsample_scales=[15, 20] where 15 * 20 = 300
# it is also possible to use upsample_scales=[3, 4, 5, 5] instead. One must only
# keep in mind that upsample_kernel_size[0] = 2*upsample_scales[0]
# so the training segments should be long enough (2.8~3x upsample_scales[0] *
# hop_size or longer) so that the first kernel size can see the middle
# of the samples efficiently. The length of WaveNet training segments is under the
# parameter "max_time_steps".
# 5- Finally comes the silence trimming. This very much data dependent, so I suggest trying
# preprocessing (or part of it, ctrl-C to stop), then use the
# .ipynb provided in the repo to listen to some inverted mel/linear spectrograms. That
# will first give you some idea about your above parameters, and
# it will also give you an idea about trimming. If silences persist, try reducing
# trim_top_db slowly. If samples are trimmed mid words, try increasing it.
# 6- If audio quality is too metallic or fragmented (or if linear spectrogram plots are
# showing black silent regions on top), then restart from step 2.
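    # Worked example of the arithmetic in steps 2-4 for the 16 kHz defaults used
    # below (a sketch that simply restates the settings chosen in this file):
    #   win_size = 0.05   * 16000 = 800
    #   hop_size = 0.0125 * 16000 = 200   (= 0.25 * win_size)
    #   n_fft    = 800 (kept equal to win_size here; the next power of 2, 1024, would also work)
    #   num_freq = n_fft / 2 + 1 = 401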
inv_mel_basis=None,
mel_basis=None,
num_mels=80, # Number of mel-spectrogram channels and local conditioning dimensionality
# network
rescale=True, # Whether to rescale audio prior to preprocessing
rescaling_max=0.9, # Rescaling value
# Whether to clip silence in Audio (at beginning and end of audio only, not the middle)
# train samples of lengths between 3sec and 14sec are more than enough to make a model capable
# of good parallelization.
clip_mels_length=True,
# For cases of OOM (Not really recommended, only use if facing unsolvable OOM errors,
# also consider clipping your samples to smaller chunks)
max_mel_frames=900,
# Only relevant when clip_mels_length = True, please only use after trying output_per_steps=3
# and still getting OOM errors.
# Use LWS (https://github.com/Jonathan-LeRoux/lws) for STFT and phase reconstruction
# It"s preferred to set True to use with https://github.com/r9y9/wavenet_vocoder
# Does not work if n_ffit is not multiple of hop_size!!
use_lws=False,
# Only used to set as True if using WaveNet, no difference in performance is observed in
# either cases.
silence_threshold=2, # silence threshold used for sound trimming for wavenet preprocessing
# Mel spectrogram
n_fft=800, # Extra window size is filled with 0 paddings to match this parameter
hop_size=200, # For 16000Hz, 200 = 12.5 ms (0.0125 * sample_rate)
win_size=800, # For 16000Hz, 800 = 50 ms (If None, win_size = n_fft) (0.05 * sample_rate)
sample_rate=16000, # 16000Hz (corresponding to librispeech) (sox --i <filename>)
frame_shift_ms=None, # Can replace hop_size parameter. (Recommended: 12.5)
# M-AILABS (and other datasets) trim params (these parameters are usually correct for any
# data, but definitely must be tuned for specific speakers)
trim_fft_size=512,
trim_hop_size=128,
trim_top_db=23,
# Mel and Linear spectrograms normalization/scaling and clipping
signal_normalization=True,
# Whether to normalize mel spectrograms to some predefined range (following below parameters)
allow_clipping_in_normalization=True, # Only relevant if mel_normalization = True
symmetric_mels=True,
# Whether to scale the data to be symmetric around 0. (Also multiplies the output range by 2,
# faster and cleaner convergence)
max_abs_value=4.,
# max absolute value of data. If symmetric, data will be [-max, max] else [0, max] (Must not
# be too big to avoid gradient explosion,
# not too small for fast convergence)
normalize_for_wavenet=True,
# whether to rescale to [0, 1] for wavenet. (better audio quality)
clip_for_wavenet=True,
# whether to clip [-max, max] before training/synthesizing with wavenet (better audio quality)
# Contribution by @begeekmyfriend
# Spectrogram Pre-Emphasis (Lfilter: Reduce spectrogram noise and helps model certitude
# levels. Also allows for better G&L phase reconstruction)
preemphasize=True, # whether to apply filter
preemphasis=0.97, # filter coefficient.
# Limits
min_level_db=-100,
ref_level_db=20,
fmin=55,
# Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To
# test depending on dataset. Pitch info: male~[65, 260], female~[100, 525])
fmax=7600, # To be increased/reduced depending on data.
# Griffin Lim
power=1.5,
# Only used in G&L inversion, usually values between 1.2 and 1.5 are a good choice.
griffin_lim_iters=30, # 60,
# Number of G&L iterations, typically 30 is enough but we use 60 to ensure convergence.
###########################################################################################################################################
# Tacotron
outputs_per_step=2, # Was 1
# number of frames to generate at each decoding step (increase to speed up computation and
# allows for higher batch size, decreases G&L audio quality)
stop_at_any=True,
# Determines whether the decoder should stop when predicting <stop> to any frame or to all of
# them (True works pretty well)
embedding_dim=one * 4, # 512, # dimension of embedding space (these are NOT the speaker embeddings)
# Encoder parameters
enc_conv_num_layers=3, # number of encoder convolutional layers
enc_conv_kernel_size=(5,), # size of encoder convolution filters for each layer
enc_conv_channels=one * 4, # 512, # number of encoder convolutions filters for each layer
encoder_lstm_units=one * 2, # 256, # number of lstm units for each direction (forward and backward)
# Attention mechanism
smoothing=False, # Whether to smooth the attention normalization function
attention_dim=one * 1, # 128, # dimension of attention space
attention_filters=32, # number of attention convolution filters
attention_kernel=(31,), # kernel size of attention convolution
cumulative_weights=True,
# Whether to cumulate (sum) all previous attention weights or simply feed previous weights (
# Recommended: True)
# Decoder
prenet_layers=[one * 2, one * 2], # [256, 256], # number of layers and number of units of prenet
decoder_layers=2, # number of decoder lstm layers
decoder_lstm_units=one * 8, # 1024, # number of decoder lstm units on each layer
max_iters=2000,
# Max decoder steps during inference (Just for safety from infinite loop cases)
# Residual postnet
postnet_num_layers=5, # number of postnet convolutional layers
postnet_kernel_size=(5,), # size of postnet convolution filters for each layer
postnet_channels=one * 4, # 512, # number of postnet convolution filters for each layer
# CBHG mel->linear postnet
cbhg_kernels=8,
# All kernel sizes from 1 to cbhg_kernels will be used in the convolution bank of CBHG to act
# as "K-grams"
cbhg_conv_channels=one * 1, # 128, # Channels of the convolution bank
cbhg_pool_size=2, # pooling size of the CBHG
cbhg_projection=one * 2, # 256,
# projection channels of the CBHG (1st projection, 2nd is automatically set to num_mels)
cbhg_projection_kernel_size=3, # kernel_size of the CBHG projections
cbhg_highwaynet_layers=4, # Number of HighwayNet layers
cbhg_highway_units=one * 1, # 128, # Number of units used in HighwayNet fully connected layers
cbhg_rnn_units=one * 1, # 128,
# Number of GRU units used in bidirectional RNN of CBHG block. CBHG output is 2x rnn_units in
# shape
# Loss params
mask_encoder=True,
# whether to mask encoder padding while computing attention. Set to True for better prosody
# but slower convergence.
mask_decoder=False,
# Whether to use loss mask for padded sequences (if False, <stop_token> loss function will not
# be weighted, else recommended pos_weight = 20)
cross_entropy_pos_weight=20,
# Use class weights to reduce the stop token classes imbalance (by adding more penalty on
# False Negatives (FN)) (1 = disabled)
predict_linear=False,
# Whether to add a post-processing network to the Tacotron to predict linear spectrograms (
# True mode Not tested!!)
###########################################################################################################################################
# Tacotron Training
# Reproduction seeds
tacotron_random_seed=5339,
# Determines initial graph and operations (i.e: model) random state for reproducibility
tacotron_data_random_state=1234, # random state for train test split repeatability
# performance parameters
tacotron_swap_with_cpu=False,
# Whether to use cpu as support to gpu for decoder computation (Not recommended: may cause
# major slowdowns! Only use when critical!)
# train/test split ratios, mini-batches sizes
tacotron_batch_size=64, # number of training samples on each training steps (was 32)
# Tacotron Batch synthesis supports ~16x the training batch size (no gradients during
# testing).
# Training Tacotron with unmasked paddings makes it aware of them, which makes synthesis times
# different from training. We thus recommend masking the encoder.
tacotron_synthesis_batch_size=128,
    # DO NOT MAKE THIS BIGGER THAN 1 IF YOU DIDN'T TRAIN TACOTRON WITH "mask_encoder=True"!!
tacotron_test_size=None, # 0.05
# % of data to keep as test data, if None, tacotron_test_batches must be not None. (5% is
# enough to have a good idea about overfit)
tacotron_test_batches=2, # number of test batches.
# Learning rate schedule
tacotron_decay_learning_rate=True,
# boolean, determines if the learning rate will follow an exponential decay
tacotron_start_decay=10000, # 50000, # Step at which learning decay starts
tacotron_decay_steps=10000, # 50000, # Determines the learning rate decay slope (UNDER TEST)
tacotron_decay_rate=0.5, # learning rate decay rate (UNDER TEST)
tacotron_initial_learning_rate=1e-3, # starting learning rate
tacotron_final_learning_rate=1e-5, # minimal learning rate
# Optimization parameters
tacotron_adam_beta1=0.9, # AdamOptimizer beta1 parameter
tacotron_adam_beta2=0.999, # AdamOptimizer beta2 parameter
tacotron_adam_epsilon=1e-6, # AdamOptimizer Epsilon parameter
# Regularization parameters
tacotron_reg_weight=1e-7, # regularization weight (for L2 regularization)
tacotron_scale_regularization=False,
# Whether to rescale regularization weight to adapt for outputs range (used when reg_weight is
# high and biasing the model)
tacotron_zoneout_rate=0.1, # zoneout rate for all LSTM cells in the network
tacotron_dropout_rate=0.5, # dropout rate for all convolutional layers + prenet
tacotron_clip_gradients=True, # whether to clip gradients
# Evaluation parameters
natural_eval=False,
# Whether to use 100% natural eval (to evaluate Curriculum Learning performance) or with same
# teacher-forcing ratio as in training (just for overfit)
# Decoder RNN learning can take be done in one of two ways:
# Teacher Forcing: vanilla teacher forcing (usually with ratio = 1). mode="constant"
# Curriculum Learning Scheme: From Teacher-Forcing to sampling from previous outputs is
# function of global step. (teacher forcing ratio decay) mode="scheduled"
# The second approach is inspired by:
# Bengio et al. 2015: Scheduled Sampling for Sequence Prediction with Recurrent Neural Networks.
# Can be found under: https://arxiv.org/pdf/1506.03099.pdf
tacotron_teacher_forcing_mode="constant",
# Can be ("constant" or "scheduled"). "scheduled" mode applies a cosine teacher forcing ratio
# decay. (Preference: scheduled)
tacotron_teacher_forcing_ratio=1.,
# Value from [0., 1.], 0.=0%, 1.=100%, determines the % of times we force next decoder
# inputs, Only relevant if mode="constant"
tacotron_teacher_forcing_init_ratio=1.,
# initial teacher forcing ratio. Relevant if mode="scheduled"
tacotron_teacher_forcing_final_ratio=0.,
# final teacher forcing ratio. Relevant if mode="scheduled"
tacotron_teacher_forcing_start_decay=10000,
# starting point of teacher forcing ratio decay. Relevant if mode="scheduled"
tacotron_teacher_forcing_decay_steps=280000,
# Determines the teacher forcing ratio decay slope. Relevant if mode="scheduled"
tacotron_teacher_forcing_decay_alpha=0.,
# teacher forcing ratio decay rate. Relevant if mode="scheduled"
###########################################################################################################################################
# Tacotron-2 integration parameters
train_with_GTA=False,
# Whether to use GTA mels to train WaveNet instead of ground truth mels.
###########################################################################################################################################
# Eval sentences (if no eval text file was specified during synthesis, these sentences are
# used for eval)
sentences=["你好语音克隆模型。"],
### SV2TTS ###
speaker_embedding_size=256,
silence_min_duration_split=0.4, # Duration in seconds of a silence for an utterance to be split
utterance_min_duration=1., # Duration in seconds below which utterances are discarded
))
def hparams_debug_string():
# values = hparams.values()
hp = [" %s: %s" % (key, value) for key, value in hparams.items()]
return "Hyperparameters:\n" + "\n".join(hp)
| 53.901685 | 143 | 0.691594 | [
"MIT",
"BSD-3-Clause"
] | kozzion/breaker_audio | breaker_audio/component_cmn/synthesizer/hparams.py | 19,249 | Python |
class Doer(object):
def __init__(self, frontend):
self.__frontend = frontend
async def do(self, action):
return await self.__frontend.do(action) | 24.285714 | 47 | 0.664706 | [
"Apache-2.0"
] | maxwell-dev/maxwell-client-python | maxwell/doer.py | 170 | Python |
# -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.ims.v20200713 import models
class ImsClient(AbstractClient):
_apiVersion = '2020-07-13'
_endpoint = 'ims.tencentcloudapi.com'
_service = 'ims'
def DescribeImageStat(self, request):
"""控制台识别统计
:param request: Request instance for DescribeImageStat.
:type request: :class:`tencentcloud.ims.v20200713.models.DescribeImageStatRequest`
:rtype: :class:`tencentcloud.ims.v20200713.models.DescribeImageStatResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeImageStat", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeImageStatResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeImsList(self, request):
"""图片机器审核明细
:param request: Request instance for DescribeImsList.
:type request: :class:`tencentcloud.ims.v20200713.models.DescribeImsListRequest`
:rtype: :class:`tencentcloud.ims.v20200713.models.DescribeImsListResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeImsList", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeImsListResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def ImageModeration(self, request):
"""图片内容检测服务(Image Moderation, IM)能自动扫描图片,识别可能令人反感、不安全或不适宜的内容,同时支持用户配置图片黑名单,打击自定义识别类型的图片。
<div class="rno-api-explorer" style="margin-bottom:20px">
<div class="rno-api-explorer-inner">
<div class="rno-api-explorer-hd">
<div class="rno-api-explorer-title">
                    Notes on version iteration
</div>
</div>
<div class="rno-api-explorer-body">
<div class="rno-api-explorer-cont">
                    <p>The current page documents the 2020 version of Image Moderation. Integrations created before 2020-11-03 use the 2019 version of the interface; users onboarded before that date can go to the following link for maintenance operations: <a href="https://cloud.tencent.com/document/product/1125/38206" target="_blank">Image Moderation - 2019 version</a></p>
                    <p>The 2020 version is an upgrade of the 2019 version: it supports more flexible multi-scenario business policy configuration and richer recognition callback information to meet different recognition needs. We recommend upgrading the interface by following the 2020 integration guide; the 2019 version will continue to be maintained until it is no longer in use.</p>
</div>
</div>
</div>
</div>
:param request: Request instance for ImageModeration.
:type request: :class:`tencentcloud.ims.v20200713.models.ImageModerationRequest`
:rtype: :class:`tencentcloud.ims.v20200713.models.ImageModerationResponse`
"""
try:
params = request._serialize()
body = self.call("ImageModeration", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ImageModerationResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
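
# Minimal usage sketch (the credentials, region and image URL are placeholders; the
# request fields follow tencentcloud.ims.v20200713.models.ImageModerationRequest):
#
# from tencentcloud.common import credential
# from tencentcloud.ims.v20200713 import models as ims_models
#
# cred = credential.Credential("SecretId", "SecretKey")
# client = ImsClient(cred, "ap-guangzhou")
# req = ims_models.ImageModerationRequest()
# req.FileUrl = "https://example.com/image.jpg"
# resp = client.ImageModeration(req)
# print(resp.to_json_string())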
"BSD-3-Clause"
] | HelloBarry/tencent_cloud_ops | tencentcloud/ims/v20200713/ims_client.py | 5,797 | Python |
class HomonymException(Exception):
    def __init__(self, *args):
        super().__init__(*args)
class Homonym():
def __init__(self):
pass
def CreateModel(self):
pass
def SgdScore(self, rounds):
pass
def FindErrors(self):
pass
| 14.631579 | 34 | 0.579137 | [
"MIT"
] | Biatris/Homonym | homonym.py | 278 | Python |
# First create a Shuffle list
my_shuffle_list = [1,2,3,4,5]
# Now Import shuffle
from random import shuffle
shuffle(my_shuffle_list)
print(my_shuffle_list) # check whether shuffle is working or not
# Now let's create Guess Game. First create a list
mylist = ['','o','']
# Define function which will used further
def shuffle_list(mylist):
shuffle(mylist)
return mylist
print(mylist) # First check your mylist without shuffle
print(shuffle_list(mylist)) # Now check whether the shuffle function is working
# Now create function for user to take input as guess number
def user_guess():
guess = ''
while guess not in ['0','1','2']:
guess = input("Pick a number : 0, 1 or 2 : ")
return int(guess)
print(user_guess())
def check_guess(mylist,guess):
if mylist[guess] == 'o':
print('Correct Guess')
else:
        print('Wrong! Better luck next time')
# Initial list
mylist = ['','o','']
#shuffle list
mixedup_list = shuffle_list(mylist)
# Get user guess
guess = user_guess()
check_guess(mixedup_list,guess)
| 27.075 | 84 | 0.6759 | [
"MIT"
] | alok-techqware/basic_python_practicse | python_basics/Method_ function/mixed_function_guess_game.py | 1,083 | Python |
# qubit number=5
# total number=45
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(1):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[2]) # number=42
prog.cz(input_qubit[1],input_qubit[2]) # number=43
prog.h(input_qubit[2]) # number=44
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=39
prog.cz(input_qubit[1],input_qubit[0]) # number=40
prog.h(input_qubit[0]) # number=41
prog.h(input_qubit[0]) # number=31
prog.cz(input_qubit[1],input_qubit[0]) # number=32
prog.h(input_qubit[0]) # number=33
prog.x(input_qubit[0]) # number=29
prog.cx(input_qubit[1],input_qubit[0]) # number=30
prog.h(input_qubit[0]) # number=34
prog.cz(input_qubit[1],input_qubit[0]) # number=35
prog.h(input_qubit[0]) # number=36
prog.x(input_qubit[1]) # number=10
prog.cx(input_qubit[0],input_qubit[2]) # number=25
prog.x(input_qubit[2]) # number=26
prog.cx(input_qubit[0],input_qubit[2]) # number=27
prog.x(input_qubit[3]) # number=12
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.x(input_qubit[1]) # number=14
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.cx(input_qubit[2],input_qubit[4]) # number=37
prog.h(input_qubit[3]) # number=20
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit953.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 30.878788 | 82 | 0.603042 | [
"BSD-3-Clause"
] | UCLA-SEAL/QDiff | benchmark/startQiskit953.py | 4,076 | Python |
import numpy as np
import pandas as pd
X = np.load('preds.npy')
img = pd.read_csv('test.csv')
img['x1'] = X[:,0]*640
img['x2'] = X[:,1]*640
img['y1'] = X[:,2]*480
img['y2'] = X[:,3]*480
""" img['x1'] = 0.05*640
img['x2'] = 0.95*640
img['y1'] = 0.05*480
img['y2'] = 0.95*480 """
img.to_csv('subbigles.csv',index = False) | 24.538462 | 41 | 0.567398 | [
"MIT"
] | mukul54/Flipkart-Grid-Challenge | codes/write_csv.py | 319 | Python |
from sources import Sources
from stories import Stories
class Bot():
def __init__(self, config):
self.url = config.get_url()
self.sources = None
self.stories = None
def load(self):
self.sources = Sources(self.url)
self.stories = Stories(self.sources)
return self.stories.load()
def start(self, url):
        message = 'Bot for the site {0}'.format(url)
return message
def help(self):
message = "/get - читать истории из: \n\t{0}\n"\
"/random - случайные истории\n"\
"/stop - прервать диалог с ботом".format(
'\n\t'.join(['{0}'.format(y) for (x,y) in self.stories.get_description().items()]))
return message
def random(self, num=None, site_names=None):
if site_names is None:
site_names = list(self.stories.get_names().keys())
sites = list(self.stories.get_names().values())
messages = []
stories = self.stories.get(num=num, site_names=site_names,
sites=sites, random=True)
for s in stories:
messages.append(s.get().get('story'))
return messages
def get(self, num=None, site_names=None):
if site_names is None:
site_names = list(self.stories.get_names().keys())
sites = list(self.stories.get_names().values())
messages = []
stories = self.stories.get(num=num, site_names=site_names,
sites=sites)
for s in stories:
messages.append(s.get().get('story'))
return messages
def get_sources_sites(self):
sites = set()
for sites_list in self.sources.get():
for site in sites_list:
sites.add(site.get('site'))
return list(sites)
def get_sources_names(self, site):
names = set()
for sites_list in self.sources.get():
for s in sites_list:
if s.get('site') == site:
names.add((s.get('name'), s.get('desc')))
return list(names) | 34.403226 | 95 | 0.547586 | [
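
# Usage sketch (the config object is assumed to expose get_url(), as in __init__ above):
#
# bot = Bot(config)
# if bot.load():
#     for message in bot.random(num=1):
#         print(message)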
"MIT"
] | sagol/umorilibot | bot.py | 2,195 | Python |
from django.shortcuts import render
from accounts import models
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from accounts import serializers # Used to tell the API what data to expect when making a POST, PUT, or PATCH request
from accounts import models
from accounts import permissions
class Injection_DetailsViewSet(viewsets.ModelViewSet):
"""Handles creating, reading and updating patient info readings"""
authentication_classes = (TokenAuthentication,)
    serializer_class = serializers.Injection_DetailsSerializer # This points to the serializer class used by this viewset
queryset = models.Injection_Details.objects.all()
permission_classes = (permissions.UpdateOwnReading, IsAuthenticated,) # Validates that a user is authenticated to read or modify objects
def get_queryset(self):
user = self.request.user
return models.Injection_Details.objects.get_queryset().filter(user_profile=user)
def perform_create(self, serializer): # overriding this function so that when a user tries to create an object they are validated as the current user
"""Sets the patient profile to the logged in user"""
serializer.save(user_profile=self.request.user) # This sets the user profile to the current user from the serializer passed in
#def create(self, serializer): # overriding this function so that when a user
#patient_info = models.PatientInfo.objects.filter(user_profile=self.request.user)
#serializer.save = self.get_serializer(patient_info, many = True) # This sets the user profile to the current user from the serializer passed in
#serializer.is_valid(raise_exceptions=True)
#self.perform_create(serializer)
#return Response(serializer.data) | 54.25 | 153 | 0.777266 | [
"MIT"
] | eliefrancois/project2-diabetesapplication-api | backend/accounts/views.py | 1,953 | Python |
def now():
print('2017-05-31')
now.__name__
f = now
f.__name__
# Define a decorator that logs function calls
def log(func):
def wrapper(*args, **kw):
print('call %s():' % func.__name__)
return func(*args, **kw)
return wrapper
@log
def now1():
print('2017-05-31')
# If the decorator itself needs arguments, write a higher-order function that returns a decorator
def log1(text):
def decorator(func):
def wrapper(*args, **kw):
print('%s %s():' % (text, func.__name__))
return func(*args, **kw)
return wrapper
return decorator
import functools
def log2(func):
@functools.wraps(func)
def wrapper(*args, **kw):
print('call %s():' % func.__name__)
return func(*args, **kw)
return wrapper
# Version with arguments
def log3(text):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
print('%s %s():' % (text, func.__name__))
return func(*args, **kw)
return wrapper
return decorator
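
# Usage sketch for the parametrised decorator above (the function name and log text
# are illustrative):
#
# @log3('execute')
# def now2():
#     print('2017-05-31')
#
# now2()                 # prints "execute now2():" followed by "2017-05-31"
# print(now2.__name__)   # "now2" -- preserved by functools.wraps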
| 17.618182 | 53 | 0.562436 | [
"Apache-2.0"
] | zhayangtao/HelloPython | python01/PythonDecorator.py | 1,035 | Python |
"""
mbase module
This module contains the base model class from which
all of the other models inherit from.
"""
import abc
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.grid import Grid
## Global variables
# Multiplier for individual array elements in integer and real arrays read by
# MODFLOW's U2DREL, U1DREL and U2DINT.
iconst = 1
# Printout flag. If >= 0 then array values read are printed in listing file.
iprn = -1
# external exceptions for users
class PackageLoadException(Exception):
"""
FloPy package load exception.
"""
def __init__(self, error, location=""):
"""Initialize exception."""
self.message = error
super().__init__(f"{error} ({location})")
class FileDataEntry:
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData:
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
ipop = []
for idx, file_data in enumerate(self.file_data):
if file_data.fname == fname or file_data.unit == unit:
ipop.append(idx)
self.file_data.append(
FileDataEntry(
fname, unit, binflag=binflag, output=output, package=package
)
)
return
class ModelInterface:
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(
proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
"must define packagelist in child class to use this base class"
)
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
"must define namefile in child class to use this base class"
)
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
"must define model_ws in child class to use this base class"
)
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
"must define exename in child class to use this base class"
)
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
"must define export in child class to use this base class"
)
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
"must define laytyp in child class to use this base class"
)
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
"must define hdry in child class to use this base class"
)
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
"must define hnoflo in child class to use this base class"
)
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
"must define laycbd in child class to use this base class"
)
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
"must define verbose in child class to use this base class"
)
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
"must define check in child class to use this base class"
)
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
val = []
for pp in self.packagelist:
if ftype is None:
val.append(pp.name[0].upper())
elif pp.package_type.lower() == ftype:
val.append(pp.name[0].upper())
return val
def _check(self, chk, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
summarize : bool
Boolean flag used to determine if summary of results is written
to the screen
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
f=None,
verbose=False,
level=level - 1,
checktype=chk.__class__,
)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list())
)
if not solvers:
chk._add_to_summary(
"Error", desc="\r No solver package", package="model"
)
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary(
"Error",
desc="\r Multiple solver packages",
package=s,
)
else:
chk.passed.append("Compatible solver package")
# add package check results to model level check summary
for r in results.values():
if (
r is not None and r.summary_array is not None
): # currently SFR doesn't have one
chk.summary_array = np.append(
chk.summary_array, r.summary_array
).view(np.recarray)
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
chk.summarize()
return chk
class BaseModel(ModelInterface):
"""
MODFLOW-based models base class.
Parameters
----------
modelname : str, default "modflowtest"
Name of the model, which is also used for model file names.
namefile_ext : str, default "nam"
Name file extension, without "."
exe_name : str, default "mf2k.exe"
Name of the modflow executable.
model_ws : str, optional
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
verbose : bool, default False
Print additional information to the screen.
**kwargs : dict, optional
Used to define: ``xll``/``yll`` for the x- and y-coordinates of
the lower-left corner of the grid, ``xul``/``yul`` for the
x- and y-coordinates of the upper-left corner of the grid
(deprecated), ``rotation`` for the grid rotation (default 0.0),
``proj4_str`` for a PROJ string, and ``start_datetime`` for
model start date (default "1-1-1970").
"""
def __init__(
self,
modelname="modflowtest",
namefile_ext="nam",
exe_name="mf2k.exe",
model_ws=None,
structured=True,
verbose=False,
**kwargs,
):
"""Initialize BaseModel."""
super().__init__()
self.__name = modelname
self.namefile_ext = namefile_ext or ""
self._namefile = self.__name + "." + self.namefile_ext
self._packagelist = []
self.heading = ""
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = "ref"
if model_ws is None:
model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
except:
print(
f"\n{model_ws} not valid, "
f"workspace-folder was changed to {os.getcwd()}\n"
)
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ""
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(
proj4=self._proj4_str,
xoff=xll,
yoff=yll,
angrot=self._rotation,
)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
@property
def modeltime(self):
raise NotImplementedError(
"must define modeltime in child class to use this base class"
)
@property
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
Function to encapsulate next_ext_unit attribute
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
except:
pn = p.name
if self.verbose:
print(
f"\nWARNING:\n unit {u} of package {pn} already in use."
)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print(
"\nWARNING:\n Two packages of the same type, "
f"Replacing existing '{p.name[0]}' package."
)
self.packagelist[i] = p
return
if self.verbose:
print("adding Package: ", p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print("removing Package: ", pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
"Package name " + pname + " not found in Package list"
)
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
item : str
3 character package name (case insensitive) or "sr" to access
the SpatialReference instance of the ModflowDis object
Returns
-------
sr : SpatialReference instance
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
Note
----
if self.dis is not None, then the spatial reference instance is updated
using self.dis.delr, self.dis.delc, and self.dis.lenuni before being
returned
"""
if item == "output_packages" or not hasattr(self, "output_packages"):
raise AttributeError(item)
if item == "tr":
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "nper":
if self.dis is not None:
return self.dis.nper
else:
return 0
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == "modelgrid":
return
raise AttributeError(item)
def get_ext_dict_attr(
self, ext_unit_dict=None, unit=None, filetype=None, pop_key=True
):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = "Adding"
txt2 = "to"
else:
txt1 = "Removing"
txt2 = "from"
print(
f"{txt1} {self.output_fnames[i]} (unit={self.output_units[i]}) "
f"{txt2} the output list."
)
def add_output_file(
self, unit, fname=None, extension="cbc", binflag=True, package=None
):
"""
Add an ascii or binary output file for a package
Parameters
----------
unit : int
unit number of external array
fname : str
filename of external array. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = f"{self.name}.{extension}"
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = f"{self.name}.{unit}.{extension}"
# include package name in fname
else:
fname = f"{self.name}.{package}.{extension}"
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
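# Illustrative, commented-out sketch of how a package typically registers its
# cell-by-cell output through this method; the unit number 53 is hypothetical:
#   ml.add_output_file(53, extension='cbc', binflag=True, package='WEL')
# which ends up as a binary output entry named '<modelname>.cbc' in the name
# file, unless a conflicting file name forces a package-specific name.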
def add_output(self, fname, unit, binflag=False, package=None):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.output_fnames:
if self.verbose:
print(
"BaseModel.add_output() warning: "
f"replacing existing filename {fname}"
)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
else:
msg = " either fname or unit must be passed to remove_output()"
raise Exception(msg)
return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = " either fname or unit must be passed to get_output()"
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = (
" either fname or unit must be passed "
"to set_output_attribute()"
)
raise Exception(msg)
if attr is not None:
if idx is not None:
for key, value in attr.items():
if key == "binflag":
self.output_binflag[idx] = value
elif key == "fname":
self.output_fnames[idx] = value
elif key == "unit":
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
Get an attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
" either fname or unit must be passed "
"to set_output_attribute()"
)
v = None
if attr is not None:
if idx is not None:
if attr == "binflag":
v = self.output_binflag[idx]
elif attr == "fname":
v = self.output_fnames[idx]
elif attr == "unit":
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.external_fnames:
if self.verbose:
print(
"BaseModel.add_external() warning: "
f"replacing existing filename {fname}"
)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
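# Commented-out usage sketch (file names and unit numbers are hypothetical):
#   ml.add_external('bottom.ref', 151)                # ASCII -> DATA entry
#   ml.add_external('heads.dat', 152, binflag=True)   # binary -> DATA(BINARY)
# Re-adding the same file name or unit replaces the earlier registration.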
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = " either fname or unit must be passed to remove_external()"
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
def add_existing_package(
self, filename, ptype=None, copy_to_model_ws=True
):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : optional
the model package type (e.g. "lpf", "wel", etc). If None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
Returns
-------
None
"""
if ptype is None:
ptype = filename.split(".")[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj:
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.name = [ptype]
fake_package.extension = [filename.split(".")[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
Parameters
----------
"""
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = f"{p.name[i]:14s} {p.unit_number[i]:5d} {p.file_name[i]}"
lines.append(s)
return "\n".join(lines) + "\n"
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if package name exists, otherwise False if not found.
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for pp in self.packagelist:
if pp.name[0].upper() == name:
return pp
return None
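# Commented-out sketch: package lookup is case-insensitive and returns None
# when the package is absent (the package names here are only examples):
#   if m.has_package('lpf'):
#       lpf = m.get_package('LPF')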
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = (
f"Error: Unsupported model version ({self.version}). "
"Valid model versions are:"
)
for v in list(self.version_types.keys()):
err += f" {v}"
raise Exception(err)
# set namefile heading
self.heading = (
f"# Name file for {self.version_types[self.version]}, "
f"generated by Flopy version {__version__}."
)
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
if hasattr(pak, "heading"):
pak._generate_heading()
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
Returns
-------
None
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
print(f"\ncreating model workspace...\n {new_pth}")
os.makedirs(new_pth)
except:
raise OSError(f"{new_pth} not valid, workspace-folder")
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
print(f"\nchanging model workspace...\n {new_pth}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if (
hasattr(self, "external_path")
and self.external_path is not None
and not os.path.exists(
os.path.join(self._model_ws, self.external_path)
)
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
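# Commented-out sketch of relocating a model before writing input files; the
# target directory is hypothetical and is created if it does not exist:
#   m.change_model_ws('run01', reset_external=True)
#   m.write_input()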
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(
self.external_fnames, self.external_output
):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == "":
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + "." + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + "." + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super().__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr" and value.__class__.__name__ == "SpatialReference":
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning,
)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception(
"cannot set SpatialReference - ModflowDis not found"
)
elif key == "tr":
assert isinstance(
value, discretization.reference.TemporalReference
)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception(
"cannot set TemporalReference - ModflowDis not found"
)
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception(
"cannot set start_datetime - ModflowDis not found"
)
else:
super().__setattr__(key, value)
def run_model(
self,
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
silent : boolean
If True, suppress echoing run information to the screen (default is False).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method. (default is False).
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
return run_model(
self.exe_name,
self.namefile,
model_ws=self.model_ws,
silent=silent,
pause=pause,
report=report,
normal_msg=normal_msg,
)
def load_results(self):
print("load_results not implemented")
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
SelPackList : False or list of packages
"""
if check:
# run check prior to writing input
self.check(f=f"{self.name}.chk", verbose=self.verbose, level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print(
"\nResetting free_format_input to True to "
"preserve the precision of the parameter data."
)
self.free_format_input = True
if self.verbose:
print("\nWriting packages:")
if SelPackList == False:
for p in self.packagelist:
if self.verbose:
print(" Package: ", p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(" Package: ", p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(" ")
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
def write_name_file(self):
"""
Every Package needs its own writenamefile function
"""
raise Exception(
"IMPLEMENTATION ERROR: writenamefile must be overloaded"
)
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
"IMPLEMENTATION ERROR: set_model_units must be overloaded"
)
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
Add a external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
Returns
-------
Examples
--------
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
otherpackage = [
k
for k, v in package_units.items()
if v == p.unit_number[i]
][0]
duplicate_units[otherpackage] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary(
"Error", package=k, value=v, desc="unit number conflict"
)
else:
chk.passed.append("Unit number conflicts")
return self._check(chk, level)
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
SelPackList : bool or list
List of packages to plot. If SelPackList=None all packages
are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
MODFLOW zero-based layer number to return. If None, then all
layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
----------
axes : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis objects is returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(
self, SelPackList=SelPackList, **kwargs
)
return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> m.to_shapefile('model.shp', SelPackList)
"""
warnings.warn("to_shapefile() is deprecated. use .export()")
self.export(filename, package_names=package_names)
return
def run_model(
exe_name,
namefile,
model_ws="./",
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
use_async=False,
cargs=None,
):
"""
This function will run the model using subprocess.Popen. It
communicates with the model's stdout asynchronously and reports
progress to the screen with timestamps
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Namefile of model to run. The namefile must be the
filename of the namefile without the path. Namefile can be None
to allow programs that do not require a control file (name file)
to be passed as a command line argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
silent : boolean
If True, suppress echoing run information to the screen (default is False).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method. (default is False).
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided using
a list. (Default is 'normal termination')
use_async : boolean
Asynchronously read model stdout and report with timestamps; good for
models that take a long time to run, not good for models that run
really fast.
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
if platform.system() in "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
elif exe_name.lower().endswith(".exe"):
exe = which(exe_name[:-4])
if exe is None:
raise Exception(
f"The program {exe_name} does not exist or is not executable."
)
else:
if not silent:
print(
f"FloPy is using the following executable to run the model: {exe}"
)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
raise Exception(
f"The namefile for this model does not exists: {namefile}"
)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b""):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode("utf-8")
if line == "" and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip("\r\n")
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.0
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == "":
break
line = line.decode().lower().strip()
if line != "":
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = f"(elapsed:{tsecs})-->{line}"
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
buff.extend(proc.stdout.readlines())
proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input("Press Enter to continue...")
return success, buff
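# Commented-out sketch of calling the module-level runner directly; the
# executable and namefile below are hypothetical and must exist locally:
#   success, buff = run_model('mf2005', 'demo.nam', model_ws='run01',
#                             silent=True, report=True)
#   if not success:
#       print('\n'.join(buff))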
| 31.202778 | 89 | 0.531968 | [
"CC0-1.0",
"BSD-3-Clause"
] | andrewcalderwood/flopy | flopy/mbase.py | 56,165 | Python |
df['A']
# A
# ---------
# -0.613035
# -1.265520
# 0.763851
# -1.248425
# 2.105805
# 1.763502
# -0.781973
# 1.400853
# -0.746025
# -1.120648
#
# [100 rows x 1 column] | 11.1875 | 23 | 0.497207 | [
"Apache-2.0"
] | 13927729580/h2o-3 | h2o-docs/src/booklets/v2_2015/source/Python_Vignette_code_examples/python_select_column_name.py | 179 | Python |
# -*- coding: utf-8 -*-
"""Docstring Parsers/Formatters"""
# TODO: break this module up into smaller pieces
import sys
import re
from textwrap import dedent
from collections import OrderedDict
from itertools import islice
from .autodocstring_logging import logger
PY3k = sys.version_info[0] == 3
if PY3k:
string_types = str,
else:
string_types = basestring, # pylint: disable=undefined-variable
def make_docstring_obj(docstr, default="google", template_order=False):
"""Detect docstring style and create a Docstring object
Parameters:
docstr (str): source docstring
default (str, class): 'google', 'numpy' or subclass
of Docstring
template_order (bool, optional): iff True, reorder the
sections to match the order they appear in the template
Returns:
subclass of Docstring
"""
typ = detect_style(docstr)
logger.info("[make_docstring_obj] from {} to {}"
"".format(typ.__name__ if typ is not None else None, default.__name__))
if typ is None:
if issubclass(default, Docstring):
typ = default
else:
typ = STYLE_LOOKUP[default.lower()]
return typ(docstr, template_order=template_order)
def detect_style(docstr):
"""Detect docstr style from existing docstring
Parameters:
docstr (str): docstring whose style we want to know
Returns:
class: one of [GoogleDocstring, NumpyDocstring, None]; None
means no match
"""
docstr = dedent_docstr(docstr)
for c in STYLE_LOOKUP.values():
if c.detect_style(docstr):
return c
return None
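# Commented-out sketch (docstring text is made up): a Google-style 'Args:'
# block should resolve to the Google docstring class via STYLE_LOOKUP, while
# make_docstring_obj falls back to `default` only when nothing matches:
#   typ = detect_style("Summary\n\nArgs:\n    x (int): value\n")
#   doc = make_docstring_obj("Summary\n", default="numpy")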
def dedent_docstr(s, n=1):
"""Dedent all lines except first n lines
Args:
s (type): some text to dedent
n (int): number of lines to skip, (n == 0 is a normal dedent,
n == 1 is useful for whole docstrings)
"""
lines = s.splitlines(keepends=True)
if lines:
first_n_lines = "".join([l.lstrip(' \t') for l in lines[:n]])
dedented = dedent("".join(lines[n:]))
return first_n_lines + dedented
else:
return ""
def dedent_verbose(s, n=1):
new = dedent_docstr(s, n=n)
s_split = s.splitlines(keepends=True)
new_split = new.splitlines(keepends=True)
i, ind = 0, -1
for i in range(n, len(s_split)):
if s_split[i].strip():
ind = s_split[i].find(new_split[i])
break
if ind >= 0:
indent = s_split[i][:ind]
else:
indent = ""
return indent, new
def indent_docstr(s, indent, n=1, trim=True):
"""Add common indentation to all lines except first
Args:
s (str): docstring starting at indentation level 0
indent (str): text used for indentation, in practice
this will be the level of the declaration + 1
n (int): don't indent first n lines
trim (bool): trim whitespace (' \t') out of blank lines
Returns:
s with common indentation applied
"""
lines = s.splitlines(keepends=True)
for i in range(n, len(lines)):
if lines[i].strip() or not trim:
lines[i] = "{0}{1}".format(indent, lines[i])
else:
lines[i] = lines[i].strip(' \t')
return "".join(lines)
def count_leading_newlines(s):
"""count number of leading newlines
this includes newlines that are separated by other whitespace
"""
return s[:-len(s.lstrip())].count('\n')
def count_trailing_newlines(s):
"""count number of trailing newlines
this includes newlines that are separated by other whitespace
"""
return s[len(s.rstrip()):].count('\n')
def with_bounding_newlines(s, nleading=0, ntrailing=0, nl='\n'):
"""return s with at least # leading and # trailing newlines
this includes newlines that are separated by other whitespace
"""
return "{0}{1}{2}".format(nl * (nleading - count_leading_newlines(s)),
s,
nl * (ntrailing - count_trailing_newlines(s)))
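# Commented-out examples of the newline bookkeeping used by the formatters:
#   count_trailing_newlines("text\n\n")                      # -> 2
#   with_bounding_newlines("text", nleading=1, ntrailing=2)  # -> "\ntext\n\n"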
def strip_newlines(s, nleading=0, ntrailing=0):
"""strip at most nleading and ntrailing newlines from s"""
for _ in range(nleading):
if s.lstrip(' \t')[0] == '\n':
s = s.lstrip(' \t')[1:]
elif s.lstrip(' \t')[:2] == '\r\n':
s = s.lstrip(' \t')[2:]
for _ in range(ntrailing):
if s.rstrip(' \t')[-2:] == '\r\n':
s = s.rstrip(' \t')[:-2]
elif s.rstrip(' \t')[-1:] == '\n':
s = s.rstrip(' \t')[:-1]
return s
class Parameter(object):
""""""
names = None
types = None
description = None
tag = None
descr_only = None
meta = None
def __init__(self, names, types, description, tag=None, descr_only=False,
annotated=False, **kwargs):
"""
Args:
names (list): list of names
types (str): string describing data types
description (str): description text
tag (int): some meaningful index? not fleshed out yet
descr_only (bool): only description is useful
**kwargs: Description
"""
assert names is not None
if description is None:
description = ""
self.names = names
self.types = types
self.description = description
self.tag = tag
self.descr_only = descr_only
self.annotated = annotated
self.meta = kwargs
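# Commented-out sketch of the intermediate representation: one Parameter may
# carry several names (numpy style allows "a, b : int" to share a description):
#   p = Parameter(['a', 'b'], 'int', 'operands to add', tag=0)
#   (p.names, p.types, p.description)  # (['a', 'b'], 'int', 'operands to add')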
class Section(object):
""""""
ALIASES = {}
PARSERS = {}
is_formatted = None
args = None
args_parser = None
args_formatter = None
heading = None
alias = None
_text = None
section_indent = ""
indent = " "
meta = None
formatter_override = None
def __init__(self, heading, text="", indent=None, **kwargs):
"""
Args:
heading (str): heading of the section (should be title case)
text (str, optional): section text
indent (str, optional): used by some formatters
"""
self.heading = heading
self.alias = self.resolve_alias(heading)
if self.alias in self.PARSERS:
parser, formatter = self.PARSERS[self.alias]
self.args_parser = parser
self.args_formatter = formatter
self.is_formatted = True
else:
self.is_formatted = False
if indent is not None:
self.indent = indent
self.text = text
self.meta = kwargs
logger.debug("create section '{}' ({}) with args : '{}'".format(self.heading,
self.alias,
self.args))
@classmethod
def from_section(cls, sec):
new_sec = cls(sec.alias)
new_sec._text = sec._text # pylint: disable=protected-access
# when changing styles, the indentation should change to better fit
# the new style
# new_sec.section_indent = sec.section_indent
# new_sec.indent = sec.indent
if hasattr(sec, "args"):
new_sec.args = sec.args
return new_sec
@classmethod
def resolve_alias(cls, heading):
""""""
titled_heading = heading.title()
try:
return cls.ALIASES[titled_heading]
except KeyError:
return heading
@property
def text(self):
""""""
if self.formatter_override is not None:
s = self.formatter_override(self) # pylint: disable=not-callable
elif self.args_formatter is not None:
s = self.args_formatter(self)
else:
s = self._text
return s
@text.setter
def text(self, val):
""""""
val = strip_newlines(val, ntrailing=1)
if self.args_parser is not None:
self.args = self.args_parser(self, val)
else:
section_indent, self._text = dedent_verbose(val, n=0)
# don't overwrite section indent if val isn't indented
if section_indent:
self.section_indent = section_indent
class NapoleonSection(Section):
""""""
ALIASES = {"Args": "Parameters",
"Arguments": "Parameters",
"Deleted Args": "Deleted Parameters",
"Deleted Arguments": "Deleted Parameters",
"Other Args": "Other Parameters",
"Other Arguments": "Other Parameters",
"Keyword Args": "Keyword Arguments",
"Return": "Returns",
"Yield": "Yields",
"No Longer Returns": "No Longer Returned",
"No Longer Yields": "No Longer Yielded",
"Warnings": "Warning"
}
def is_return_section(self):
return self.heading and self.heading.lower() in ('return', 'returns',
'yield', 'yields')
def param_parser_common(self, text):
# NOTE: there will be some tricky business if there is a
# section break done by "resuming unindented text"
param_list = []
param_dict = OrderedDict()
text = dedent_docstr(text, 0)
_r = r"^\S[^\r\n]*(?:\n[^\S\n]+\S[^\r\n]*|\n)*"
param_blocks = re.findall(_r, text, re.MULTILINE)
for i, block in enumerate(param_blocks):
param = self.finalize_param(block, len(param_list))
param_list.append(param)
if self.is_return_section():
param.names = [", ".join(param.names)]
param_dict[i] = param
else:
for name in param.names:
param_dict[name] = param
return param_dict
class GoogleSection(NapoleonSection):
""""""
section_indent = " "
indent = " "
@staticmethod
def finalize_param(s, tag):
"""
Args:
s (type): Description
tag (int): index of param? not fleshed out yet
"""
meta = {}
_r = r"([^,\s]+(?:\s*,\s*[^,\s]+)*\s*)(?:\((.*)\))?\s*:\s*(.*)"
m = re.match(_r, s, re.DOTALL | re.MULTILINE)
if m:
names, typ, descr = m.groups()
names = [n.strip() for n in names.split(',')]
meta['indent'], descr = dedent_verbose(descr, n=1)
descr_only = False
else:
names = ["{0}".format(tag)]
typ = ""
descr = s
descr_only = True
return Parameter(names, typ, descr, tag=tag, descr_only=descr_only, **meta)
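# Commented-out sketch of what the regex above accepts: for the block
# "x (int): first operand" this yields names == ['x'], types == 'int' and
# description == 'first operand'; a block without a "name (type):" prefix is
# stored as a description-only Parameter keyed by its position.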
def param_parser(self, text):
logger.info("[GoogleSection] section '{}' starts parsing".format(self.alias))
return self.param_parser_common(text)
def param_formatter(self):
""""""
logger.info("[GoogleSection] section '{}' starts formatting".format(self.alias))
s = ""
for param in self.args.values():
if param.descr_only:
s += with_bounding_newlines(param.description, ntrailing=1)
else:
if len(param.names) > 1:
logger.warn("section '{}' : Google docstrings don't allow > 1 "
"parameter per description".format(self.alias))
p = "{0}".format(", ".join(param.names))
if param.types:
types = param.types.strip()
if types:
p = "{0} ({1})".format(p, types)
if param.description:
desc = indent_docstr(param.description,
param.meta.get("indent", self.indent))
p = "{0}: {1}".format(p, desc)
s += with_bounding_newlines(p, ntrailing=1)
return s
PARSERS = {"Parameters": (param_parser,
param_formatter),
"Other Parameters": (param_parser,
param_formatter),
"Deleted Parameters": (param_parser,
param_formatter),
"Keyword Arguments": (param_parser,
param_formatter),
"Attributes": (param_parser,
param_formatter),
"Deleted Attributes": (param_parser,
param_formatter),
"Raises": (param_parser,
param_formatter),
"No Longer Raises": (param_parser,
param_formatter),
"Returns": (param_parser,
param_formatter),
"Yields": (param_parser,
param_formatter),
"No Longer Returned": (param_parser,
param_formatter),
"No Longer Yielded": (param_parser,
param_formatter),
}
class NumpySection(NapoleonSection):
""""""
indent = " "
@staticmethod
def finalize_param(s, i):
meta = {}
_r = r"\s*([^,\s]+(?:\s*,\s*[^,\s]+)*)\s*(?::\s*(.*?))?[^\S\n]*?\n(\s+.*)"
m = re.match(_r, s, re.DOTALL)
if m:
names, typ, desc = m.groups()
# FIXME hack, name for numpy parameters is always a list of names
# to support the multiple parameters per description option in
# numpy docstrings
names = [n.strip() for n in names.split(',')]
meta['indent'], descr = dedent_verbose(desc, 0)
descr_only = False
else:
names = ["{0}".format(i)]
typ = ""
descr = s
descr_only = True
return Parameter(names, typ, descr, tag=i, descr_only=descr_only, **meta)
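# Commented-out numpy-style counterpart: a block such as
#   "x, y : int\n    operands to add\n"
# parses to names == ['x', 'y'] and types == 'int' with a dedented
# description; a block without the "name : type" header line becomes a
# description-only Parameter, mirroring the Google parser above.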
def param_parser(self, text):
logger.info("[NumpySection] section '{}' starts parsing".format(self.alias))
return self.param_parser_common(text)
def param_formatter(self):
""""""
# NOTE: there will be some tricky business if there is a
# section break done by "resuming unindented text"
logger.info("[NumpySection] section '{}' starts formatting".format(self.alias))
s = ""
# already_seen = {}
for param in self.args.values():
if param.descr_only:
s += with_bounding_newlines(param.description, ntrailing=1)
else:
p = "{0}".format(", ".join(param.names))
if param.types:
types = param.types.strip()
if types:
p = "{0} : {1}".format(p, param.types.strip())
p = with_bounding_newlines(p, ntrailing=1)
if param.description:
p += indent_docstr(param.description,
param.meta.get("indent", self.indent),
n=0)
s += with_bounding_newlines(p, ntrailing=1)
return s
PARSERS = {"Parameters": (param_parser,
param_formatter),
"Other Parameters": (param_parser,
param_formatter),
"Deleted Parameters": (param_parser,
param_formatter),
"Keyword Arguments": (param_parser,
param_formatter),
"Attributes": (param_parser,
param_formatter),
"Deleted Attributes": (param_parser,
param_formatter),
"Raises": (param_parser,
param_formatter),
"No Longer Raises": (param_parser,
param_formatter),
"Returns": (param_parser,
param_formatter),
"Yields": (param_parser,
param_formatter),
"No Longer Returned": (param_parser,
param_formatter),
"No Longer Yielded": (param_parser,
param_formatter),
}
class Docstring(object):
"""Handle parsing / modifying / writing docstrings"""
STYLE_NAME = "none"
SECTION_STYLE = Section
TEMPLATE = OrderedDict([("Summary", None)])
PREFERRED_PARAMS_ALIAS = "Args"
sections = None
trailing_newlines = None
def __init__(self, docstr, template_order=False):
"""
Parameters:
docstr (Docstring or str): some existing docstring
template_order (bool, optional): iff True, reorder the
sections to match the order they appear in the template
"""
if isinstance(docstr, Docstring):
self.sections = docstr.sections
self.trailing_newlines = docstr.trailing_newlines
if not isinstance(docstr, type(self)):
# fixme, this is kinda hacky
make_new_sec = self.SECTION_STYLE.from_section
for sec_name, sec in docstr.sections.items():
# when the section should not exists
# i.e. when a section was generated, but isn't needed anymore
# e.g. when there isn't any exception raised
if sec:
docstr.sections[sec_name] = make_new_sec(sec)
else:
# deleting section that shouldn't be here
# including those generated with template_order=True
del docstr.sections[sec_name]
# ok, this way of changing indentation is a thunder hack
if "Parameters" in docstr.sections:
self.get_section("Parameters").heading = self.PREFERRED_PARAMS_ALIAS
for arg in self.get_section("Parameters").args.values():
arg.meta['indent'] = self.get_section("Parameters").indent
if "Returns" in docstr.sections:
for arg in self.get_section("Returns").args.values():
arg.meta['indent'] = self.get_section("Returns").indent
if "Yields" in docstr.sections:
for arg in self.get_section("Yields").args.values():
arg.meta['indent'] = self.get_section("Yields").indent
elif isinstance(docstr, string_types):
if template_order:
self.sections = self.TEMPLATE.copy()
else:
self.sections = OrderedDict()
self._parse(docstr)
def _parse(self, s):
"""Parse docstring into meta data
Parameters:
s (str): docstring
"""
raise NotImplementedError("_parse is an abstract method")
def format(self, top_indent):
"""Format docstring into a string
Parameters:
top_indent (str): indentation added to all but the first
lines
Returns:
str: properly formatted
"""
raise NotImplementedError("format is an abstract method")
def update_parameters(self, params):
""""""
raise NotImplementedError("update_parameters is an abstract method")
def update_return_type(self, ret_name, ret_type,
default_description="Description",
keyword="return"):
""""""
raise NotImplementedError("update_return_type is an abstract method")
def update_attributes(self, attribs, alpha_order=True):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
raise NotImplementedError("update_attributes is an abstract method")
def update_exceptions(self, attribs, alpha_order=True):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
raise NotImplementedError("update_exceptions is an abstract method")
def add_dummy_returns(self, name, typ, description):
raise NotImplementedError("add_dummy_returns is an abstract method")
def finalize_section(self, heading, text):
"""
Args:
heading (type): Description
text (type): Description
"""
section = self.SECTION_STYLE(heading, text)
self.sections[section.alias] = section
def get_section(self, section_name):
if section_name in self.sections:
return self.sections[section_name]
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
return self.sections[alias]
raise KeyError("Section '{0}' not found".format(section_name))
def pop_section(self, section_name):
if section_name in self.sections:
return self.sections.pop(section_name)
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
return self.sections.pop(alias)
raise KeyError("Section '{0}' not found".format(section_name))
def insert_section(self, section_name, section):
if section.heading != section_name:
section.heading = section_name
self.sections[section_name] = section
def section_exists(self, section_name):
"""returns True iff section exists, and was finalized"""
sec = None
if section_name in self.sections:
sec = self.sections[section_name]
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
sec = self.sections[alias]
if sec is not None:
return True
return False
class NapoleonDocstring(Docstring): # pylint: disable=abstract-method
"""Styles understood by napoleon, aka. Google/Numpy"""
STYLE_NAME = "napoleon"
TEMPLATE = OrderedDict([("Summary", None),
("Parameters", None),
("Keyword Arguments", None),
("Returns", None),
("Yields", None),
("No Longer Returned", None),
("No Longer Yielded", None),
("Other Parameters", None),
("Deleted Parameters", None),
("Attributes", None),
("Deleted Attributes", None),
("Methods", None),
("Raises", None),
("No Longer Raises", None),
("Warns", None),
("See Also", None),
("Warning", None),
("Note", None),
("Notes", None),
("References", None),
("Example", None),
("Examples", None),
])
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip()
def _parse(self, s):
"""
Args:
s (type): Description
"""
logger.info("[NapoleonDocstring] starts parsing text")
self.trailing_newlines = count_trailing_newlines(s)
s = dedent_docstr(s)
sec_starts = [(m.start(), m.end(), m.string[m.start():m.end()])
for m in re.finditer(self.SECTION_RE, s, re.MULTILINE)]
sec_starts.insert(0, (0, 0, "Summary"))
sec_starts.append((len(s), len(s), ""))
for current_sec, next_sec in zip(sec_starts[:-1], sec_starts[1:]):
sec_name = self._extract_section_name(current_sec[2])
sec_body = s[current_sec[1]:next_sec[0]]
self.finalize_section(sec_name, sec_body)
@staticmethod
def _format_section_text(heading, body):
raise NotImplementedError("This is an abstract method")
def format(self, top_indent):
"""
Args:
top_indent (type): Description
"""
logger.info("[NapoleonDocstring] starts formatting")
s = ""
if self.section_exists("Summary"):
sec_text = self.get_section("Summary").text
if sec_text.strip():
s += with_bounding_newlines(sec_text, nleading=0, ntrailing=1)
for _, section in islice(self.sections.items(), 1, None):
if section is None:
continue
sec_body = indent_docstr(section.text, section.section_indent, n=0)
sec_text = self._format_section_text(section.heading, sec_body)
s += with_bounding_newlines(sec_text, nleading=1, ntrailing=1)
if self.trailing_newlines:
s = with_bounding_newlines(s, ntrailing=self.trailing_newlines)
s = indent_docstr(s, top_indent)
return s
def _update_section(self, params, sec_name, sec_alias=None,
del_prefix="Deleted ", alpha_order=False,
other_sections=()):
"""Update section to add / remove params
As a failsafe, params that are removed are placed in a
"Deleted ..." section
Args:
params (OrderedDict): dict of Parameter objects
sec_name (str): generic section name
            sec_alias (str): section name that appears in the docstring
del_prefix (str): prefix for section that holds params that
no longer exist.
alpha_order (bool): whether or not to alphabetically sort
the params
"""
if not sec_alias:
sec_alias = sec_name
if not self.section_exists(sec_name) and len(params) == 0:
return None
elif not self.section_exists(sec_name):
self.finalize_section(sec_alias, "")
# put together which other sections exist so we can use them to
# exclude params that exist in them
_other = []
for _secname in other_sections:
if self.section_exists(_secname):
_other.append(self.get_section(_secname))
other_sections = _other
if alpha_order:
sorted_params = OrderedDict()
for k in sorted(list(params.keys()), key=str.lower):
sorted_params[k] = params[k]
params = sorted_params
current_dict = self.get_section(sec_name).args
# go through params in the order of the function declaration
# and cherry-pick from current_dict if there's already a description
# for that parameter
tags_seen = dict()
new = OrderedDict()
for name, param in params.items():
if name in current_dict:
def_param = param
param = current_dict.pop(name)
if param.tag in tags_seen:
param = None
else:
tags_seen[param.tag] = True
# update the type if annotated
if def_param.annotated:
param.types = def_param.types
else:
# if param is in one of the 'other sections', then don't
# worry about it
for sec in other_sections:
if name in sec.args:
# update the type if the annotated
if param.annotated:
sec.args[name].types = param.types
# now ignore it
param = None
if param:
new[name] = param
# add description only parameters back in
        # iterate over a copy so popping entries does not mutate the dict mid-iteration
        for key, param in list(current_dict.items()):
if param.descr_only:
# param.description = '\n' + param.description
new[key] = current_dict.pop(key)
# not sure when this guy gets created
if '' in current_dict:
del current_dict['']
        # go through params that are no longer in the arguments list and
# move them from the Parameters section of the docstring to the
# deleted parameters section
if len(current_dict):
del_sec_name = del_prefix + sec_name
del_sec_alias = del_prefix + sec_alias
logger.warn("killing parameters named: {}".format(current_dict.keys()))
            # TODO: put a switch here for other behavior?
if not self.section_exists(self.SECTION_STYLE.resolve_alias(del_sec_name)):
self.finalize_section(del_sec_name, "")
deled_params = self.get_section(del_sec_name)
deleted_tags = dict()
for key, val in current_dict.items():
if key in deled_params.args:
logger.warn("Stronger Warning: Killing old deleted param: "
"'{0}'".format(key))
val.names.remove(key)
if val.tag in deleted_tags:
deleted_tags[val.tag].names.append(key)
else:
new_val = Parameter([key], val.types, val.description)
deleted_tags[val.tag] = new_val
deled_params.args[key] = new_val
if len(new) == 0:
self.sections[sec_name] = None
else:
self.sections[sec_name].args = new
def update_parameters(self, params):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
logger.info("[NapoleonDocstring] update parameters")
other_sections = ['Other Parameters', 'Keyword Parameters']
self._update_section(params, "Parameters", self.PREFERRED_PARAMS_ALIAS,
other_sections=other_sections)
def update_return_type(self, ret_name, ret_type,
default_description="Description",
keyword="return", del_prefix="No Longer "):
""""""
logger.info("[NapoleonDocstring] update return type")
if keyword == "yield":
sec_name = "Yields"
elif keyword == "return":
sec_name = "Returns"
else:
logger.debug("Unknown return keyword: '{}'".format(keyword))
for std_ret_name in ("Yields", "Returns"):
if self.section_exists(std_ret_name):
del_sec_name = del_prefix + std_ret_name
del_sec_alias = self.SECTION_STYLE.resolve_alias(del_sec_name)
if not self.section_exists(del_sec_alias):
self.finalize_section(del_sec_alias, "")
del_sec = self.get_section(del_sec_alias)
sec = self.pop_section(std_ret_name)
del_sec.args = sec.args
return
if not self.section_exists(sec_name):
# see if a section exists from another keyword, ie, maybe
# this function used to return, but now it yields
for std_ret_name in ("Yields", "Returns"):
if self.section_exists(std_ret_name):
                    # necessary to recreate the section completely
# in order to use the right parser and formatter
logger.debug("old return section exists : '{}'".format(std_ret_name))
old_sec = self.pop_section(std_ret_name)
self.finalize_section(sec_name, "")
new_sec = self.get_section(sec_name)
new_sec.args = old_sec.args
self.insert_section(sec_name, new_sec)
break
if self.section_exists(sec_name):
sec = self.get_section(sec_name)
if sec.args and ret_type:
p0 = next(iter(sec.args.values()))
if p0.descr_only:
p0.description = ret_type
elif p0.types:
p0.types = ret_type
elif p0.names:
p0.names = [ret_type]
elif ret_name or ret_type:
description = default_description
sec.args = OrderedDict()
if ret_name:
sec.args[ret_name] = Parameter([ret_name], ret_type, description)
else:
sec.args[ret_type] = Parameter([ret_type], "", description)
else:
# and i ask myself, how did i get here?
pass
else:
self.finalize_section(sec_name, "")
sec = self.get_section(sec_name)
ret_type = ret_type if ret_type != "" else "${NUMBER:TYPE}"
sec.args = OrderedDict()
sec.args[ret_type] = Parameter([ret_type], "", default_description)
def update_attributes(self, attribs, alpha_order=True):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
logger.info("[NapoleonDocstring] update attributes")
self._update_section(attribs, "Attributes", alpha_order=alpha_order)
def update_exceptions(self, attribs, alpha_order=True):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
logger.info("[NapoleonDocstring] update exceptions")
self._update_section(attribs, "Raises", del_prefix="No Longer ",
alpha_order=alpha_order)
def add_dummy_returns(self, name, typ, description):
# No longer used??
if not self.section_exists("Returns"):
sec = self.SECTION_STYLE("Returns")
if name:
sec.args = {name: Parameter([name], typ, description)}
else:
sec.args = {typ: Parameter([typ], "", description)}
self.sections["Returns"] = sec
class GoogleDocstring(NapoleonDocstring):
""""""
STYLE_NAME = "google"
SECTION_STYLE = GoogleSection
SECTION_RE = r"^[A-Za-z0-9][A-Za-z0-9 \t]*:\s*$\r?\n?"
PREFERRED_PARAMS_ALIAS = "Args"
@classmethod
def detect_style(cls, docstr):
""""""
m = re.search(cls.SECTION_RE, docstr, re.MULTILINE)
return m is not None
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip().rstrip(':').rstrip()
@staticmethod
def _format_section_text(heading, body):
return "{0}:\n{1}".format(heading, body)
class NumpyDocstring(NapoleonDocstring):
""""""
STYLE_NAME = "numpy"
SECTION_STYLE = NumpySection
SECTION_RE = r"^([A-Za-z0-9][A-Za-z0-9 \t]*)\s*\n-+\s*?$\r?\n?"
PREFERRED_PARAMS_ALIAS = "Parameters"
@classmethod
def detect_style(cls, docstr):
""""""
m = re.search(cls.SECTION_RE, docstr, re.MULTILINE)
return m is not None
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip().rstrip('-').rstrip()
@staticmethod
def _format_section_text(heading, body):
return "{0}\n{1}\n{2}".format(heading, "-" * len(heading), body)
STYLE_LOOKUP = OrderedDict([('numpy', NumpyDocstring),
('google', GoogleDocstring)])
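
# ---------------------------------------------------------------------------
# Minimal sketch (not part of the original module): shows how the two concrete
# styles detect their section headers. The docstrings below are made up purely
# for illustration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    google_doc = "Summary line.\n\nArgs:\n    x (int): a value\n"
    numpy_doc = "Summary line.\n\nParameters\n----------\nx : int\n    a value\n"

    print(GoogleDocstring.detect_style(google_doc))   # True
    print(NumpyDocstring.detect_style(numpy_doc))     # True
    print(NumpyDocstring.detect_style(google_doc))    # False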
##
## EOF
##
| 35.641717 | 89 | 0.540895 | [
"MIT"
] | KristoforMaynard/SublimeAutoDocstring | docstring_styles.py | 35,713 | Python |
from importlib.machinery import SourceFileLoader
import io
import os.path
from setuptools import find_packages, setup
sourcedml = SourceFileLoader("sourced-ml-core", "./sourced/ml/core/__init__.py").load_module()
with io.open(os.path.join(os.path.dirname(__file__), "README.md"), encoding="utf-8") as f:
long_description = f.read()
include_tests = os.getenv("ML_CORE_SETUP_INCLUDE_TESTS", False)
exclude_packages = (("sourced.ml.core.tests", "sourced.ml.core.tests.source")
if not include_tests else ())
tf_requires = ["tensorflow>=1.0,<1.14"]
tf_gpu_requires = ["tensorflow-gpu>=1.0,<1.14"]
package_data = {"": ["LICENSE.md", "README.md"]}
if include_tests:
test_data_dirs = ["./asdf/*.asdf", "./swivel/*", "identifiers.csv.tar.gz"]
package_data["sourced.ml.core.tests"] = test_data_dirs
setup(
name="sourced-ml-core",
description="Library containing the core algorithms for machine learning on source code. "
"Provides API and tools to train and use models based "
"on source code features extracted from Babelfish's UASTs.",
long_description=long_description,
long_description_content_type="text/markdown",
version=sourcedml.__version__,
license="Apache 2.0",
author="source{d}",
author_email="[email protected]",
url="https://github.com/src-d/ml-core",
download_url="https://github.com/src-d/ml-core",
packages=find_packages(exclude=exclude_packages),
namespace_packages=["sourced", "sourced.ml"],
keywords=[
"machine learning on source code",
"word2vec",
"id2vec",
"github",
"swivel",
"bow",
"bblfsh",
"babelfish",
],
install_requires=[
"PyStemmer>=1.3,<2.0",
"bblfsh>=3.1.0,<4.0",
"modelforge>=0.14.1",
"pygments>=2.2.0,<3.0",
"keras>=2.0,<3.0",
"scikit-learn>=0.21.1,<1.0",
"tqdm>=4.20,<5.0",
],
extras_require={"tf": tf_requires, "tf_gpu": tf_gpu_requires},
tests_require=["docker>=3.6.0,<4.0"],
package_data=package_data,
python_requires=">=3.5",
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: POSIX",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Software Development :: Libraries",
],
)
| 35.410959 | 94 | 0.629014 | [
"Apache-2.0"
] | zurk/ml-core | setup.py | 2,585 | Python |
#
from binho.errors import DriverCapabilityError
class binhoAccessory:
""" Base class for objects representing accessory boards. """
# Optional: subclasses can set this variable to override their accessory name.
# If not provided, their name will automatically be taken from their class names.
# This typically doesn't need to be overridden.
ACCESSORY_NAME = None
@classmethod
def get_name(cls):
""" Default implementation of a function that returns a class's name. """
# If we have an overridden accessory name, return it.
if cls.ACCESSORY_NAME:
return cls.ACCESSORY_NAME
# Otherwise, return the given class's name.
return cls.__name__
@classmethod
def available_accessories(cls):
""" Returns a list of available neighbors. """
return [accessory.get_name() for accessory in cls.__subclasses__()]
@classmethod
def from_name(cls, name, board, *args, **kwargs):
""" Creates a new binhoAccessory object from its name. """
target_name = name.lower()
for subclass in cls.__subclasses__():
# Grab the class's name, and check to see if it matches ours.
subclass_name = subclass.get_name()
# If this class matches our target name, this is the class we're looking for!
# Create an instance and return it.
if target_name == subclass_name.lower():
return subclass(board, *args, **kwargs)
raise DriverCapabilityError("No known driver for accessory '{}'.".format(name))
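
# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). The accessory class
# below is a hypothetical stand-in for a concrete driver, and the board handle
# is passed as None purely for illustration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":

    class ExampleAccessory(binhoAccessory):
        """Hypothetical accessory used only to demonstrate the factory API."""

        def __init__(self, board):
            self.board = board

    print(binhoAccessory.available_accessories())       # ['ExampleAccessory']
    accessory = binhoAccessory.from_name("ExampleAccessory", None)
    print(type(accessory).__name__)                      # ExampleAccessory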
| 34.565217 | 89 | 0.657233 | [
"BSD-3-Clause"
] | binhollc/binho-python-package | binho/accessory.py | 1,590 | Python |
# coding: utf-8
"""
flyteidl/service/admin.proto
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: version not set
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import flyteadmin
from flyteadmin.models.admin_pager_duty_notification import AdminPagerDutyNotification # noqa: E501
from flyteadmin.rest import ApiException
class TestAdminPagerDutyNotification(unittest.TestCase):
"""AdminPagerDutyNotification unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAdminPagerDutyNotification(self):
"""Test AdminPagerDutyNotification"""
# FIXME: construct object with mandatory attributes with example values
# model = flyteadmin.models.admin_pager_duty_notification.AdminPagerDutyNotification() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 25.463415 | 119 | 0.737548 | [
"Apache-2.0"
] | EngHabu/flyteidl | gen/pb_python/flyteidl/service/flyteadmin/test/test_admin_pager_duty_notification.py | 1,044 | Python |
__author__ = 'Burgos, Agustin - Schelotto, Jorge'
# -*- coding: utf-8 -*-
# Copyright 2018 authors: Burgos Agustin, Schelotto Jorge
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import pygame
class Palabras(pygame.sprite.Sprite):
def __init__(self, ruta, nombre, x, y):
super().__init__()
self.__palabra = nombre
self.__click = False
self.image = pygame.image.load(ruta).convert_alpha()
self.rect = self.image.get_rect()
self.collide = False
self.posX = x
self.posY = y
def getPosX(self):
return self.posX
def getPosY(self):
return self.posY
def getPalabra(self):
return self.__palabra
def getPalabraImagen(self):
return self.image
def setClick(self, bool):
self.__click = bool
def getClick(self):
return self.__click
def getRect(self):
return self.rect
def colli(self, x, y):
if x > 20:
            # Shrink the image
center = self.rect.center
x = x - 1
y = y - 1
self.image = pygame.transform.scale(self.image, (x, y))
self.rect = self.image.get_rect()
self.rect.center = center
self.image = pygame.transform.rotozoom(self.image, -90, 0.8)
elif x <= 20:
            # Prevent x from going below 0
center = self.rect.center
self.image = pygame.transform.scale(self.image, (0, 0))
self.rect = self.image.get_rect()
self.rect.center = center
self.image = pygame.transform.rotozoom(self.image, -90, 0.5)
def update(self,surface):
"""Controla los eventos y coliciones de los sprites Palabras"""
if not self.getClick() and not self.collide:
self.rect.center = (self.posX, self.posY)
if self.getClick():
            # If the image is clicked
self.rect.center = pygame.mouse.get_pos()
if self.collide:
            # If there is a collision
x = self.image.get_rect().size[0]
y = self.image.get_rect().size[1]
self.colli(x,y)
        # Move the image out of the collision zone.
if self.image.get_rect().size[0] <= 20:
self.rect.center = (0,0)
surface.blit(self.getPalabraImagen(), self.getRect())
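
# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original game): assumes an image file
# "palabra.png" exists next to this script; convert_alpha() requires a display
# to be created before the sprite is constructed.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pygame.init()
    screen = pygame.display.set_mode((800, 600))
    sprite = Palabras("palabra.png", "casa", 100, 100)
    clock = pygame.time.Clock()
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            elif event.type == pygame.MOUSEBUTTONDOWN and sprite.getRect().collidepoint(event.pos):
                sprite.setClick(True)
            elif event.type == pygame.MOUSEBUTTONUP:
                sprite.setClick(False)
        screen.fill((0, 0, 0))
        sprite.update(screen)
        pygame.display.flip()
        clock.tick(60)
    pygame.quit()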
| 35.978495 | 128 | 0.633293 | [
"MIT"
] | JorgeSchelotto/TrabajoFinalSeminarioPython | Clases/Palabras.py | 3,347 | Python |
"""
Configuration file for py.test
"""
import django
def pytest_configure():
from django.conf import settings
settings.configure(
DEBUG=True,
USE_TZ=True,
USE_I18N=True,
ROOT_URLCONF="tests.urls",
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "test.sqlite3",
}
},
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.admin",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"simple_auth",
],
MIDDLEWARE=[
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"simple_auth.middleware.SimpleAuthMiddleware",
],
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
},
],
SITE_ID=1,
)
django.setup()
| 27.111111 | 77 | 0.536066 | [
"BSD-2-Clause"
] | bennylope/django-simple-auth | conftest.py | 1,220 | Python |
import sys
from subprocess import Popen
import cx_Oracle
root_directory = sys.argv[1]
def main(directory):
public_projects = get_public_project_accessions()
for project_accession in public_projects:
Popen(['./runAnnotator.sh', directory, str(project_accession)])
# get all the project references from pride archive
def get_public_project_accessions():
accessions = list()
archive_cursor = connect_archive()
archive_cursor.execute(
"select accession from project where (submission_type='PRIDE' or submission_type='COMPLETE') and is_public = 1")
projects = archive_cursor.fetchall()
for project in projects:
accessions.append(project[0])
archive_cursor.close()
return accessions
# connect to pride archive database
def connect_archive():
# connect to archive database
archive_db = cx_Oracle.connect(
"${pride.repo.db.user}/${pride.repo.db.password}@(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=ora-vm-032.ebi.ac.uk)(PORT=1531))(CONNECT_DATA=(SERVICE_NAME=PRIDEPRO)))")
# Create an cursor object for archive database
return archive_db.cursor()
if __name__ == '__main__':
main(root_directory) | 28.261905 | 173 | 0.730413 | [
"Apache-2.0"
] | PRIDE-Cluster/cluster-result-importer | scripts/batchAnnotator.py | 1,187 | Python |
# Timing functionality from Python's built-in module
from time import perf_counter
from functools import lru_cache
def timer(fn):
def inner(*args):
start = perf_counter()
result = fn(*args)
end = perf_counter()
elapsed = end - start
print(result)
print('elapsed', elapsed)
return inner
@timer
def calc_factorial(num):
if num < 0:
raise ValueError('Please use a number not smaller than 0')
product = 1
for i in range(num):
product = product * (i+1)
return product
# @timer
# @lru_cache()
# def fib(n):
# if n < 2:
# return n
# return fib(n-1) + fib(n-2)
if __name__ == '__main__':
calc_factorial(88)
# fib(25)
| 17.853659 | 66 | 0.592896 | [
"MIT"
] | zubrik13/udacity_inter_py | lesson3-functional_programming/timing.py | 732 | Python |
from echo import CallbackProperty, SelectionCallbackProperty, keep_in_sync, delay_callback
from matplotlib.colors import to_rgba
from glue.core.message import LayerArtistUpdatedMessage
from glue.core.state_objects import State
from glue.viewers.common.state import ViewerState, LayerState
from glue.utils import defer_draw, avoid_circular
__all__ = ['DeferredDrawSelectionCallbackProperty', 'DeferredDrawCallbackProperty',
'MatplotlibDataViewerState', 'MatplotlibLayerState']
class DeferredDrawCallbackProperty(CallbackProperty):
"""
A callback property where drawing is deferred until
after notify has called all callback functions.
"""
@defer_draw
def notify(self, *args, **kwargs):
super(DeferredDrawCallbackProperty, self).notify(*args, **kwargs)
class DeferredDrawSelectionCallbackProperty(SelectionCallbackProperty):
"""
A callback property where drawing is deferred until
after notify has called all callback functions.
"""
@defer_draw
def notify(self, *args, **kwargs):
super(DeferredDrawSelectionCallbackProperty, self).notify(*args, **kwargs)
VALID_WEIGHTS = ['light', 'normal', 'medium', 'semibold', 'bold', 'heavy', 'black']
VALID_LOCATIONS = ['draggable', 'best',
'upper right', 'upper left',
'lower left', 'lower right',
'center left', 'center right',
'lower center', 'upper center']
class MatplotlibLegendState(State):
"""The legend state"""
visible = DeferredDrawCallbackProperty(False, docstring="Whether to show the legend")
location = DeferredDrawSelectionCallbackProperty(0, docstring="The location of the legend in the axis")
title = DeferredDrawCallbackProperty("", docstring='The title of the legend')
fontsize = DeferredDrawCallbackProperty(10, docstring='The font size of the title')
alpha = DeferredDrawCallbackProperty(0.6, docstring='Transparency of the legend frame')
frame_color = DeferredDrawCallbackProperty("#ffffff", docstring='Frame color of the legend')
show_edge = DeferredDrawCallbackProperty(True, docstring="Whether to show the edge of the frame ")
text_color = DeferredDrawCallbackProperty("#000000", docstring='Text color of the legend')
def __init__(self, *args, **kwargs):
MatplotlibLegendState.location.set_choices(self, VALID_LOCATIONS)
super().__init__(*args, **kwargs)
self._set_color_choices()
def _set_color_choices(self):
from glue.config import settings
self.frame_color = settings.BACKGROUND_COLOR
self.text_color = settings.FOREGROUND_COLOR
@property
def edge_color(self):
if self.show_edge:
return to_rgba(self.text_color, self.alpha)
else:
return None
@property
def draggable(self):
return self.location == 'draggable'
@property
def mpl_location(self):
if self.location == 'draggable':
return 'best'
else:
return self.location
def update_axes_settings_from(self, state):
        self.visible = state.visible
        self.location = state.location
self.alpha = state.alpha
self.title = state.title
self.fontsize = state.fontsize
self.frame_color = state.frame_color
self.show_edge = state.show_edge
self.text_color = state.text_color
class MatplotlibDataViewerState(ViewerState):
"""
A base class that includes common attributes for viewers based on
Matplotlib.
"""
x_min = DeferredDrawCallbackProperty(docstring='Lower limit of the visible x range')
x_max = DeferredDrawCallbackProperty(docstring='Upper limit of the visible x range')
y_min = DeferredDrawCallbackProperty(docstring='Lower limit of the visible y range')
y_max = DeferredDrawCallbackProperty(docstring='Upper limit of the visible y range')
x_log = DeferredDrawCallbackProperty(False, docstring='Whether the x axis is logarithmic')
y_log = DeferredDrawCallbackProperty(False, docstring='Whether the y axis is logarithmic')
aspect = DeferredDrawCallbackProperty('auto', docstring='Aspect ratio for the axes')
show_axes = DeferredDrawCallbackProperty(True, docstring='Whether the axes are shown')
x_axislabel = DeferredDrawCallbackProperty('', docstring='Label for the x-axis')
y_axislabel = DeferredDrawCallbackProperty('', docstring='Label for the y-axis')
x_axislabel_size = DeferredDrawCallbackProperty(10, docstring='Size of the x-axis label')
y_axislabel_size = DeferredDrawCallbackProperty(10, docstring='Size of the y-axis label')
x_axislabel_weight = DeferredDrawSelectionCallbackProperty(1, docstring='Weight of the x-axis label')
y_axislabel_weight = DeferredDrawSelectionCallbackProperty(1, docstring='Weight of the y-axis label')
x_ticklabel_size = DeferredDrawCallbackProperty(8, docstring='Size of the x-axis tick labels')
y_ticklabel_size = DeferredDrawCallbackProperty(8, docstring='Size of the y-axis tick labels')
def __init__(self, *args, **kwargs):
self._axes_aspect_ratio = None
MatplotlibDataViewerState.x_axislabel_weight.set_choices(self, VALID_WEIGHTS)
MatplotlibDataViewerState.y_axislabel_weight.set_choices(self, VALID_WEIGHTS)
super(MatplotlibDataViewerState, self).__init__(*args, **kwargs)
self.legend = MatplotlibLegendState(*args, **kwargs)
self.add_callback('aspect', self._adjust_limits_aspect, priority=10000)
self.add_callback('x_min', self._adjust_limits_aspect_x, priority=10000)
self.add_callback('x_max', self._adjust_limits_aspect_x, priority=10000)
self.add_callback('y_min', self._adjust_limits_aspect_y, priority=10000)
self.add_callback('y_max', self._adjust_limits_aspect_y, priority=10000)
def _set_axes_aspect_ratio(self, value):
"""
Set the aspect ratio of the axes in which the visualization is shown.
This is a private method that is intended only for internal use, and it
allows this viewer state class to adjust the limits accordingly when
the aspect callback property is set to 'equal'
"""
self._axes_aspect_ratio = value
self._adjust_limits_aspect(aspect_adjustable='both')
def _adjust_limits_aspect_x(self, *args):
self._adjust_limits_aspect(aspect_adjustable='y')
def _adjust_limits_aspect_y(self, *args):
self._adjust_limits_aspect(aspect_adjustable='x')
@avoid_circular
def _adjust_limits_aspect(self, *args, **kwargs):
"""
Adjust the limits of the visualization to take into account the aspect
ratio. This only works if `_set_axes_aspect_ratio` has been called
previously.
"""
if self.aspect == 'auto' or self._axes_aspect_ratio is None:
return
if self.x_min is None or self.x_max is None or self.y_min is None or self.y_max is None:
return
aspect_adjustable = kwargs.pop('aspect_adjustable', 'auto')
changed = None
# Find axes aspect ratio
axes_ratio = self._axes_aspect_ratio
# Put the limits in temporary variables so that we only actually change
# them in one go at the end.
x_min, x_max = self.x_min, self.x_max
y_min, y_max = self.y_min, self.y_max
# Find current data ratio
data_ratio = abs(y_max - y_min) / abs(x_max - x_min)
# Only do something if the data ratio is sufficiently different
# from the axes ratio.
if abs(data_ratio - axes_ratio) / (0.5 * (data_ratio + axes_ratio)) > 0.01:
# We now adjust the limits - which ones we adjust depends on
# the adjust keyword. We also make sure we preserve the
# mid-point of the current coordinates.
if aspect_adjustable == 'both':
# We need to adjust both at the same time
x_mid = 0.5 * (x_min + x_max)
x_width = abs(x_max - x_min) * (data_ratio / axes_ratio) ** 0.5
y_mid = 0.5 * (y_min + y_max)
y_width = abs(y_max - y_min) / (data_ratio / axes_ratio) ** 0.5
x_min = x_mid - x_width / 2.
x_max = x_mid + x_width / 2.
y_min = y_mid - y_width / 2.
y_max = y_mid + y_width / 2.
elif (aspect_adjustable == 'auto' and data_ratio > axes_ratio) or aspect_adjustable == 'x':
x_mid = 0.5 * (x_min + x_max)
x_width = abs(y_max - y_min) / axes_ratio
x_min = x_mid - x_width / 2.
x_max = x_mid + x_width / 2.
else:
y_mid = 0.5 * (y_min + y_max)
y_width = abs(x_max - x_min) * axes_ratio
y_min = y_mid - y_width / 2.
y_max = y_mid + y_width / 2.
with delay_callback(self, 'x_min', 'x_max', 'y_min', 'y_max'):
self.x_min = x_min
self.x_max = x_max
self.y_min = y_min
self.y_max = y_max
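    # Worked illustration of the method above (comment only, not from the
    # original source): with axes_ratio = 1 and current limits x: [0, 4],
    # y: [0, 1], the data ratio is 0.25. Adjusting 'both' scales the x width
    # by sqrt(0.25) = 0.5 and the y width by 1 / 0.5 = 2, giving x: [1, 3]
    # and y: [-0.5, 1.5], which restores a 1:1 ratio around the same centers.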
def update_axes_settings_from(self, state):
# axis
self.x_axislabel_size = state.x_axislabel_size
self.y_axislabel_size = state.y_axislabel_size
self.x_axislabel_weight = state.x_axislabel_weight
self.y_axislabel_weight = state.y_axislabel_weight
self.x_ticklabel_size = state.x_ticklabel_size
self.y_ticklabel_size = state.y_ticklabel_size
# legend
self.legend.update_axes_settings_from(state.legend)
@defer_draw
def _notify_global(self, *args, **kwargs):
super(MatplotlibDataViewerState, self)._notify_global(*args, **kwargs)
def _update_priority(self, name):
if name == 'layers':
return 2
elif name.endswith('_log'):
return 0.5
elif name.endswith(('_min', '_max')):
return 0
else:
return 1
class MatplotlibLayerState(LayerState):
"""
A base class that includes common attributes for all layers in viewers based
on Matplotlib.
"""
color = DeferredDrawCallbackProperty(docstring='The color used to display '
'the data')
alpha = DeferredDrawCallbackProperty(docstring='The transparency used to '
'display the data')
def __init__(self, viewer_state=None, **kwargs):
super(MatplotlibLayerState, self).__init__(viewer_state=viewer_state, **kwargs)
self.color = self.layer.style.color
self.alpha = self.layer.style.alpha
self._sync_color = keep_in_sync(self, 'color', self.layer.style, 'color')
self._sync_alpha = keep_in_sync(self, 'alpha', self.layer.style, 'alpha')
self.add_global_callback(self._notify_layer_update)
def _notify_layer_update(self, **kwargs):
message = LayerArtistUpdatedMessage(self)
if self.layer is not None and self.layer.hub is not None:
self.layer.hub.broadcast(message)
@defer_draw
def _notify_global(self, *args, **kwargs):
super(MatplotlibLayerState, self)._notify_global(*args, **kwargs)
| 38.404762 | 107 | 0.666726 | [
"BSD-3-Clause"
] | cnheider/glue | glue/viewers/matplotlib/state.py | 11,291 | Python |
#!/usr/bin/env python3
import os
import platform
import shutil
import sys
from urllib import request
import bs4
import patoolib
url = "http://bearware.dk/teamtalksdk"
cd = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def get_url_suffix_from_platform() -> str:
machine = platform.machine()
if sys.platform == "win32":
architecture = platform.architecture()
if machine == "AMD64" or machine == "x86":
if architecture[0] == "64bit":
return "win64"
else:
return "win32"
else:
sys.exit("Native Windows on ARM is not suported")
elif sys.platform == "darwin":
sys.exit("Darwin is not supported")
else:
if machine == "AMD64" or machine == "x86_64":
return "ubuntu18_x86_64"
elif "arm" in machine:
return "raspbian_armhf"
else:
sys.exit("Your architecture is not supported")
def download() -> None:
r = request.urlopen(url)
html = r.read().decode("UTF-8")
page = bs4.BeautifulSoup(html, features="html.parser")
versions = page.find_all("li")
last_version = versions[-1].a.get("href")[0:-1]
download_url = (
url
+ "/"
+ last_version
+ "/"
+ "tt5sdk_{v}_{p}.7z".format(v=last_version, p=get_url_suffix_from_platform())
)
print("Downloading from " + download_url)
request.urlretrieve(download_url, os.path.join(cd, "ttsdk.7z"))
def extract() -> None:
try:
os.mkdir(os.path.join(cd, "ttsdk"))
except FileExistsError:
shutil.rmtree(os.path.join(cd, "ttsdk"))
os.mkdir(os.path.join(cd, "ttsdk"))
patoolib.extract_archive(
os.path.join(cd, "ttsdk.7z"), outdir=os.path.join(cd, "ttsdk")
)
def move() -> None:
path = os.path.join(cd, "ttsdk", os.listdir(os.path.join(cd, "ttsdk"))[0])
try:
if sys.platform == "win32":
os.rename(
os.path.join(path, "Library/TeamTalk_DLL/TeamTalk5.dll"),
os.path.join(cd, "TeamTalk5.dll"),
)
else:
os.rename(
os.path.join(path, "Library/TeamTalk_DLL/libTeamTalk5.so"),
os.path.join(cd, "libTeamTalk5.so"),
)
except FileExistsError:
if sys.platform == "win32":
os.remove(os.path.join(cd, "TeamTalk5.dll"))
os.rename(
os.path.join(path, "Library/TeamTalk_DLL/TeamTalk5.dll"),
os.path.join(cd, "TeamTalk5.dll"),
)
else:
os.remove(os.path.join(cd, "libTeamTalk5.so"))
os.rename(
os.path.join(path, "Library/TeamTalk_DLL/libTeamTalk5.so"),
os.path.join(cd, "libTeamTalk5.so"),
)
try:
os.rename(
os.path.join(path, "Library/TeamTalkPy"), os.path.join(cd, "TeamTalkPy")
)
except OSError:
shutil.rmtree(os.path.join(cd, "TeamTalkPy"))
os.rename(
os.path.join(path, "Library/TeamTalkPy"), os.path.join(cd, "TeamTalkPy")
)
try:
os.rename(
os.path.join(path, "License.txt"), os.path.join(cd, "TTSDK_license.txt")
)
except FileExistsError:
os.remove(os.path.join(cd, "TTSDK_license.txt"))
os.rename(
os.path.join(path, "License.txt"), os.path.join(cd, "TTSDK_license.txt")
)
def clean() -> None:
os.remove(os.path.join(cd, "ttsdk.7z"))
shutil.rmtree(os.path.join(cd, "ttsdk"))
def install() -> None:
print("Installing TeamTalk sdk components")
print("Downloading latest sdk version")
download()
print("Downloaded. extracting")
extract()
print("Extracted. moving")
move()
print("moved. cleaning")
clean()
print("cleaned.")
print("Installed")
if __name__ == "__main__":
install()
| 28.933333 | 86 | 0.569636 | [
"MIT"
] | ahmetecevitli/TTMediaBot | tools/ttsdk_downloader.py | 3,906 | Python |
from . import (
yaw,
layout,
base_COE,
optimization,
layout_height,
power_density,
yaw_wind_rose,
power_density_1D,
yaw_wind_rose_parallel,
)
| 14.833333 | 27 | 0.651685 | [
"Apache-2.0"
] | ArnaudRobert/floris | floris/tools/optimization/scipy/__init__.py | 178 | Python |
# Copyright (C) 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Auto-generated file for MCP4725 v0.1.0.
# Generated from peripherals/MCP4725.yaml using Cyanobyte Codegen v0.1.0
"""
Class for MCP4725
"""
from machine import I2C
DIGITALOUT_GND = 0 # Ground
DIGITALOUT_VCC = 4095 # Vcc (full power)
def _swap_endian(val, length):
"""
Swap the endianness of a number
"""
if length <= 8:
return val
if length <= 16:
return (val & 0xFF00) >> 8 | (val & 0xFF) << 8
if length <= 32:
return ((val & 0xFF000000) >> 24 |
(val & 0x00FF0000) >> 8 |
(val & 0x0000FF00) << 8 |
(val & 0x000000FF) << 24)
    raise Exception('Cannot swap endianness for length ' + str(length))
class MCP4725:
"""
Microchip 4725 Digital-to-Analog Converter
"""
device_address = 98
REGISTER_EEPROM = 96
REGISTER_VOUT = 64
def __init__(self, i2c):
# Initialize connection to peripheral
self.i2c = i2c
def get_eeprom(self):
"""
If EEPROM is set, the saved voltage output will
be loaded from power-on.
"""
byte_list = self.i2c.readfrom_mem(
self.device_address,
self.REGISTER_EEPROM,
1,
addrsize=12
)
val = 0
val = val << 8 | byte_list[0]
val = _swap_endian(val, 12)
return val
def set_eeprom(self, data):
"""
If EEPROM is set, the saved voltage output will
be loaded from power-on.
"""
data = _swap_endian(data, 12)
        buffer = bytearray(1)
        buffer[0] = (data >> 0) & 0xFF
self.i2c.writeto_mem(
self.device_address,
self.REGISTER_EEPROM,
buffer,
addrsize=12
)
def get_vout(self):
"""
VOut = (Vcc * value) / 4096
The output is a range between 0 and Vcc with
steps of Vcc/4096.
In a 3.3v system, each step is 800 microvolts.
"""
byte_list = self.i2c.readfrom_mem(
self.device_address,
self.REGISTER_VOUT,
1,
addrsize=12
)
val = 0
val = val << 8 | byte_list[0]
val = _swap_endian(val, 12)
return val
def set_vout(self, data):
"""
VOut = (Vcc * value) / 4096
The output is a range between 0 and Vcc with
steps of Vcc/4096.
In a 3.3v system, each step is 800 microvolts.
"""
data = _swap_endian(data, 12)
        buffer = bytearray(1)
        buffer[0] = (data >> 0) & 0xFF
self.i2c.writeto_mem(
self.device_address,
self.REGISTER_VOUT,
buffer,
addrsize=12
)
def get_digitalout(self):
"""
Only allows you to send fully on or off
"""
# Read register data
# '#/registers/EEPROM' > 'EEPROM'
val = self.get_eeprom()
# Mask register value
val = val & 0b0001111111111111
return val
def set_digitalout(self, data):
"""
Only allows you to send fully on or off
"""
# Read current register data
# '#/registers/EEPROM' > 'EEPROM'
register_data = self.get_eeprom()
register_data = register_data | data
self.set_eeprom(register_data)
def getvout_asvoltage(self, vcc):
"""
get vout
"""
voltage = None # Variable declaration
# Read value of register into a variable
value = self.get_eeprom()
voltage = value / 4096 * vcc
return voltage
def setvout_asvoltage(self, output, vcc):
"""
set vout
"""
output = output / vcc * 4096
self.set_eeprom(output)
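
# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the generated driver): the bus id and pin
# numbers below are hypothetical and depend on the target MicroPython board.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from machine import Pin

    i2c = I2C(0, scl=Pin(22), sda=Pin(21))    # hypothetical pin assignment
    dac = MCP4725(i2c)
    dac.set_vout(2048)                        # roughly Vcc / 2 at the output
    print(dac.get_vout())                     # raw 12-bit register value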
| 25.597633 | 74 | 0.561258 | [
"Apache-2.0"
] | google/cyanobyte | test/sampleData/micropython/MCP4725.py | 4,326 | Python |
from drawy import *
class Button:
def __init__(self, text, click_handler, point, width, height, *, hide=False, do_highlight=True, background_color='gray', highlight_color='lightgray', text_color='black', border_color='black'):
self.text = text
self.click_handler = click_handler
self.point = Point(*point)
self.width = width
self.height = height
self.hide = hide
self.do_highlight = do_highlight
self.background_color = background_color
self.highlight_color = highlight_color
self.text_color = text_color
self.border_color = border_color
def is_point_inside(self, point: Point):
return point.is_inside_rectangle(self.point, self.width, self.height)
def draw(self):
if self.hide:
return
background = self.background_color
if self.do_highlight and self.is_point_inside(MOUSE_POSITION):
background = self.highlight_color
draw_rectangle(self.point, self.width, self.height, background)
draw_rectangle(self.point, self.width, self.height, self.border_color, fill=False, border_thickness=4)
draw_text(self.text, self.point + Point(self.width, self.height) / 2, self.text_color)
def on_click(self):
if self.is_point_inside(MOUSE_POSITION) and self.click_handler:
self.click_handler()
BUTTONS = [
Button("SCORE", lambda: print('score!'), (100, 100), 200, 60),
Button("test", lambda: print("test!"), (100, 300), 200, 60),
]
def init():
pass
def draw():
for b in BUTTONS:
b.draw()
def on_click():
for b in BUTTONS:
b.on_click()
run(background_color='#ccc', title='Buttons test')
| 34.038462 | 197 | 0.640678 | [
"MIT"
] | NextLight/drawy | examples/button.py | 1,770 | Python |
# Charlie Conneely
# Score Keeper
from player import Player
ranks_file = "rankings.txt"
class ScoreKeeper:
def __init__(self):
self.ranks = []
"""
Check if player score ranks against scores in rankings.txt
"""
def check_ranking(self, p):
self.populate_ranks_array(ranks_file)
# check score against rankings
top5 = self.compare_score(p)
if top5:
print("Well Done! You ranked Top 5!")
print("\nNew Rankings:")
for i in self.ranks:
print(i.name + " - " + str(i.score))
self.append_file(ranks_file)
else:
print("Sorry, your score didn't rank top 5!")
print("\nCurrent Rankings:")
for i in self.ranks:
print(i.name + " - " + str(i.score))
# Clear ranks array
self.ranks = []
"""
Append ranks file with new score
"""
def append_file(self, rfile):
with open(rfile, 'w') as file:
for p in self.ranks:
file.write(str(p.name) + " " + str(p.score) + "\n")
"""
Check if score beats that of any currently ranked players
If true - Add player to rankings, resort array, pop last item from the end.
returns Boolean
"""
def compare_score(self, player):
does_rank = False
for p in self.ranks:
if (int(player.score) > int(p.score)):
does_rank = True
if does_rank:
self.ranks.append(player)
# sort ranks array by scores
self.ranks.sort(key=lambda p: int(p.score), reverse=True)
# remove the last item
self.ranks.pop()
return does_rank
"""
Populate local array with scores from txt file
"""
def populate_ranks_array(self, scores_file):
with open(scores_file) as f:
for line in f:
(n, s) = line.split()
self.ranks.append(Player(n,s))
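
# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): assumes rankings.txt
# sits next to this script and contains lines of the form "<name> <score>".
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    keeper = ScoreKeeper()
    keeper.check_ranking(Player("Charlie", 42))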
| 28.428571 | 80 | 0.548241 | [
"MIT"
] | charlieconneely/countdown | score_system.py | 1,990 | Python |
import numpy as np
import pandas as pd
import scipy.stats
from .utils_complexity_embedding import complexity_embedding
from .entropy_shannon import entropy_shannon
def entropy_distribution(signal=None, delay=1, dimension=3, bins="Sturges", base=2):
"""**Distribution Entropy (DistrEn)**
Distribution Entropy (**DistrEn**, more commonly known as **DistEn**).
Parameters
----------
signal : Union[list, np.array, pd.Series]
The signal (i.e., a time series) in the form of a vector of values.
delay : int
Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
See :func:`complexity_delay` to estimate the optimal value for this parameter.
dimension : int
Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
:func:`complexity_dimension` to estimate the optimal value for this parameter.
bins : int or str
Method to find the number of bins. Can be a number, or one of ``"Sturges"``, ``"Rice"``,
``"Doane"``, or ``"sqrt"``.
base : int
The logarithmic base to use for :func:`entropy_shannon`.
Returns
--------
distren : float
The Distance Entropy entropy of the signal.
info : dict
A dictionary containing additional information regarding the parameters used.
See Also
--------
entropy_shannon
Examples
----------
.. ipython:: python
import neurokit2 as nk
signal = nk.signal_simulate(duration=2, frequency=5)
distren, info = nk.entropy_distribution(signal)
distren
References
-----------
* Li, P., Liu, C., Li, K., Zheng, D., Liu, C., & Hou, Y. (2015). Assessing the complexity of
short-term heartbeat interval series by distribution entropy. Medical & biological
engineering & computing, 53(1), 77-87.
"""
# Sanity checks
if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
raise ValueError(
"Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
)
# Store parameters
info = {
"Dimension": dimension,
"Delay": delay,
"Bins": bins,
}
# Time-delay embedding
embedded = complexity_embedding(signal, delay=delay, dimension=dimension)
# Compute distance
n = len(embedded)
d = np.zeros(round(n * (n - 1) / 2))
for k in range(1, n):
Ix = (int((k - 1) * (n - k / 2)), int(k * (n - ((k + 1) / 2))))
d[Ix[0] : Ix[1]] = np.max(
abs(np.tile(embedded[k - 1, :], (n - k, 1)) - embedded[k:, :]), axis=1
)
# TODO: "D is symmetrical. Only the upper or lower triangular matrix will actually be adequate
# for the estimation of the ePDF, which can be used to facilitate its fast calculation."
n_d = len(d)
# Number of bins
if isinstance(bins, str):
bins = bins.lower()
if bins == "sturges":
n_bins = np.ceil(np.log2(n_d) + 1)
elif bins == "rice":
n_bins = np.ceil(2 * (n_d ** (1 / 3)))
elif bins == "sqrt":
n_bins = np.ceil(np.sqrt(n_d))
elif bins == "doanes":
sigma = np.sqrt(6 * (n_d - 2) / ((n_d + 1) * (n_d + 3)))
n_bins = np.ceil(1 + np.log2(n_d) + np.log2(1 + abs(scipy.stats.skew(d) / sigma)))
else:
raise Exception("Please enter a valid binning method")
else:
n_bins = bins
# Get probability
freq, _ = np.histogram(d, int(n_bins))
freq = freq / freq.sum()
# Compute Shannon Entropy
distren, _ = entropy_shannon(freq=freq, base=base)
# Normalize by number of bins (so that the range should be within [0, 1])
distren = distren / (np.log(n_bins) / np.log(base))
return distren, info
| 32.905172 | 98 | 0.596804 | [
"MIT"
] | danibene/NeuroKit | neurokit2/complexity/entropy_distribution.py | 3,817 | Python |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
'''
Simulating Solow-Swan model, which attempts to model the long-run economic growth
by looking at capital accumulation (K), population growth (L) and technological
progress, which results in increase in productivity. It models the total production
of the economy using the constant-returns-to-scale Cobb-Douglas production function
Y(t) = K(t)^{alpha} * (A(t)L(t))^{1-alpha}, where
Y(t): a single good output at time t
K(t): the amount of capital at time t
L(t): population at time t
A(t): total factor productivity at time t
alpha: output elasticity of capital
with a law of motion:
I(t) = sY(t)
C(t) = (1-s)Y(t)
K(t+1) = (1-delta)K(t) + I(t)
L(t+1) = (1+n)L(t)
we can derive the law of motion for k(t), capital per capita:
    k(t+1) = K(t+1)/L(t+1)
           = ((1-delta)K(t) + I(t)) / ((1+n)L(t))
           = (1-delta)/(1+n) * k(t) + s/(1+n) * A * k(t)^alpha
as well as per capita output:
    y(t) = Y(t)/L(t)
         = A * k(t)^alpha
where, I(t): total investment at time t
C(t): total consumption at time t
K(t): total capital at time t
L(t): total population at time t
s: the saving rate
delta: rate of capital depreciation
n: rate of population growth
This simulation allows the user to control those parameters and plot the simulated
total output growth. The program also enables the user to query data from the
Federal Reserve Economic Data.
'''
class solow:
'''
A: total factor productivity
k0: the initial amount of capital
    delta: rate of depreciation of capital
s: the saving rate
n: the population growth rate
alpha: output elasticity of capital
    t0: the starting year of the simulation
    tmax: the final year of the simulation
'''
def __init__(self, A=2.87, k0=3.5, delta = 0.08, s = 0.1, n = 0.015, alpha = 0.36, t0 = 1956, tmax = 2060):
self._A = A
self._k0 = k0
self._k = k0
self._delta = delta
self._s = s
self._n = n
self._alpha = alpha
self._t0 = t0
self._tmax = tmax
self._t = range(t0, tmax + 1)
self._y = np.zeros(len(self._t))
self._y[0] = self._A * (self._k0 ** self._alpha)
self._time_passed = 0
'''
this method returns all the variables in this model, which includes A, k0,
delta, s, n, alpha, t0, tax, Y, and t as a dictionary
'''
def get_variables(self):
return {
'A' : self._A,
'k0': self._k0,
'delta': self._delta,
's' : self._s,
'n' : self._n,
'alpha': self._alpha,
't0' : self._t0,
'tmax': self._tmax,
'y' : self._y,
't' : self._t }
'''
this method takes a list or dictionary as input and set the variables based on
the user's input. If the user inputs a list, it will treats the entries of list
as the values of A, k0, delta, s, n, alpha, t0, tmax, Y, t the user wants to
change into. If the user inputs a dictionary, the fields will be set according
to the keys.
Example:
set_variables({A: 2.87, k0: 3.5, delta:0.08, s:0.1, n:0.015, alpha:0.36, t0:1956, tmax:2060})
set_variables(2.87,3.5,0.08,0.1,0.015,0.36,1956,2060)
both achieve the same output
'''
def set_variables(self, vars):
        if (type(vars) != type([]) and type(vars) != type({})):
raise ValueError('arguments must be either a dictionary or a list')
if (type(vars) == type([])):
if (len(vars) != 8):
raise ValueError('You must enter the following arguments: A, k0, delta, s, n, alpha, t0, tmax')
else:
self.setA(vars[0])
self.setK0(vars[1])
self.setDelta(vars[2])
self.setS(vars[3])
self.setN(vars[4])
self.setAlpha(vars[5])
self.setTRange(vars[6], vars[7])
if (type(vars) == type({})):
try:
self.setA(vars['A'])
self.setK0(vars['k0'])
self.setDelta(vars['delta'])
self.setS(vars['s'])
self.setN(vars['n'])
self.setAlpha(vars['alpha'])
self.setTRange(vars['t0'], vars['tmax'])
except KeyError:
raise ValueError("Your dictionary must have the keys A, k0, delta, s, n, alpha, t0, and tmax")
'''
setter for the field A (total factor productivity)
'''
def setA(self, A):
if (A < 0):
raise ValueError("A must be positive")
self._A = A
'''
setter for the field k0 (the initial amount of capital)
'''
    def setK0(self,k0):
        if(k0 < 0):
            raise ValueError("k0 must be positive")
        self._k0 = k0
        self._k = k0
'''
    setter for Delta (rate of depreciation of capital)
'''
def setDelta(self, delta):
if (delta > 1 or delta < 0):
raise ValueError("depreciation rate must be in between 0 and 1")
self._delta = delta
'''
setter for S (saving rate)
'''
def setS(self, s):
if (s > 1 or s < 0):
raise ValueError("saving rate must be in between 0 and 1")
        self._s = s
'''
setter for N (population growth rate)
'''
def setN(self,n):
self._n = n
'''
setter for alpha (output elasticity of capital)
'''
def setAlpha(self, alpha):
if (alpha < 0 or alpha > 1):
raise ValueError("alpha must be in between 0 and 1")
self._alpha = alpha
'''
setter for the time range
Example:
setTRange(1956, 2060): set the time range starting from 1956 to 2060
'''
def setTRange(self, start, end):
if (end < start):
raise ValueError("tmax must be greater than t0")
self._t0 = start
self._tmax = end
self._t = range(start, end+1)
'''
    Start the simulation, and return the simulated per capita output
    from the start period to the end period
'''
def simulate(self):
        # y[0] is already set in __init__, so only the remaining periods are simulated
        for t in self._t[1:]:
self._update()
return [self._y, self._t]
'''
    Plot the prediction using matplotlib. x-axis would be year, y-axis would
the predicted GDP
TO BE IMPLEMENTED
'''
def plot(self):
pass
'''
store the output as a pandas dataframe
'''
def to_df(self):
return pd.DataFrame({'year' : self._t, 'gdp_per_capita' : self._y})
'''
export the output as a csv file to the user-provided location
TO BE IMPLEMENTED
'''
def to_csv(self, dir):
pass
'''
lunch the GUI, that enables more user-friendly interaction with the software
TO BE IMPLEMENTED
'''
def gui(self):
pass
'''
update all the fields according to the law of motion
TO BE IMPLEMENTED
'''
def _update(self):
#update k
        self._k = (1-self._delta)/(1+self._n) * self._k + (self._s)/(1+self._n) * self._A * (self._k ** self._alpha)
# update t
self._time_passed += 1
#update y
self._y[self._time_passed] = self._A * (self._k ** self._alpha)
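
# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): runs the model with
# its default parameters and prints the last few simulated years.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    model = solow()
    y, t = model.simulate()
    print(model.to_df().tail())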
| 30.453782 | 111 | 0.560706 | [
"Apache-2.0"
] | zhaoy17/Macro_lib | macro_lib/growth/solow.py | 7,248 | Python |
#!/usr/local/bin/python
# based on code by henryk ploetz
# https://hackaday.io/project/5301-reverse-engineering-a-low-cost-usb-co-monitor/log/17909-all-your-base-are-belong-to-us
# and the wooga office weather project
# https://blog.wooga.com/woogas-office-weather-wow-67e24a5338
import os, sys, fcntl, time, socket
from prometheus_client import start_http_server, Gauge, Summary, Counter
import requests
def callback_function(error, result):
if error:
print(error)
return
print(result)
def hd(d):
return " ".join("%02X" % e for e in d)
def now():
return int(time.time())
# Create a metric to track time spent and requests made.
decrypt_time = Summary('decrypt_time_seconds', 'Time spent decrypting')
# Decorate function with metric.
@decrypt_time.time()
def decrypt(key, data):
cstate = [0x48, 0x74, 0x65, 0x6D, 0x70, 0x39, 0x39, 0x65]
shuffle = [2, 4, 0, 7, 1, 6, 5, 3]
phase1 = [0] * 8
for i, o in enumerate(shuffle):
phase1[o] = data[i]
phase2 = [0] * 8
for i in range(8):
phase2[i] = phase1[i] ^ key[i]
phase3 = [0] * 8
for i in range(8):
phase3[i] = ( (phase2[i] >> 3) | (phase2[ (i-1+8)%8 ] << 5) ) & 0xff
ctmp = [0] * 8
for i in range(8):
ctmp[i] = ( (cstate[i] >> 4) | (cstate[i]<<4) ) & 0xff
out = [0] * 8
for i in range(8):
out[i] = (0x100 + phase3[i] - ctmp[i]) & 0xff
return out
if __name__ == "__main__":
"""main"""
# use lock on socket to indicate that script is already running
try:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
## Create an abstract socket, by prefixing it with null.
s.bind('\0postconnect_gateway_notify_lock')
except socket.error, e:
# if script is already running just exit silently
sys.exit(0)
key = [0xc4, 0xc6, 0xc0, 0x92, 0x40, 0x23, 0xdc, 0x96]
fp = open(sys.argv[1], "a+b", 0)
HIDIOCSFEATURE_9 = 0xC0094806
set_report = "\x00" + "".join(chr(e) for e in key)
fcntl.ioctl(fp, HIDIOCSFEATURE_9, set_report)
values = {}
stamp = now()
notified = False
# define Gauge metrice for temp and co2
co2_metric = Gauge('co2_value', 'Current CO_2 Value from sensor')
temperature_metric = Gauge('temperature_value', 'Current Temperature Value from sensor')
# define loop counter
loop_counter = Counter('loops_total', 'Number of loops to query the sensor for values')
# Start up the server to expose the metrics.
start_http_server(8000)
while True:
loop_counter.inc()
data = list(ord(e) for e in fp.read(8))
decrypted = decrypt(key, data)
if decrypted[4] != 0x0d or (sum(decrypted[:3]) & 0xff) != decrypted[3]:
print hd(data), " => ", hd(decrypted), "Checksum error"
else:
op = decrypted[0]
val = decrypted[1] << 8 | decrypted[2]
values[op] = val
if (0x50 in values) and (0x42 in values):
co2 = values[0x50]
tmp = (values[0x42]/16.0-273.15)
# check if it's a sensible value
# (i.e. within the measuring range plus some margin)
if (co2 > 5000 or co2 < 0):
continue
if now() - stamp > 10:
print "TMP %3.1f" % (tmp)
temperature_metric.set(tmp)
print "CO2 %4i" % (co2)
co2_metric.set(co2)
print ">>>"
stamp = now()
| 30.452991 | 121 | 0.575639 | [
"MIT"
] | mvelten/office-weather | monitor.py | 3,563 | Python |
# -*- coding: utf-8 -*-
import os.path as osp
#import netCDF4
#from netcdf_helpers.reader import say_hello, get_time_series_from_location
#from plot.plot import plot_time_series_for_locations
#from sklearn.model_selection import train_test_split
#from sklearn.preprocessing import MinMaxScaler
import numpy as np
import xarray as xr
import pandas as pd
#import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import mglearn
#say_hello()
# set a path to the directory containing the data
directory = "/Users/houben/phd/hackathons/hida_datathon/data/MyChallengePaleo"
# set the file names
filename_temp_data_r1 = "T2m_R1_ym_1stMill.nc"
filename_temp_data_r2 = "T2m_R2_ym_1stMill.nc"
filename_solar_data = "Solar_forcing_1st_mill.nc"
filename_volc_data = "Volc_Forc_AOD_1st_mill.nc"
# load netCDF
#temp_data_r1 = netCDF4.Dataset(osp.join(directory, filename_temp_data_r1), "r")
#temp_data_r2 = netCDF4.Dataset(osp.join(directory, filename_temp_data_r2), "r")
temp_data_r1 = xr.open_dataset(osp.join(directory, filename_temp_data_r1))
temp_data_r2 = xr.open_dataset(osp.join(directory, filename_temp_data_r2))
#Understand the data more and see the levels of each column
df = temp_data_r1.to_dataframe()["T2m"]
print(df.index.get_level_values('time'))
timelist = df.index.get_level_values('time')
latlist = df.index.get_level_values('lat')
lonlist = df.index.get_level_values('lon')
#Reset the indices (I find it easier to work this way)
df_r1 = temp_data_r1.to_dataframe().reset_index(level=['lat', 'lon', 'time'])#["T2m"]
#Calculate a global annual mean temperature time series
Globalmeantemp = df_r1.groupby('time').mean()
#Calculate the mean of the time series to focus on the variation from the mean
mean = np.mean(Globalmeantemp["T2m"])
Var_frommean = Globalmeantemp["T2m"] - mean
plt.plot(Var_frommean)
from sklearn.cluster import KMeans
#Initialize the algorithm and fit it with the data
kmeans = KMeans(n_clusters = 5)
X = Var_frommean.to_numpy().reshape(-1,1)
kmeans.fit(X)
kmeans.cluster_centers_
print("Cluster memberships:\n{}".format(kmeans.labels_))
#Assign classes to each data point based on the model
classes = kmeans.predict(X)
#Inspect the centroids of the clusters
print(kmeans.cluster_centers_)
#Shortcut to see/visualize the datapoints and range of each cluster
mglearn.discrete_scatter(X, X, kmeans.labels_, markers='o')
#Volcanic activity is expected to have the maximum impact out of all forcings so look for the time points which are in the cluster associated with the lowest centroid
dip = np.argwhere(classes==np.argmin(kmeans.cluster_centers_))
#look for the years which have the biggest dips
dipinyear = list(int(timelist[i][0]/10000) for i in dip)
len(dipinyear)
# -----------------------------------------------------------------------------
# Apply a filter to the
# -----------------------------------------------------------------------------
# from
# -----------------------------------------------------------------------------
shortlistedtimeseries = list(timelist[i][0] for i in dip)
#fourth column to group locations:
#df_r1['latlon'] = df_r1[['lat', 'lon']].apply(lambda x: ','.join(x.astype(str)), axis=1)
#the above step takes too long. Look for alternatives. Alternatively, go for the original dataset
#locationmean = df_r1.groupby('latlon').mean()
locationmean = df_r1.groupby(['lat','lon']).mean() #testing alternative to above, much shorter
locationmean["mean"] = locationmean["T2m"]
df_r1_locmean = pd.merge(df_r1, locationmean[['T2m']], on = ['lat','lon']).rename(columns={'T2m_y':'mean'}) #merging the two dataframes
df_r1_locmean["Var"] = df_r1_locmean["T2m_x"] - df_r1_locmean["mean"] #calculating variation from mean of time series at respective location
#Filter the dataset and look at only the years which have the biggest dips for the data analysis/image analysis
#Also divide it into 6 zones as previously discussed: tropical, temperate and polar in northern and southern hemispheres
df_r1_time = df_r1_locmean[df_r1_locmean.time.isin(shortlistedtimeseries)]
df_North_trop = df_r1[(df_r1.lat>=0) & (df_r1.lat<30)]
df_North_temp = df_r1[(df_r1.lat>=30) & (df_r1.lat<60)]
df_North_polar = df_r1[df_r1.lat>=60]
df_South_trop = df_r1[(df_r1.lat>=-30) & (df_r1.lat<0)]
df_South_temp = df_r1[(df_r1.lat>=-60) & (df_r1.lat<-30)]
df_South_polar = df_r1[df_r1.lat<-60]
#Taking snapshots of years of interest: this needs to be broadened to consider the 5 year rolling window I think
kmeans = KMeans(n_clusters = 3)
for t in shortlistedtimeseries[:5]:
Y = df_r1_time[df_r1_time['time']==t]
series = Y["Var"]
X = series.to_numpy().reshape(-1,1)
# X = Var_frommean.to_numpy().reshape(-1,1)
kmeans.fit(X)
# print("Cluster memberships:\n{}".format(kmeans.labels_))
#Assign classes to each data point based on the model
classes = kmeans.predict(X)
Y["labels"] = classes
Y["plotlabels"] = kmeans.cluster_centers_[Y["labels"]] #To label the location with the corresponding cluster centroid
# print(kmeans.cluster_centers_)
plt.figure()
mglearn.discrete_scatter(Y['lon'], Y['lat'], Y["plotlabels"], markers='o')
plt.title("Year: "+str(int(t/10000)))
plt.legend()
| 46.734513 | 167 | 0.709903 | [
"MIT"
] | NatalieBarbosa/hida-datathon-ufz | SK_Clustering_WIP.py | 5,281 | Python |