| column | dtype | range / classes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 239 |
| max_stars_repo_name | string | length 5 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | sequence | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_path | string | length 3 to 239 |
| max_issues_repo_name | string | length 5 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | sequence | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_path | string | length 3 to 239 |
| max_forks_repo_name | string | length 5 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | sequence | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable (⌀) |
| content | string | length 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |

Each row below lists these fields in that order; the Python source of the file sits in the `content` field, followed by `avg_line_length`, `max_line_length` and `alphanum_fraction`.
7945466c457ae96d90093395a98cc1f5725a3073 | 488 | py | Python | preprocess_data/preprocess.py | ImranRiazChohan/ml_pipeline_using_kubeflow | 0c40355832b797734ae7cfac95000f35a722c1ff | [
"MIT"
] | null | null | null | preprocess_data/preprocess.py | ImranRiazChohan/ml_pipeline_using_kubeflow | 0c40355832b797734ae7cfac95000f35a722c1ff | [
"MIT"
] | null | null | null | preprocess_data/preprocess.py | ImranRiazChohan/ml_pipeline_using_kubeflow | 0c40355832b797734ae7cfac95000f35a722c1ff | [
"MIT"
] | null | null | null |
from sklearn import datasets
from sklearn.model_selection import train_test_split
import numpy as np
def _preprocess_data():
x,y=datasets.load_boston(return_X_y=True)
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.33)
np.save('x_train.npy', x_train)
np.save('x_test.npy', x_test)
np.save('y_train.npy', y_train)
np.save('y_test.npy', y_test)
if __name__ == '__main__':
print("Preprocessing data....")
    _preprocess_data()
| 28.705882 | 71 | 0.70082 |
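The component above only writes the four `.npy` splits to disk. As a minimal sketch (not part of the dataset row), this is how a downstream pipeline step might consume them; the choice of `LinearRegression` is an assumption, the file names are taken from the code above.

```python
# Illustrative follow-on step, assuming the .npy files written by
# _preprocess_data() are available in the working directory.
import numpy as np
from sklearn.linear_model import LinearRegression

def _train_model():
    x_train = np.load('x_train.npy')
    x_test = np.load('x_test.npy')
    y_train = np.load('y_train.npy')
    y_test = np.load('y_test.npy')

    model = LinearRegression()
    model.fit(x_train, y_train)
    print("Test R^2:", model.score(x_test, y_test))

if __name__ == '__main__':
    _train_model()
```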
794546cc238e1a83528d033f3e4070d781d67bf9 | 1,876 | py | Python | clipl/analysis_modules/binerrorsofemptybins.py | thomas-mueller/clipl | 4c8c61dd4a09fee6ad2ec65f3baa6854cf9cce69 | [
"MIT"
] | null | null | null | clipl/analysis_modules/binerrorsofemptybins.py | thomas-mueller/clipl | 4c8c61dd4a09fee6ad2ec65f3baa6854cf9cce69 | [
"MIT"
] | null | null | null | clipl/analysis_modules/binerrorsofemptybins.py | thomas-mueller/clipl | 4c8c61dd4a09fee6ad2ec65f3baa6854cf9cce69 | [
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
"""
import logging
import clipl.utility.logger as logger
log = logging.getLogger(__name__)
import ROOT
import clipl.analysis_modules.histogrammanipulationbase as histogrammanipulationbase
class BinErrorsOfEmptyBins(histogrammanipulationbase.HistogramManipulationBase):
"""Set bin errors of empty bins to scale factor * sum of weights / entries of histogram."""
def modify_argument_parser(self, parser, args):
super(BinErrorsOfEmptyBins, self).modify_argument_parser(parser, args)
self.empty_bins_options = parser.add_argument_group("Empty bins errors options")
self.empty_bins_options.add_argument(
"--nicks-empty-bins", nargs="+", default=[],
help="Nicks of histograms to be corrected. [Default: all]"
)
self.empty_bins_options.add_argument(
"--empty-bin-error-scale", type=float, default=0.6,
help="Scale factor for bin errors of empty bins = sf * sum of weights / entries of histogram. [Default: %(default)s]"
)
def prepare_args(self, parser, plotData):
super(BinErrorsOfEmptyBins, self).prepare_args(parser, plotData)
if len(plotData.plotdict["nicks_empty_bins"]) > 0:
self.whitelist = plotData.plotdict["nicks_empty_bins"]
else:
self.whitelist = plotData.plotdict["nicks"]
def _selector(self, nick, root_histogram, plotData):
if isinstance(root_histogram, ROOT.TH1):
if root_histogram.GetEntries() == 0:
self.bin_error_for_empty_bins = 0.0
else:
self.bin_error_for_empty_bins = plotData.plotdict["empty_bin_error_scale"] * root_histogram.GetSumOfWeights() / root_histogram.GetEntries()
else:
return False
return super(BinErrorsOfEmptyBins, self)._selector(nick, root_histogram, plotData)
def _manipulate_bin(self, histogram, global_bin):
if histogram.GetBinContent(global_bin) == 0.0:
histogram.SetBinError(global_bin, self.bin_error_for_empty_bins)
| 35.396226 | 143 | 0.757996 |
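A standalone sketch of the arithmetic the module above applies, independent of ROOT: empty bins receive an error of scale factor times sum of weights divided by entries, and a histogram with no entries at all gets 0.0. The numbers below are made up for illustration.

```python
def empty_bin_error(sum_of_weights, entries, scale=0.6):
    # Mirrors BinErrorsOfEmptyBins._selector: zero entries -> error 0.0,
    # otherwise scale * sum of weights / entries.
    if entries == 0:
        return 0.0
    return scale * sum_of_weights / entries

print(empty_bin_error(150.0, 200))  # 0.6 * 150 / 200 = 0.45
print(empty_bin_error(0.0, 0))      # 0.0
```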
7945472ae6f4641ddc7d38b81cb289f7897531df | 2,090 | py | Python | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/users/migrations/0001_initial.py | jimfmunro/cookiecutter-django | 41e4470c613488c5b6ae707130053d908216729f | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/users/migrations/0001_initial.py | jimfmunro/cookiecutter-django | 41e4470c613488c5b6ae707130053d908216729f | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/users/migrations/0001_initial.py | jimfmunro/cookiecutter-django | 41e4470c613488c5b6ae707130053d908216729f | [
"BSD-3-Clause"
] | null | null | null |
# encoding: utf8
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('auth', '__first__'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
(u'id', models.AutoField(verbose_name=u'ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name=u'password')),
('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name=u'last login')),
('is_superuser', models.BooleanField(default=False, help_text=u'Designates that this user has all permissions without explicitly assigning them.', verbose_name=u'superuser status')),
('email', models.EmailField(unique=True, max_length=120, verbose_name=u'email field', db_index=True)),
('first_name', models.CharField(max_length=32, blank=True)),
('last_name', models.CharField(max_length=32, blank=True)),
('is_staff', models.BooleanField(default=False, help_text=u'Designates whether the user can log into this admin site.', verbose_name=u'staff status')),
('is_active', models.BooleanField(default=True, help_text=u'Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name=u'active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name=u'date joined')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('groups', models.ManyToManyField(to='auth.Group', verbose_name=u'groups', blank=True)),
('user_permissions', models.ManyToManyField(to='auth.Permission', verbose_name=u'user permissions', blank=True)),
],
options={
u'abstract': False,
},
bases=(models.Model,),
),
]
| 56.486486 | 205 | 0.641148 |
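For a migration like the one above to act as the project's initial user migration, Django has to be pointed at the custom model. A minimal sketch of the corresponding settings entry, assuming the app is labelled `users` as in the migration's path:

```python
# settings.py (sketch): register the custom user model created by the
# migration above so auth uses it instead of django.contrib.auth.User.
AUTH_USER_MODEL = "users.User"
```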
794548506b30bf5cb6d9dfcdab98f32025ee7c8f | 2,442 | py | Python | sacn/messages/root_layer.py | BlakeGarner/sacn | 7eba57dc7177b3820c9d2f132512a91f70a76dcd | [
"MIT"
] | null | null | null | sacn/messages/root_layer.py | BlakeGarner/sacn | 7eba57dc7177b3820c9d2f132512a91f70a76dcd | [
"MIT"
] | null | null | null | sacn/messages/root_layer.py | BlakeGarner/sacn | 7eba57dc7177b3820c9d2f132512a91f70a76dcd | [
"MIT"
] | null | null | null |
# This file is under MIT license. The license file can be obtained in the root directory of this module.
"""
This represents a root layer of an ACN Message.
Information about sACN: http://tsp.esta.org/tsp/documents/docs/E1-31-2016.pdf
"""
_FIRST_INDEX = \
(0, 0x10, 0, 0, 0x41, 0x53, 0x43, 0x2d, 0x45,
0x31, 0x2e, 0x31, 0x37, 0x00, 0x00, 0x00)
VECTOR_E131_DATA_PACKET = (0, 0, 0, 0x02)
VECTOR_DMP_SET_PROPERTY = 0x02
VECTOR_ROOT_E131_DATA = (0, 0, 0, 0x4)
VECTOR_ROOT_E131_EXTENDED = (0, 0, 0, 0x8)
VECTOR_E131_EXTENDED_SYNCHRONIZATION = (0, 0, 0, 0x1)
VECTOR_E131_EXTENDED_DISCOVERY = (0, 0, 0, 0x2)
VECTOR_UNIVERSE_DISCOVERY_UNIVERSE_LIST = (0, 0, 0, 0x1)
class RootLayer:
def __init__(self, length: int, cid: tuple, vector: tuple):
self.length = length
if(len(vector) != 4):
raise ValueError('the length of the vector is not 4!')
self._vector = vector
if(len(cid) != 16):
raise ValueError('the length of the CID is not 16!')
self._cid = cid
def getBytes(self) -> list:
'''Returns the Root layer as list with bytes'''
tmpList = []
tmpList.extend(_FIRST_INDEX)
# first append the high byte from the Flags and Length
# high 4 bit: 0x7 then the bits 8-11(indexes) from _length
length = self.length - 16
tmpList.append((0x7 << 4) + (length >> 8))
# Then append the lower 8 bits from _length
tmpList.append(length & 0xFF)
tmpList.extend(self._vector)
tmpList.extend(self._cid)
return tmpList
@property
def length(self) -> int:
return self._length
@length.setter
def length(self, value: int):
self._length = value & 0xFFF # only use the least 12-Bit
def int_to_bytes(integer: int) -> list:
"""
Converts a single integer number to an list with the length 2 with highest byte first.
The returned list contains values in the range [0-255]
:param integer: the integer to convert
:return: the list with the high byte first
"""
return [(integer >> 8) & 0xFF, integer & 0xFF]
def make_flagsandlength(length: int) -> list:
"""
Converts a length value in a Flags and Length list with two bytes in the correct order.
:param length: the length to convert. should be 12-bit value
:return: the list with the two bytes
"""
return [(0x7 << 4) + ((length & 0xF00) >> 8), length & 0xFF]
| 33.452055 | 104 | 0.646601 |
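A minimal sketch of how the root-layer helpers above fit together; the CID bytes and the length are arbitrary example values.

```python
# Build the root layer of an sACN data packet and inspect the helpers.
cid = tuple(range(16))                      # 16-byte component identifier (example)
layer = RootLayer(length=126, cid=cid, vector=VECTOR_ROOT_E131_DATA)

raw = layer.getBytes()
print(len(raw))                             # 38: preamble + flags/length + vector + CID

print(int_to_bytes(0x1234))                 # [18, 52] -> high byte first
print(make_flagsandlength(110))             # flags/length pair for a 110-byte PDU
```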
7945490f22d0ebbd308ded9587e3248c14f3190b | 116 | py | Python | basi_python/list _concept/insert_list.py | KMONISH/learning-challenge-season-2 | be8271f3cbe958849e394807197d6179bbff5751 | [
"MIT"
] | null | null | null | basi_python/list _concept/insert_list.py | KMONISH/learning-challenge-season-2 | be8271f3cbe958849e394807197d6179bbff5751 | [
"MIT"
] | null | null | null | basi_python/list _concept/insert_list.py | KMONISH/learning-challenge-season-2 | be8271f3cbe958849e394807197d6179bbff5751 | [
"MIT"
] | null | null | null |
List = ['hulk', 'ultron',"thor", 2011, 2015]
# Insert the value 2018 at index 5
List.insert(5,2018)
print(List)
| 23.2 | 45 | 0.637931 |
7945494c0d8bd2c6ea53e40eaef2e9548110a66e | 20,138 | py | Python | c7n/resources/securityhub.py | LeonovecSergey/cloud-custodian | f1e7e2dd3679a275e2df49a86f9dee39f7845684 | [
"Apache-2.0"
] | null | null | null | c7n/resources/securityhub.py | LeonovecSergey/cloud-custodian | f1e7e2dd3679a275e2df49a86f9dee39f7845684 | [
"Apache-2.0"
] | 79 | 2019-03-20T12:27:06.000Z | 2019-08-14T14:07:04.000Z | c7n/resources/securityhub.py | LeonovecSergey/cloud-custodian | f1e7e2dd3679a275e2df49a86f9dee39f7845684 | [
"Apache-2.0"
] | 2 | 2019-04-22T15:20:23.000Z | 2019-08-27T12:37:51.000Z |
# Copyright 2018-2019 Amazon.com, Inc. or its affiliates.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import Counter
from datetime import datetime
from dateutil.tz import tzutc
import jmespath
import json
import hashlib
import logging
from c7n.actions import Action
from c7n.filters import Filter
from c7n.exceptions import PolicyValidationError
from c7n.manager import resources
from c7n.policy import LambdaMode, execution
from c7n.utils import (
local_session, type_schema,
chunks, dumps, filter_empty, get_partition
)
from c7n.version import version
from .aws import AWS
log = logging.getLogger('c7n.securityhub')
class SecurityHubFindingFilter(Filter):
"""Check if there are Security Hub Findings related to the resources
"""
schema = type_schema(
'finding',
# Many folks do an aggregator region, allow them to use that
# for filtering.
region={'type': 'string'},
query={'type': 'object'})
schema_alias = True
permissions = ('securityhub:GetFindings',)
annotation_key = 'c7n:finding-filter'
query_shape = 'AwsSecurityFindingFilters'
def validate(self):
query = self.data.get('query')
if query:
from c7n.resources import aws
aws.shape_validate(query, self.query_shape, 'securityhub')
def process(self, resources, event=None):
client = local_session(
self.manager.session_factory).client(
'securityhub', region_name=self.data.get('region'))
found = []
params = dict(self.data.get('query', {}))
for r_arn, resource in zip(self.manager.get_arns(resources), resources):
params['ResourceId'] = [{"Value": r_arn, "Comparison": "EQUALS"}]
findings = client.get_findings(Filters=params).get("Findings")
if len(findings) > 0:
resource[self.annotation_key] = findings
found.append(resource)
return found
@classmethod
def register_resources(klass, registry, resource_class):
""" meta model subscriber on resource registration.
SecurityHub Findings Filter
"""
for rtype, resource_manager in registry.items():
if not resource_manager.has_arn():
continue
if 'post-finding' in resource_manager.action_registry:
continue
resource_class.filter_registry.register('finding', klass)
resources.subscribe(resources.EVENT_REGISTER, SecurityHubFindingFilter.register_resources)
@execution.register('hub-action')
@execution.register('hub-finding')
class SecurityHub(LambdaMode):
"""
Execute a policy lambda in response to security hub finding event or action.
.. example:
This policy will provision a lambda and security hub custom action.
The action can be invoked on a finding or insight result (collection
of findings). The action name will have the resource type prefixed as
custodian actions are resource specific.
.. code-block: yaml
policy:
- name: remediate
resource: aws.ec2
mode:
type: hub-action
role: MyRole
actions:
- snapshot
- type: set-instance-profile
name: null
- stop
.. example:
This policy will provision a lambda that will process high alert findings from
guard duty (note custodian also has support for guard duty events directly).
.. code-block: yaml
policy:
- name: remediate
resource: aws.iam
filters:
- type: event
key: detail.findings[].ProductFields.aws/securityhub/ProductName
value: GuardDuty
- type: event
key: detail.findings[].ProductFields.aws/securityhub/ProductName
value: GuardDuty
actions:
- remove-keys
Note, for custodian we support additional resources in the finding via the Other resource,
so these modes work for resources that security hub doesn't natively support.
https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-cloudwatch-events.html
"""
schema = type_schema(
'hub-finding', aliases=('hub-action',),
rinherit=LambdaMode.schema)
ActionFinding = 'Security Hub Findings - Custom Action'
ActionInsight = 'Security Hub Insight Results'
ImportFinding = 'Security Hub Findings - Imported'
handlers = {
ActionFinding: 'resolve_action_finding',
ActionInsight: 'resolve_action_insight',
ImportFinding: 'resolve_import_finding'
}
def resolve_findings(self, findings):
rids = set()
for f in findings:
for r in f['Resources']:
# Security hub invented some new arn format for a few resources...
# detect that and normalize to something sane.
if r['Id'].startswith('AWS') and r['Type'] == 'AwsIamAccessKey':
rids.add('arn:aws:iam::%s:user/%s' % (
f['AwsAccountId'],
r['Details']['AwsIamAccessKey']['UserName']))
elif not r['Id'].startswith('arn'):
log.warning("security hub unknown id:%s rtype:%s",
r['Id'], r['Type'])
else:
rids.add(r['Id'])
return rids
def resolve_action_insight(self, event):
rtype = event['detail']['resultType']
rids = [list(i.keys())[0] for i in event['detail']['insightResults']]
client = local_session(
self.policy.session_factory).client('securityhub')
insights = client.get_insights(
InsightArns=[event['detail']['insightArn']]).get(
'Insights', ())
if not insights or len(insights) > 1:
return []
insight = insights.pop()
params = {}
params['Filters'] = insight['Filters']
params['Filters'][rtype] = [
{'Comparison': 'EQUALS', 'Value': r} for r in rids]
findings = client.get_findings(**params).get('Findings', ())
return self.resolve_findings(findings)
def resolve_action_finding(self, event):
return self.resolve_findings(event['detail']['findings'])
def resolve_import_finding(self, event):
return self.resolve_findings(event['detail']['findings'])
def resolve_resources(self, event):
# For centralized setups in a hub aggregator account
self.assume_member(event)
event_type = event['detail-type']
arn_resolver = getattr(self, self.handlers[event_type])
arns = arn_resolver(event)
# Lazy import to avoid aws sdk runtime dep in core
from c7n.resources.aws import Arn
resource_map = {Arn.parse(r) for r in arns}
# sanity check on finding resources matching policy resource
# type's service.
if self.policy.resource_manager.type != 'account':
log.info(
"mode:security-hub resolve resources %s", list(resource_map))
if not resource_map:
return []
resource_arns = [
r for r in resource_map
if r.service == self.policy.resource_manager.resource_type.service]
resources = self.policy.resource_manager.get_resources(
[r.resource for r in resource_arns])
else:
resources = self.policy.resource_manager.get_resources([])
resources[0]['resource-arns'] = resource_arns
return resources
FindingTypes = {
"Software and Configuration Checks",
"TTPs",
"Effects",
"Unusual Behaviors",
"Sensitive Data Identifications"
}
# Mostly undocumented value size limit
SECHUB_VALUE_SIZE_LIMIT = 1024
class PostFinding(Action):
"""Report a finding to AWS Security Hub.
Custodian acts as a finding provider, allowing users to craft
policies that report to the AWS SecurityHub.
For resources that are taggable, we will tag the resource with an identifier
such that further findings generate updates.
Example generate a finding for accounts that don't have shield enabled.
:example:
.. code-block:: yaml
policies:
- name: account-shield-enabled
resource: account
filters:
- shield-enabled
actions:
- type: post-finding
description: |
Shield should be enabled on account to allow for DDOS protection (1 time 3k USD Charge).
severity_normalized: 6
types:
- "Software and Configuration Checks/Industry and Regulatory Standards/NIST CSF Controls (USA)"
recommendation: "Enable shield"
recommendation_url: "https://www.example.com/policies/AntiDDoS.html"
confidence: 100
compliance_status: FAILED
""" # NOQA
FindingVersion = "2018-10-08"
ProductName = "default"
permissions = ('securityhub:BatchImportFindings',)
schema_alias = True
schema = type_schema(
"post-finding",
required=["types"],
title={"type": "string"},
description={'type': 'string'},
severity={"type": "number", 'default': 0},
severity_normalized={"type": "number", "min": 0, "max": 100, 'default': 0},
confidence={"type": "number", "min": 0, "max": 100},
criticality={"type": "number", "min": 0, "max": 100},
# Cross region aggregation
region={'type': 'string', 'description': 'cross-region aggregation target'},
recommendation={"type": "string"},
recommendation_url={"type": "string"},
fields={"type": "object"},
batch_size={'type': 'integer', 'minimum': 1, 'maximum': 10},
types={
"type": "array",
"minItems": 1,
"items": {"type": "string"},
},
compliance_status={
"type": "string",
"enum": ["PASSED", "WARNING", "FAILED", "NOT_AVAILABLE"],
},
)
NEW_FINDING = 'New'
def validate(self):
for finding_type in self.data["types"]:
if finding_type.count('/') > 2 or finding_type.split('/')[0] not in FindingTypes:
raise PolicyValidationError(
"Finding types must be in the format 'namespace/category/classifier'."
" Found {}. Valid namespace values are: {}.".format(
finding_type, " | ".join([ns for ns in FindingTypes])))
def get_finding_tag(self, resource):
finding_tag = None
tags = resource.get('Tags', [])
finding_key = '{}:{}'.format('c7n:FindingId',
self.data.get('title', self.manager.ctx.policy.name))
# Support Tags as dictionary
if isinstance(tags, dict):
return tags.get(finding_key)
# Support Tags as list of {'Key': 'Value'}
for t in tags:
key = t['Key']
value = t['Value']
if key == finding_key:
finding_tag = value
return finding_tag
def group_resources(self, resources):
grouped_resources = {}
for r in resources:
finding_tag = self.get_finding_tag(r) or self.NEW_FINDING
grouped_resources.setdefault(finding_tag, []).append(r)
return grouped_resources
def process(self, resources, event=None):
region_name = self.data.get('region', self.manager.config.region)
client = local_session(
self.manager.session_factory).client(
"securityhub", region_name=region_name)
now = datetime.utcnow().replace(tzinfo=tzutc()).isoformat()
# default batch size to one to work around security hub console issue
# which only shows a single resource in a finding.
batch_size = self.data.get('batch_size', 1)
stats = Counter()
for key, grouped_resources in self.group_resources(resources).items():
for resource_set in chunks(grouped_resources, batch_size):
stats['Finding'] += 1
if key == self.NEW_FINDING:
finding_id = None
created_at = now
updated_at = now
else:
finding_id, created_at = self.get_finding_tag(
resource_set[0]).split(':', 1)
updated_at = now
finding = self.get_finding(
resource_set, finding_id, created_at, updated_at)
import_response = client.batch_import_findings(
Findings=[finding])
if import_response['FailedCount'] > 0:
stats['Failed'] += import_response['FailedCount']
self.log.error(
"import_response=%s" % (import_response))
if key == self.NEW_FINDING:
stats['New'] += len(resource_set)
# Tag resources with new finding ids
tag_action = self.manager.action_registry.get('tag')
if tag_action is None:
continue
tag_action({
'key': '{}:{}'.format(
'c7n:FindingId',
self.data.get(
'title', self.manager.ctx.policy.name)),
'value': '{}:{}'.format(
finding['Id'], created_at)},
self.manager).process(resource_set)
else:
stats['Update'] += len(resource_set)
self.log.debug(
"policy:%s securityhub %d findings resources %d new %d updated %d failed",
self.manager.ctx.policy.name,
stats['Finding'],
stats['New'],
stats['Update'],
stats['Failed'])
def get_finding(self, resources, existing_finding_id, created_at, updated_at):
policy = self.manager.ctx.policy
model = self.manager.resource_type
region = self.data.get('region', self.manager.config.region)
if existing_finding_id:
finding_id = existing_finding_id
else:
finding_id = '{}/{}/{}/{}'.format(
self.manager.config.region,
self.manager.config.account_id,
hashlib.md5(json.dumps(
policy.data).encode('utf8')).hexdigest(),
hashlib.md5(json.dumps(list(sorted(
[r[model.id] for r in resources]))).encode(
'utf8')).hexdigest())
finding = {
"SchemaVersion": self.FindingVersion,
"ProductArn": "arn:aws:securityhub:{}:{}:product/{}/{}".format(
region,
self.manager.config.account_id,
self.manager.config.account_id,
self.ProductName,
),
"AwsAccountId": self.manager.config.account_id,
# Long search chain for description values, as this was
# made required long after users had policies deployed, so
# use explicit description, or policy description, or
# explicit title, or policy name, in that order.
"Description": self.data.get(
"description", policy.data.get(
"description",
self.data.get('title', policy.name))).strip(),
"Title": self.data.get("title", policy.name),
'Id': finding_id,
"GeneratorId": policy.name,
'CreatedAt': created_at,
'UpdatedAt': updated_at,
"RecordState": "ACTIVE",
}
severity = {'Product': 0, 'Normalized': 0}
if self.data.get("severity") is not None:
severity["Product"] = self.data["severity"]
if self.data.get("severity_normalized") is not None:
severity["Normalized"] = self.data["severity_normalized"]
if severity:
finding["Severity"] = severity
recommendation = {}
if self.data.get("recommendation"):
recommendation["Text"] = self.data["recommendation"]
if self.data.get("recommendation_url"):
recommendation["Url"] = self.data["recommendation_url"]
if recommendation:
finding["Remediation"] = {"Recommendation": recommendation}
if "confidence" in self.data:
finding["Confidence"] = self.data["confidence"]
if "criticality" in self.data:
finding["Criticality"] = self.data["criticality"]
if "compliance_status" in self.data:
finding["Compliance"] = {"Status": self.data["compliance_status"]}
fields = {
'resource': policy.resource_type,
'ProviderName': 'CloudCustodian',
'ProviderVersion': version
}
if "fields" in self.data:
fields.update(self.data["fields"])
else:
tags = {}
for t in policy.tags:
if ":" in t:
k, v = t.split(":", 1)
else:
k, v = t, ""
tags[k] = v
fields.update(tags)
if fields:
finding["ProductFields"] = fields
finding_resources = []
for r in resources:
finding_resources.append(self.format_resource(r))
finding["Resources"] = finding_resources
finding["Types"] = list(self.data["types"])
return filter_empty(finding)
def format_resource(self, r):
raise NotImplementedError("subclass responsibility")
class OtherResourcePostFinding(PostFinding):
fields = ()
def format_resource(self, r):
details = {}
for k in r:
if isinstance(k, (list, dict)):
continue
details[k] = r[k]
for f in self.fields:
value = jmespath.search(f['expr'], r)
if not value:
continue
details[f['key']] = value
for k, v in details.items():
if isinstance(v, datetime):
v = v.isoformat()
elif isinstance(v, (list, dict)):
v = dumps(v)
elif isinstance(v, (int, float, bool)):
v = str(v)
else:
continue
details[k] = v[:SECHUB_VALUE_SIZE_LIMIT]
details['c7n:resource-type'] = self.manager.type
other = {
'Type': 'Other',
'Id': self.manager.get_arns([r])[0],
'Region': self.manager.config.region,
'Partition': get_partition(self.manager.config.region),
'Details': {'Other': filter_empty(details)}
}
tags = {t['Key']: t['Value'] for t in r.get('Tags', [])}
if tags:
other['Tags'] = tags
return other
@classmethod
def register_resource(klass, registry, event):
for rtype, resource_manager in registry.items():
if not resource_manager.has_arn():
continue
if 'post-finding' in resource_manager.action_registry:
continue
resource_manager.action_registry.register('post-finding', klass)
AWS.resources.subscribe(
AWS.resources.EVENT_FINAL,
OtherResourcePostFinding.register_resource)
| 36.481884 | 110 | 0.580197 |
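The `finding` filter and `post-finding` action above key updates off a deterministic finding id. A standalone sketch of that id scheme (region / account / md5 of the policy / md5 of the sorted resource ids); the account and resource ids are placeholders.

```python
import hashlib
import json

def finding_id(region, account_id, policy_data, resource_ids):
    # Mirrors PostFinding.get_finding: the same policy plus the same resource
    # set always hash to the same id, so re-runs update an existing finding
    # instead of creating duplicates.
    return '{}/{}/{}/{}'.format(
        region,
        account_id,
        hashlib.md5(json.dumps(policy_data).encode('utf8')).hexdigest(),
        hashlib.md5(json.dumps(list(sorted(resource_ids))).encode('utf8')).hexdigest())

print(finding_id('us-east-1', '123456789012',
                 {'name': 'ec2-example', 'resource': 'ec2'},
                 ['i-0aaaaaaaaaaaaaaaa', 'i-0bbbbbbbbbbbbbbbb']))
```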
794549b4a79e9127df75490c8d9227dcdfbfb8c8 | 1,971 | py | Python | simclr/simclr.py | janselE/SimCLR-2 | 8bec90bf718206cae4e894120f5b3a773b8049ef | [
"MIT"
] | null | null | null | simclr/simclr.py | janselE/SimCLR-2 | 8bec90bf718206cae4e894120f5b3a773b8049ef | [
"MIT"
] | null | null | null | simclr/simclr.py | janselE/SimCLR-2 | 8bec90bf718206cae4e894120f5b3a773b8049ef | [
"MIT"
] | null | null | null |
import torch.nn as nn
import torch
from simclr.modules.identity import Identity
class SimCLR(nn.Module):
"""
    We opt for simplicity and adopt the commonly used ResNet (He et al., 2016) to obtain h_i = f(x̃_i) = ResNet(x̃_i) where h_i ∈ R^d is the output after the average pooling layer.
"""
def __init__(self, encoder, projection_dim, n_features):
super(SimCLR, self).__init__()
self.encoder = encoder
self.n_features = n_features
# Replace the fc layer with an Identity function
self.encoder.fc = Identity()
# We use a MLP with one hidden layer to obtain z_i = g(h_i) = W(2)σ(W(1)h_i) where σ is a ReLU non-linearity.
self.projector = nn.Sequential(
nn.Linear(self.n_features, self.n_features, bias=False),
nn.ReLU(),
nn.Linear(self.n_features, projection_dim, bias=False),
)
self.attn = nn.Sequential(
nn.Linear(self.n_features, self.n_features, bias=False),
nn.SiLU(),
nn.Linear(self.n_features, self.n_features, bias=False),
nn.SiLU(),
nn.Linear(self.n_features, self.n_features, bias=False),
nn.Sigmoid(),
)
def forward(self, x_i, x_j, attn=False, mask_type="sigmoid"):
h_i = self.encoder(x_i)
# for evaluation
if x_j == None:
return h_i
else:
h_j = self.encoder(x_j)
if attn:
mask = self.attn(h_i)
if mask_type == "hard":
mask = torch.round(mask)
if mask_type == "softmax":
mask = torch.softmax(mask, 1)
if mask_type == "sigmoid":
mask = torch.sigmoid(mask)
h_i = h_i * mask
h_j = h_j * mask
z_i = self.projector(h_i)
z_j = self.projector(h_j)
if attn:
return h_i, h_j, z_i, z_j, mask
return h_i, h_j, z_i, z_j, None
| 29.863636 | 177 | 0.561644 |
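A usage sketch for the module above; the torchvision ResNet-18 backbone, batch size and image size are assumptions, not something the class fixes.

```python
import torch
import torchvision

encoder = torchvision.models.resnet18()
n_features = encoder.fc.in_features          # 512 for ResNet-18, read before fc is replaced
model = SimCLR(encoder, projection_dim=64, n_features=n_features)

x_i = torch.randn(8, 3, 224, 224)            # two augmented views of the same batch
x_j = torch.randn(8, 3, 224, 224)
h_i, h_j, z_i, z_j, mask = model(x_i, x_j, attn=True, mask_type="sigmoid")
print(h_i.shape, z_i.shape, mask.shape)      # (8, 512), (8, 64), (8, 512)
```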
79454b80bf804be50a3169cfb956255cebc09f82 | 7,732 | py | Python | top_block.py | kangnaclub9/gps-sdr-sim-realtime | 54e14ad331bcd24d1709e6c0e808ffcf2ec0b506 | [
"MIT"
] | 1 | 2020-04-18T18:52:16.000Z | 2020-04-18T18:52:16.000Z | top_block.py | kangnaclub9/gps-sdr-sim-realtime | 54e14ad331bcd24d1709e6c0e808ffcf2ec0b506 | [
"MIT"
] | null | null | null | top_block.py | kangnaclub9/gps-sdr-sim-realtime | 54e14ad331bcd24d1709e6c0e808ffcf2ec0b506 | [
"MIT"
] | null | null | null |
#!/usr/bin/env python2
##################################################
# GNU Radio Python Flow Graph
# Title: Top Block
# Generated: Sun Apr 30 23:18:08 2017
##################################################
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from PyQt4 import Qt
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio import qtgui
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from grc_gnuradio import blks2 as grc_blks2
from optparse import OptionParser
import osmosdr
import sip
import sys
import time
class top_block(gr.top_block, Qt.QWidget):
def __init__(self):
gr.top_block.__init__(self, "Top Block")
Qt.QWidget.__init__(self)
self.setWindowTitle("Top Block")
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "top_block")
self.restoreGeometry(self.settings.value("geometry").toByteArray())
##################################################
# Variables
##################################################
self.samp_rate = samp_rate = 2600000
##################################################
# Blocks
##################################################
self.qtgui_waterfall_sink_x_0 = qtgui.waterfall_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
"", #name
1 #number of inputs
)
self.qtgui_waterfall_sink_x_0.set_update_time(0.10)
self.qtgui_waterfall_sink_x_0.enable_grid(False)
if not True:
self.qtgui_waterfall_sink_x_0.disable_legend()
if complex == type(float()):
self.qtgui_waterfall_sink_x_0.set_plot_pos_half(not True)
labels = ["", "", "", "", "",
"", "", "", "", ""]
colors = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_waterfall_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_waterfall_sink_x_0.set_line_label(i, labels[i])
self.qtgui_waterfall_sink_x_0.set_color_map(i, colors[i])
self.qtgui_waterfall_sink_x_0.set_line_alpha(i, alphas[i])
self.qtgui_waterfall_sink_x_0.set_intensity_range(-140, 10)
self._qtgui_waterfall_sink_x_0_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_layout.addWidget(self._qtgui_waterfall_sink_x_0_win)
self.qtgui_freq_sink_x_0 = qtgui.freq_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
"", #name
1 #number of inputs
)
self.qtgui_freq_sink_x_0.set_update_time(0.10)
self.qtgui_freq_sink_x_0.set_y_axis(-140, 10)
self.qtgui_freq_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0.enable_grid(False)
self.qtgui_freq_sink_x_0.set_fft_average(1.0)
self.qtgui_freq_sink_x_0.enable_control_panel(False)
if not True:
self.qtgui_freq_sink_x_0.disable_legend()
if complex == type(float()):
self.qtgui_freq_sink_x_0.set_plot_pos_half(not True)
labels = ["", "", "", "", "",
"", "", "", "", ""]
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_layout.addWidget(self._qtgui_freq_sink_x_0_win)
self.osmosdr_sink_0 = osmosdr.sink( args="numchan=" + str(1) + " " + "hackrf=81320f" )
self.osmosdr_sink_0.set_clock_source("external", 0)
self.osmosdr_sink_0.set_sample_rate(samp_rate)
self.osmosdr_sink_0.set_center_freq(1575420000, 0)
self.osmosdr_sink_0.set_freq_corr(0, 0)
self.osmosdr_sink_0.set_gain(0, 0)
self.osmosdr_sink_0.set_if_gain(20, 0)
self.osmosdr_sink_0.set_bb_gain(20, 0)
self.osmosdr_sink_0.set_antenna("RX/TX", 0)
self.osmosdr_sink_0.set_bandwidth(2600000, 0)
self.blks2_tcp_source_0 = grc_blks2.tcp_source(
itemsize=gr.sizeof_gr_complex*1,
addr="127.0.0.1",
port=1234,
server=True,
)
(self.blks2_tcp_source_0).set_max_output_buffer(10000)
##################################################
# Connections
##################################################
self.connect((self.blks2_tcp_source_0, 0), (self.osmosdr_sink_0, 0))
self.connect((self.blks2_tcp_source_0, 0), (self.qtgui_freq_sink_x_0, 0))
self.connect((self.blks2_tcp_source_0, 0), (self.qtgui_waterfall_sink_x_0, 0))
def closeEvent(self, event):
self.settings = Qt.QSettings("GNU Radio", "top_block")
self.settings.setValue("geometry", self.saveGeometry())
event.accept()
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.osmosdr_sink_0.set_sample_rate(self.samp_rate)
self.qtgui_freq_sink_x_0.set_frequency_range(0, self.samp_rate)
self.qtgui_waterfall_sink_x_0.set_frequency_range(0, self.samp_rate)
if __name__ == '__main__':
parser = OptionParser(option_class=eng_option, usage="%prog: [options]")
(options, args) = parser.parse_args()
from distutils.version import StrictVersion
if StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0"):
Qt.QApplication.setGraphicsSystem(gr.prefs().get_string('qtgui','style','raster'))
qapp = Qt.QApplication(sys.argv)
tb = top_block()
tb.start()
tb.show()
def quitting():
tb.stop()
tb.wait()
qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
qapp.exec_()
tb = None # to clean up Qt widgets
| 38.854271 | 116 | 0.588334 |
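The flow graph above opens a TCP server on 127.0.0.1:1234 and forwards whatever complex samples arrive to the HackRF sink. Below is a sketch of a test client that feeds it interleaved 32-bit float I/Q samples (the `gr_complex` wire format); the 100 kHz tone is purely illustrative, and in the real setup the samples would come from gps-sdr-sim instead.

```python
import socket
import numpy as np

samp_rate = 2600000
t = np.arange(samp_rate // 10) / samp_rate                  # 100 ms of samples
iq = np.exp(2j * np.pi * 100e3 * t).astype(np.complex64)    # 100 kHz test tone

with socket.create_connection(("127.0.0.1", 1234)) as sock:
    sock.sendall(iq.tobytes())                              # interleaved float32 I/Q
```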
79454c5d835f772002153b105b183b21dcf6d477 | 24,423 | py | Python | examples/contrib/run_swag.py | nabihach/pytorch-transformers | 4c99a4eda5459e36ebb45355fa789bb6cc0bce71 | [
"Apache-2.0"
] | null | null | null | examples/contrib/run_swag.py | nabihach/pytorch-transformers | 4c99a4eda5459e36ebb45355fa789bb6cc0bce71 | [
"Apache-2.0"
] | 1 | 2021-06-02T04:00:03.000Z | 2021-06-02T04:00:03.000Z | examples/contrib/run_swag.py | nabihach/pytorch-transformers | 4c99a4eda5459e36ebb45355fa789bb6cc0bce71 | [
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
import argparse
import csv
import logging
import os
import random
import sys
from io import open
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from pytorch_transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE, WEIGHTS_NAME, CONFIG_NAME
from pytorch_transformers.modeling_bert import BertForMultipleChoice, BertConfig
from pytorch_transformers.optimization import AdamW, WarmupLinearSchedule
from pytorch_transformers.tokenization_bert import BertTokenizer
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
class SwagExample(object):
"""A single training/test example for the SWAG dataset."""
def __init__(self,
swag_id,
context_sentence,
start_ending,
ending_0,
ending_1,
ending_2,
ending_3,
label = None):
self.swag_id = swag_id
self.context_sentence = context_sentence
self.start_ending = start_ending
self.endings = [
ending_0,
ending_1,
ending_2,
ending_3,
]
self.label = label
def __str__(self):
return self.__repr__()
def __repr__(self):
l = [
"swag_id: {}".format(self.swag_id),
"context_sentence: {}".format(self.context_sentence),
"start_ending: {}".format(self.start_ending),
"ending_0: {}".format(self.endings[0]),
"ending_1: {}".format(self.endings[1]),
"ending_2: {}".format(self.endings[2]),
"ending_3: {}".format(self.endings[3]),
]
if self.label is not None:
l.append("label: {}".format(self.label))
return ", ".join(l)
class InputFeatures(object):
def __init__(self,
example_id,
choices_features,
label
):
self.example_id = example_id
self.choices_features = [
{
'input_ids': input_ids,
'input_mask': input_mask,
'segment_ids': segment_ids
}
for _, input_ids, input_mask, segment_ids in choices_features
]
self.label = label
def read_swag_examples(input_file, is_training):
with open(input_file, 'r', encoding='utf-8') as f:
reader = csv.reader(f)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
if is_training and lines[0][-1] != 'label':
raise ValueError(
"For training, the input file must contain a label column."
)
examples = [
SwagExample(
swag_id = line[2],
context_sentence = line[4],
start_ending = line[5], # in the swag dataset, the
# common beginning of each
# choice is stored in "sent2".
ending_0 = line[7],
ending_1 = line[8],
ending_2 = line[9],
ending_3 = line[10],
label = int(line[11]) if is_training else None
) for line in lines[1:] # we skip the line with the column names
]
return examples
def convert_examples_to_features(examples, tokenizer, max_seq_length,
is_training):
"""Loads a data file into a list of `InputBatch`s."""
# Swag is a multiple choice task. To perform this task using Bert,
# we will use the formatting proposed in "Improving Language
# Understanding by Generative Pre-Training" and suggested by
# @jacobdevlin-google in this issue
# https://github.com/google-research/bert/issues/38.
#
# Each choice will correspond to a sample on which we run the
# inference. For a given Swag example, we will create the 4
# following inputs:
# - [CLS] context [SEP] choice_1 [SEP]
# - [CLS] context [SEP] choice_2 [SEP]
# - [CLS] context [SEP] choice_3 [SEP]
# - [CLS] context [SEP] choice_4 [SEP]
# The model will output a single value for each input. To get the
# final decision of the model, we will run a softmax over these 4
# outputs.
features = []
for example_index, example in enumerate(examples):
context_tokens = tokenizer.tokenize(example.context_sentence)
start_ending_tokens = tokenizer.tokenize(example.start_ending)
choices_features = []
for ending_index, ending in enumerate(example.endings):
# We create a copy of the context tokens in order to be
# able to shrink it according to ending_tokens
context_tokens_choice = context_tokens[:]
ending_tokens = start_ending_tokens + tokenizer.tokenize(ending)
# Modifies `context_tokens_choice` and `ending_tokens` in
# place so that the total length is less than the
# specified length. Account for [CLS], [SEP], [SEP] with
# "- 3"
_truncate_seq_pair(context_tokens_choice, ending_tokens, max_seq_length - 3)
tokens = ["[CLS]"] + context_tokens_choice + ["[SEP]"] + ending_tokens + ["[SEP]"]
segment_ids = [0] * (len(context_tokens_choice) + 2) + [1] * (len(ending_tokens) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
choices_features.append((tokens, input_ids, input_mask, segment_ids))
label = example.label
if example_index < 5:
logger.info("*** Example ***")
logger.info("swag_id: {}".format(example.swag_id))
for choice_idx, (tokens, input_ids, input_mask, segment_ids) in enumerate(choices_features):
logger.info("choice: {}".format(choice_idx))
logger.info("tokens: {}".format(' '.join(tokens)))
logger.info("input_ids: {}".format(' '.join(map(str, input_ids))))
logger.info("input_mask: {}".format(' '.join(map(str, input_mask))))
logger.info("segment_ids: {}".format(' '.join(map(str, segment_ids))))
if is_training:
logger.info("label: {}".format(label))
features.append(
InputFeatures(
example_id = example.swag_id,
choices_features = choices_features,
label = label
)
)
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
def select_field(features, field):
return [
[
choice[field]
for choice in feature.choices_features
]
for feature in features
]
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .csv files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
"bert-base-multilingual-cased, bert-base-chinese.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints will be written.")
## Other parameters
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=8,
type=int,
help="Total batch size for eval.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
args = parser.parse_args()
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
# Prepare model
model = BertForMultipleChoice.from_pretrained(args.bert_model,
cache_dir=os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank)),
num_choices=4)
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
if args.do_train:
# Prepare data loader
train_examples = read_swag_examples(os.path.join(args.data_dir, 'train.csv'), is_training = True)
train_features = convert_examples_to_features(
train_examples, tokenizer, args.max_seq_length, True)
all_input_ids = torch.tensor(select_field(train_features, 'input_ids'), dtype=torch.long)
all_input_mask = torch.tensor(select_field(train_features, 'input_mask'), dtype=torch.long)
all_segment_ids = torch.tensor(select_field(train_features, 'segment_ids'), dtype=torch.long)
all_label = torch.tensor([f.label for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
num_train_optimization_steps = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
if args.local_rank != -1:
num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
# Prepare optimizer
param_optimizer = list(model.named_parameters())
# hack to remove pooler, which is not used
# thus it produce None grad that break apex
param_optimizer = [n for n in param_optimizer]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
global_step = 0
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
                loss = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask, labels=label_ids)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.fp16 and args.loss_scale != 1.0:
# rescale loss for fp16 training
# see https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html
loss = loss * args.loss_scale
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
# modify learning rate with special warm up BERT uses
# if args.fp16 is False, BertAdam is used that handles this automatically
lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step, args.warmup_proportion)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
if args.do_train:
# Save a trained model, configuration and tokenizer
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(args.output_dir)
# Load a trained model and vocabulary that you have fine-tuned
model = BertForMultipleChoice.from_pretrained(args.output_dir, num_choices=4)
tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
else:
model = BertForMultipleChoice.from_pretrained(args.bert_model, num_choices=4)
model.to(device)
if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
eval_examples = read_swag_examples(os.path.join(args.data_dir, 'val.csv'), is_training = True)
eval_features = convert_examples_to_features(
eval_examples, tokenizer, args.max_seq_length, True)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
all_input_ids = torch.tensor(select_field(eval_features, 'input_ids'), dtype=torch.long)
all_input_mask = torch.tensor(select_field(eval_features, 'input_mask'), dtype=torch.long)
all_segment_ids = torch.tensor(select_field(eval_features, 'segment_ids'), dtype=torch.long)
all_label = torch.tensor([f.label for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
model.eval()
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
tmp_eval_loss = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask, labels=label_ids)
logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
logits = logits.detach().cpu().numpy()
label_ids = label_ids.to('cpu').numpy()
tmp_eval_accuracy = accuracy(logits, label_ids)
eval_loss += tmp_eval_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
eval_accuracy = eval_accuracy / nb_eval_examples
result = {'eval_loss': eval_loss,
'eval_accuracy': eval_accuracy,
'global_step': global_step,
'loss': tr_loss/global_step}
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
| 43.926259 | 134 | 0.601646 |
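A small sketch of the data objects the script above works with; the context and endings are invented, only the field layout follows `SwagExample` and `read_swag_examples`.

```python
example = SwagExample(
    swag_id="0",
    context_sentence="A cyclist approaches a sharp corner.",
    start_ending="The cyclist",
    ending_0="brakes and leans into the turn.",
    ending_1="starts reciting poetry.",
    ending_2="folds the bicycle in half.",
    ending_3="vanishes without a trace.",
    label=0,
)
# convert_examples_to_features() turns this into four
# [CLS] context [SEP] start_ending + ending_i [SEP] sequences.
print(example)
```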
79454c74cc5d7595cf07f6ff32e72a3f886a8797 | 1,956 | py | Python | saccader/visual_attention/saccader_config.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 23,901 | 2018-10-04T19:48:53.000Z | 2022-03-31T21:27:42.000Z | saccader/visual_attention/saccader_config.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 891 | 2018-11-10T06:16:13.000Z | 2022-03-31T10:42:34.000Z | saccader/visual_attention/saccader_config.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 6,047 | 2018-10-12T06:31:02.000Z | 2022-03-31T13:59:28.000Z |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for Saccader model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from saccader.feedforward import bagnet_config
class ConfigDict(object):
pass
def get_config():
"""Returns the default configuration as instance of ConfigDict."""
config = ConfigDict()
# Optimization parameters (expected to be overridden by training script).
config.mc_samples = 1
config.reinforce_loss_wt = 1.0
config.l2_loss_wt = 8e-5
config.l2_loss_loc_wt = 8e-5
config.xent_loss_wt = 1.0
# Model parameters.
config.num_times = 6
config.attention_groups = 2
config.attention_layers_per_group = 2
config.soft_attention = False
config.num_classes = -1 # Specify num_classes in main.
# Representation network parameters.
config.representation_config = bagnet_config.get_config()
config.representation_config.blocks = [3, 4, 6, 3]
config.representation_config.activation = tf.nn.relu
config.representation_config.planes = [64, 128, 256, 512]
# Saccader-77 (for ImageNet 224).
config.representation_config.strides = [2, 2, 2, 1]
config.representation_config.kernel3 = [2, 2, 2, 2]
config.representation_config.final_bottleneck = True
config.representation_config.batch_norm.enable = True
return config
| 31.548387 | 75 | 0.764826 |
79454c7ca961c5f1608dc7328b034412ba76054d | 1,081 | py | Python | api/src/helper/image.py | carlotacb/compra-local | 146289a67a0c943cd12ef9dfb16d0524af8e616e | [
"Apache-2.0"
] | 18 | 2020-04-12T19:43:24.000Z | 2021-11-14T17:35:34.000Z | api/src/helper/image.py | carlotacb/compra-local | 146289a67a0c943cd12ef9dfb16d0524af8e616e | [
"Apache-2.0"
] | 6 | 2020-04-12T22:56:15.000Z | 2020-04-17T19:06:41.000Z | api/src/helper/image.py | carlotacb/compra-local | 146289a67a0c943cd12ef9dfb16d0524af8e616e | [
"Apache-2.0"
] | null | null | null | import base64
import os
import uuid
from PIL import Image
from src.config import IMAGE_RESIZE_SIZE, IMAGE_OUTPUT_FOLDER_PATH
from src.helper import log
def decode_image_file(image_file_path):
with open(image_file_path, 'rb') as img_file:
decoded_image = base64.b64encode(img_file.read()).decode('utf-8')
return decoded_image
def resize(image_base64):
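    # Decode the base64 payload to a temp file, shrink it in place with thumbnail(),
    # then return the resized image re-encoded as base64 and remove the temp file.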
try:
output_path = os.path.join(IMAGE_OUTPUT_FOLDER_PATH, str(uuid.uuid4()))
with open(output_path, 'wb') as img:
img.write(base64.b64decode(image_base64))
with Image.open(output_path) as img:
img.thumbnail(IMAGE_RESIZE_SIZE, Image.ANTIALIAS)
img.save(fp=output_path, format='JPEG', quality=95)
decoded_image = decode_image_file(output_path)
os.remove(output_path)
return decoded_image
except Exception as e:
log.error(f'Exception opening and resizing image: [{e}]')
log.exception(e)
return None
def decode_and_resize(image_file_path):
return resize(decode_image_file(image_file_path))
| 30.027778 | 79 | 0.705828 |
79454c9086f25899d81abfc6b23e4e3c98398863 | 307 | py | Python | jd/api/rest/KplOpenAftermarketRefundRequest.py | fengjinqi/linjuanbang | 8cdc4e81df73ccd737ac547da7f2c7dca545862a | [
"MIT"
] | 5 | 2019-10-30T01:16:30.000Z | 2020-06-14T03:32:19.000Z | jd/api/rest/KplOpenAftermarketRefundRequest.py | fengjinqi/linjuanbang | 8cdc4e81df73ccd737ac547da7f2c7dca545862a | [
"MIT"
] | 2 | 2020-10-12T07:12:48.000Z | 2021-06-02T03:15:47.000Z | jd/api/rest/KplOpenAftermarketRefundRequest.py | fengjinqi/linjuanbang | 8cdc4e81df73ccd737ac547da7f2c7dca545862a | [
"MIT"
] | 3 | 2019-12-06T17:33:49.000Z | 2021-03-01T13:24:22.000Z | from jd.api.base import RestApi
class KplOpenAftermarketRefundRequest(RestApi):
def __init__(self,domain='gw.api.360buy.com',port=80):
RestApi.__init__(self,domain, port)
self.afsRefundDetailDto = None
self.client = None
def getapiname(self):
return 'jd.kpl.open.aftermarket.refund'
| 18.058824 | 56 | 0.742671 |
79454ccedc5b600b89813e7a231ce5cdd4cc9268 | 6,946 | py | Python | utils/makesets.py | broadinstitute/ebola-predictor | 5c0ea769156f315f0a1c2ef7f947b7fb5fde47fb | [
"BSD-2-Clause"
] | 12 | 2015-06-13T22:13:51.000Z | 2022-02-12T16:18:14.000Z | utils/makesets.py | broadinstitute/ebola-predictor | 5c0ea769156f315f0a1c2ef7f947b7fb5fde47fb | [
"BSD-2-Clause"
] | null | null | null | utils/makesets.py | broadinstitute/ebola-predictor | 5c0ea769156f315f0a1c2ef7f947b7fb5fde47fb | [
"BSD-2-Clause"
] | 10 | 2015-05-31T21:29:39.000Z | 2022-02-12T12:35:41.000Z | """
This script creates the training and test sets.
@copyright: The Broad Institute of MIT and Harvard 2015
"""
import argparse
import sys, csv, os, random
import numpy as np
src_file = "./data/sources.txt"
var_file = "./data/variables.txt"
range_file = "./data/ranges.txt"
ignore_file = "./data/ignore.txt"
"""Returns a set of indices for the test set, making sure that both training and test
set will have same fraction of outcomes
:param all_data: all rows in the dataset
:param complete_rows: indices of rows w/out missing values in all_data
:param test_percentage: percentage of complete rows that will be used in the test set
"""
def test_set(all_data, complete_rows, test_percentage):
outlist = []
for i in complete_rows:
row = all_data[i]
outlist.append(int(row[0]))
out = np.array(outlist)
i0 = np.where(out == 0)
i1 = np.where(out == 1)
f = test_percentage / 100.0
    ri0 = np.random.choice(i0[0], size=int(f*i0[0].shape[0]), replace=False)
    ri1 = np.random.choice(i1[0], size=int(f*i1[0].shape[0]), replace=False)
itest = np.concatenate((ri1, ri0))
itest.sort()
# itest contains the indices for the complete_rows list, which in turn is the list of
# indices in the original data, so we:
return np.array(complete_rows)[itest]
"""Creates a training/test sets and saves them to the specified files. The test set won't
have any missing values, and will include the given percentage of complete rows from the
source data
:param test_percentage: percentage of complete rows that will be used in the test set
:param test_filename: name of file to store test set
:param train_filename: name of file to store training set
"""
def makesets(test_percentage, test_filename, train_filename):
# Creating destination folder
test_dir = os.path.split(test_filename)[0]
train_dir = os.path.split(train_filename)[0]
if test_dir != train_dir:
print "Error: testing and training file should be stored in the same directory!"
exit(1)
if not os.path.exists(test_dir):
os.makedirs(test_dir)
input_file = ""
with open(src_file, "rb") as sfile:
for line in sfile.readlines():
input_file = os.path.abspath(line.strip())
model_variables = []
if os.path.exists(test_dir + "/variables.txt"):
fn = test_dir + "/variables.txt"
else:
fn = var_file
with open(fn, "rb") as vfile:
for line in vfile.readlines():
line = line.strip()
if not line: continue
model_variables.append(line.split()[0])
range_variables = []
with open(range_file, "rb") as rfile:
for line in rfile.readlines():
line = line.strip()
if not line: continue
parts = line.strip().split()
if 2 < len(parts):
range_variables.append({"name":parts[0], "type":parts[1], "range":parts[2].split(",")})
ignore_records = []
with open(ignore_file, "rb") as rfile:
for line in rfile.readlines():
line = line.strip()
if not line: continue
ignore_records.append(line)
ids = []
all_data = []
idx_info = []
complete_rows = []
with open(input_file, "rb") as ifile:
reader = csv.reader(ifile)
titles = reader.next()
model_idx = [titles.index(var) for var in model_variables]
r0 = 0
r = 0
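        # Keep a row only if its outcome (first model variable) is present, at least one
        # other model variable is non-missing, and all range-checked values fall inside
        # their configured ranges; rows with no missing values at all go into complete_rows.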
for row in reader:
if row[0] in ignore_records: continue
r0 += 1 # Starts at 1, because of titles
all_missing = True
some_missing = False
missing_dvar = row[model_idx[0]] == "\\N"
for i in range(1, len(model_variables)):
var_idx = model_idx[i]
if row[var_idx] == "\\N":
some_missing = True
else:
all_missing = False
inside_range = True
for var in range_variables:
idx = titles.index(var["name"])
val = row[idx]
if val == "\\N": continue
vtype = var["type"]
vrang = var["range"]
test = True
if vtype == "category":
test = val in vrang
else:
test = float(vrang[0]) <= float(val) and float(val) < float(vrang[1])
inside_range = inside_range and test
if not all_missing and not missing_dvar and inside_range:
ids.append(row[0])
idx_info.append([r0, row[0], row[model_idx[0]]])
all_data.append([row[idx].replace("\\N", "?") for idx in model_idx])
if not some_missing: complete_rows.append(r)
r += 1
test_idx = test_set(all_data, complete_rows, test_percentage)
training_data = []
testing_data = []
for r in range(0, len(all_data)):
row = all_data[r]
if r in test_idx:
testing_data.append(row)
else:
training_data.append(row)
# Saving index information
with open(test_filename.replace("-data", "-index"), "wb") as idxfile:
writer = csv.writer(idxfile, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL)
for r in range(0, len(all_data)):
if r in test_idx: writer.writerow(idx_info[r])
with open(train_filename.replace("-data", "-index"), "wb") as idxfile:
writer = csv.writer(idxfile, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL)
for r in range(0, len(all_data)):
if not r in test_idx: writer.writerow(idx_info[r])
with open(train_filename, "wb") as trfile:
writer = csv.writer(trfile, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(model_variables)
for row in training_data:
writer.writerow(row)
print "Wrote", len(training_data), "rows to training set in", train_filename
with open(test_filename, "wb") as tefile:
writer = csv.writer(tefile, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(model_variables)
for row in testing_data:
writer.writerow(row)
print "Wrote", len(testing_data), "rows to training set in", test_filename
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--train', nargs=1, default=["./models/test/training-data.csv"],
help="Filename for training set")
parser.add_argument('-T', '--test', nargs=1, default=["./models/test/testing-data.csv"],
help="Filename for test set")
parser.add_argument('-p', '--percentage', type=int, nargs=1, default=[50],
help="Percentage of complete data to use in test set")
args = parser.parse_args()
makesets(args.percentage[0], args.test[0], args.train[0]) | 38.804469 | 103 | 0.603513 |
79454d592416859887ba6c68eca277cbc723ffc0 | 809 | bzl | Python | recipes/crc32c/config.bzl | curoky/rules_cc | 943408c05e2204e1e603b70db05037217a53868d | [
"Apache-2.0"
] | 3 | 2022-02-06T10:10:44.000Z | 2022-02-07T11:53:25.000Z | recipes/crc32c/config.bzl | curoky/rules_cc | 943408c05e2204e1e603b70db05037217a53868d | [
"Apache-2.0"
] | null | null | null | recipes/crc32c/config.bzl | curoky/rules_cc | 943408c05e2204e1e603b70db05037217a53868d | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 curoky([email protected]).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
config = {
"type": "new_git_repository",
"build_file": "default/BUILD",
"remote": "https://github.com/google/crc32c",
"used_version": "heads/main",
"versions": {
"heads/main": {},
},
}
| 33.708333 | 74 | 0.70581 |
79454f48dfac8576d555b38483f8fc476bc02e37 | 1,292 | py | Python | shared/script/test.py | magneticflux-/skija | 90712df54e0d93e48678746fc2e6b1741c3d26a0 | [
"Apache-2.0"
] | null | null | null | shared/script/test.py | magneticflux-/skija | 90712df54e0d93e48678746fc2e6b1741c3d26a0 | [
"Apache-2.0"
] | null | null | null | shared/script/test.py | magneticflux-/skija | 90712df54e0d93e48678746fc2e6b1741c3d26a0 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python3
import argparse, glob, os, subprocess, sys
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '..', '..')))
import script.common as common
def main():
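    # Compile the test sources against the skija shared/native artifacts and run
    # org.jetbrains.skija.TestSuite on the JVM.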
parser = argparse.ArgumentParser()
parser.add_argument('--skija-shared')
parser.add_argument('--skija-native')
(args, _) = parser.parse_known_args()
if args.skija_shared:
skija_shared = os.path.abspath(args.skija_shared)
if args.skija_native:
skija_native = os.path.abspath(args.skija_native)
common.pushd(os.path.join(os.path.dirname(__file__), os.pardir))
if not args.skija_shared:
skija_shared = 'target/classes'
if not args.skija_native:
skija_native = '../native/build'
classpath = [
common.fetch_maven('org.projectlombok', 'lombok', '1.18.20'),
common.fetch_maven('org.jetbrains', 'annotations', '20.1.0'),
skija_shared,
skija_native,
]
sources = common.glob('src/test/java', '*.java')
common.javac(classpath, sources, 'target/test-classes')
common.check_call([
'java',
'--class-path', common.classpath_separator.join(classpath + ['target/test-classes']),
'-ea',
'-esa',
'-Xcheck:jni',
'org.jetbrains.skija.TestSuite',
])
common.popd()
return 0
if __name__ == '__main__':
sys.exit(main()) | 27.489362 | 89 | 0.679567 |
79454f95086a146468fae07d42ce3c0bd6b00ee2 | 1,025 | py | Python | upf/solvers/__init__.py | aiplan4eu/upf | 334df5f86fb3704d98e048b8bc8e0ba19073c06e | [
"Apache-2.0"
] | 14 | 2021-03-24T06:37:34.000Z | 2022-01-28T12:36:45.000Z | upf/solvers/__init__.py | aiplan4eu/upf | 334df5f86fb3704d98e048b8bc8e0ba19073c06e | [
"Apache-2.0"
] | 58 | 2021-06-08T10:00:58.000Z | 2022-01-31T16:31:07.000Z | upf/solvers/__init__.py | aiplan4eu/upf | 334df5f86fb3704d98e048b8bc8e0ba19073c06e | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 AIPlan4EU project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from upf.solvers.solver import Solver
from upf.solvers.factory import Factory
from upf.solvers.grounder import Grounder
from upf.solvers.parallel import Parallel
from upf.solvers.pddl_solver import PDDLSolver
from upf.solvers.plan_validator import SequentialPlanValidator
__all__ = [ 'Factory',
'Grounder',
'Parallel',
'PDDLSolver',
'SequentialPlanValidator',
'Solver'
]
| 33.064516 | 74 | 0.731707 |
79455052a159fb45dc10b7bcb9d9d797de04ed03 | 1,058 | py | Python | plot_candle.py | fredryce/stocker | 041fbe8348f7a035a607a214477cf423c4259171 | [
"MIT"
] | null | null | null | plot_candle.py | fredryce/stocker | 041fbe8348f7a035a607a214477cf423c4259171 | [
"MIT"
] | null | null | null | plot_candle.py | fredryce/stocker | 041fbe8348f7a035a607a214477cf423c4259171 | [
"MIT"
] | null | null | null |
import pandas as pd
from finta import TA
import mplfinance as mpf
from pandas_datareader import data as web
from matplotlib import pyplot as plt
import time
from matplotlib.animation import FuncAnimation
import yfinance as yf
# Finta likes lowercase
#data.columns = ["open", "high", "low", "close", "volume"]
# calc bol band
#bbands = TA.BBANDS(data, 30)
# cherry pick what to show on the chart
#bands_plot = pd.concat([bbands.BB_UPPER, bbands.BB_LOWER], axis=1)
#print(bands_plot)
#apd = mpf.make_addplot(bands_plot.head(300))
'''
test = hist.tail(400)
fig= plt.figure()
def animate(i):
global fig
#cansee = test.iloc[:i+1,:]
cansee = yahoo_tick.history(period="1d", interval="5m")
print(cansee)
#cansee.index = cansee["index"]
plt.clf()
fig, axs = mpf.plot(cansee, fig, type='candle', style='yahoo',
title=f'{ticker} (30)',
ylabel='Price (USD)',
ylabel_lower='Volume',
volume=True,
figscale=1.5,
animate=True
)
time.sleep(0.01)
#plt.tight_layout()
plt.show()
'''
| 14.694444 | 67 | 0.662571 |
79455086adf5afe58c64a3d5edb453f8ea09b87b | 4,742 | py | Python | pygluu/kubernetes/yamlparser.py | scottwedge/cloud-native-edition | 75f714210ec564dcef68c7b6a8c936ec615d0540 | [
"Apache-2.0"
] | null | null | null | pygluu/kubernetes/yamlparser.py | scottwedge/cloud-native-edition | 75f714210ec564dcef68c7b6a8c936ec615d0540 | [
"Apache-2.0"
] | null | null | null | pygluu/kubernetes/yamlparser.py | scottwedge/cloud-native-edition | 75f714210ec564dcef68c7b6a8c936ec615d0540 | [
"Apache-2.0"
] | null | null | null | """
License terms and conditions for Gluu Cloud Native Edition:
https://www.apache.org/licenses/LICENSE-2.0
"""
from pathlib import Path
import contextlib
import os
import json
import logging
from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap
from collections import OrderedDict
from collections.abc import Mapping
import subprocess
import shlex
def update_settings_json_file(settings):
"""Write settings out to a json file
"""
with open(Path('./settings.json'), 'w+') as file:
json.dump(settings, file, indent=2)
def exec_cmd(cmd):
args = shlex.split(cmd)
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = popen.communicate()
retcode = popen.returncode
if retcode != 0:
logger.error(str(stderr, "utf-8"))
logger.info(str(stdout, "utf-8"))
return stdout, stderr, retcode
def get_logger(name):
log_format = '%(asctime)s - %(name)8s - %(levelname)5s - %(message)s'
logging.basicConfig(level=logging.INFO,
format=log_format,
filename='setup.log',
filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(logging.Formatter(log_format))
logging.getLogger(name).addHandler(console)
return logging.getLogger(name)
logger = get_logger("gluu-yaml-parser ")
class Parser(dict):
def __init__(self, filename, check_value=None, check_value_name=None, check_key='kind'):
super().__init__()
self.filename = Path(filename)
self.yaml = YAML()
self.yaml.preserve_quotes = True
self.manifests_dict_list = []
self.modify_dict = dict
self.tmp_yaml_file = Path("./tmp.yaml")
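        # When check_value is given, scan every YAML document in the file: the manifest
        # matching check_key/check_value (and optionally the metadata name) becomes the
        # dict to modify, while all other manifests are kept for re-dumping later.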
if check_value:
if self.filename.exists():
with open(filename) as file:
manifests_dicts = self.yaml.load_all(file)
for manifest in manifests_dicts:
try:
if manifest[check_key] == check_value:
if check_value_name:
if manifest['metadata']['name'] == check_value_name:
self.modify_dict = manifest
else:
self.manifests_dict_list.append(manifest)
else:
self.modify_dict = manifest
else:
self.manifests_dict_list.append(manifest)
except KeyError:
# Key kind is not found so its the values.yaml for helm which only has one dict item
self.modify_dict = manifest
with open(self.tmp_yaml_file, 'w') as file:
self.yaml.dump(self.modify_dict, file)
with open(self.tmp_yaml_file) as f:
super(Parser, self).update(self.yaml.load(f) or {})
@property
def return_manifests_dict(self):
if self.filename.exists():
with open(self.filename) as file:
manifests_dicts = self.yaml.load_all(file)
for manifest in manifests_dicts:
self.manifests_dict_list.append(manifest)
return self.manifests_dict_list
def __setitem__(self, key, value):
super(Parser, self).__setitem__(key, value)
def dump_it(self):
d = self.analyze_ordered_dict_object(self)
final_manifest_dict_list = self.manifests_dict_list + [d]
with open(self.filename, "w+") as f:
self.yaml.dump_all(final_manifest_dict_list, f)
with contextlib.suppress(FileNotFoundError):
os.remove(self.tmp_yaml_file)
def analyze_ordered_dict_object(self, data):
if isinstance(data, OrderedDict) or isinstance(data, dict):
commented_map = CommentedMap()
for k, v in data.items():
commented_map[k] = self.analyze_ordered_dict_object(v)
return commented_map
return data
def __delitem__(self, key):
try:
super(Parser, self).__delitem__(key)
except KeyError as e:
logger.error(e)
def update(self, other=None, **kwargs):
if other is not None:
for k, v in other.items() if isinstance(other, Mapping) else other:
self[k] = v
for k, v in kwargs.items():
self[k] = v
super(Parser, self).update(self)
| 35.38806 | 112 | 0.570434 |
794550b3e089a03ac52fc5325d7e48e5473b9e71 | 3,897 | py | Python | libs/parser.py | micobg/revolut-stocks | dd421eaa4fbd3d07418851744a8b8700a05c20c6 | [
"MIT"
] | null | null | null | libs/parser.py | micobg/revolut-stocks | dd421eaa4fbd3d07418851744a8b8700a05c20c6 | [
"MIT"
] | null | null | null | libs/parser.py | micobg/revolut-stocks | dd421eaa4fbd3d07418851744a8b8700a05c20c6 | [
"MIT"
] | null | null | null | import pdfreader
from pdfreader import PDFDocument, SimplePDFViewer
from pdfreader.viewer import PageDoesNotExist
from datetime import datetime, timedelta
import decimal
decimal.getcontext().rounding = decimal.ROUND_HALF_UP
from libs import (
REVOLUT_DATE_FORMAT,
REVOLUT_ACTIVITY_TYPES,
REVOLUT_CASH_ACTIVITY_TYPES,
REVOLUT_ACTIVITIES_PAGES_INDICATORS,
)
def get_activity_range(page_strings):
begin_index = None
end_index = None
for index, page_string in enumerate(page_strings):
if page_string == "Amount":
begin_index = index
continue
if page_string == "SWEEP ACTIVITY":
end_index = index
break
return begin_index + 1, end_index
def extract_symbol_description(begin_index, page_strings):
symbol_description = ""
symbol = ""
end_index = begin_index
for page_string in page_strings[begin_index:]:
try:
decimal.Decimal(clean_number(page_string))
break
except decimal.InvalidOperation:
symbol_description += page_string
end_index += 1
symbol = symbol_description[0 : symbol_description.index("-") - 1]
return end_index, symbol, symbol_description
def clean_number(number_string):
return number_string.replace("(", "").replace(")", "").replace(",", "")
def extract_activity(begin_index, page_strings, num_fields):
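    # num_fields == 8 for trade rows (symbol, quantity, price, amount);
    # num_fields == 6 for cash-only rows (amount only).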
end_index, symbol, symbol_description = extract_symbol_description(begin_index + 4, page_strings)
activity = {
"trade_date": datetime.strptime(page_strings[begin_index], REVOLUT_DATE_FORMAT),
"settle_date": datetime.strptime(page_strings[begin_index + 1], REVOLUT_DATE_FORMAT),
"currency": page_strings[begin_index + 2],
"activity_type": page_strings[begin_index + 3],
"symbol_description": symbol_description,
}
if num_fields == 8:
activity["symbol"] = symbol
activity["quantity"] = decimal.Decimal(page_strings[end_index])
activity["price"] = decimal.Decimal(page_strings[end_index + 1])
activity["amount"] = page_strings[end_index + 2]
elif num_fields == 6:
activity["amount"] = page_strings[end_index]
activity["amount"] = decimal.Decimal(clean_number(activity["amount"]))
return activity
def extract_activities(viewer):
activities = []
while True:
viewer.render()
page_strings = viewer.canvas.strings
if page_strings and page_strings[0] in REVOLUT_ACTIVITIES_PAGES_INDICATORS:
begin_index, end_index = get_activity_range(page_strings)
page_strings = page_strings[begin_index:end_index]
for index, page_string in enumerate(page_strings):
if page_string in REVOLUT_ACTIVITY_TYPES:
activity = extract_activity(index - 3, page_strings, 8)
elif page_string in REVOLUT_CASH_ACTIVITY_TYPES:
activity = extract_activity(index - 3, page_strings, 6)
else:
continue
activities.append(activity)
try:
viewer.next()
except PageDoesNotExist:
break
return activities
def find_place_position(statements, date):
pos = 0
for statement in statements:
if statement["trade_date"] > date:
break
pos += 1
return pos
def parse_statements(statement_files):
statements = []
for statement_file in statement_files:
with open(statement_file, "rb") as fd:
viewer = SimplePDFViewer(fd)
activities = extract_activities(viewer)
if not activities:
continue
statements.append(activities)
statements = sorted(statements, key=lambda k: k[0]["trade_date"])
return [activity for activities in statements for activity in activities]
| 30.445313 | 101 | 0.663587 |
794550d2ff775d2555115fde81c1ba8ab6c2b272 | 38,121 | py | Python | pyscf/ci/ucisd.py | nmardirossian/pyscf | 57c8912dcfcc1157a822feede63df54ed1067115 | [
"BSD-2-Clause"
] | 1 | 2018-05-02T19:55:30.000Z | 2018-05-02T19:55:30.000Z | pyscf/ci/ucisd.py | nmardirossian/pyscf | 57c8912dcfcc1157a822feede63df54ed1067115 | [
"BSD-2-Clause"
] | null | null | null | pyscf/ci/ucisd.py | nmardirossian/pyscf | 57c8912dcfcc1157a822feede63df54ed1067115 | [
"BSD-2-Clause"
] | 1 | 2018-12-06T03:10:50.000Z | 2018-12-06T03:10:50.000Z | #!/usr/bin/env python
#
# Author: Qiming Sun <[email protected]>
#
'''
Unrestricted CISD
'''
import time
from functools import reduce
import tempfile
import numpy
import h5py
from pyscf import lib
from pyscf.lib import logger
from pyscf import ao2mo
from pyscf.cc import uccsd
from pyscf.ci import cisd
from pyscf.cc.rccsd import _unpack_4fold, _mem_usage
from pyscf.ci.ucisd_slow import from_fci, to_fci
from pyscf.ci.ucisd_slow import make_rdm1, make_rdm2
einsum = lib.einsum
def kernel(myci, eris, ci0=None, max_cycle=50, tol=1e-8,
verbose=logger.INFO):
mol = myci.mol
diag = myci.make_diagonal(eris)
ehf = diag[0]
diag -= ehf
if ci0 is None:
ci0 = myci.get_init_guess(eris)[1]
def op(xs):
return [myci.contract(x, eris) for x in xs]
def precond(x, e, *args):
diagd = diag - (e-myci.level_shift)
diagd[abs(diagd)<1e-8] = 1e-8
return x / diagd
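    # Solve for the lowest CISD root(s) with Davidson diagonalization.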
conv, ecisd, ci = lib.davidson1(op, ci0, precond, tol=tol,
max_cycle=max_cycle, max_space=myci.max_space,
lindep=myci.lindep, nroots=myci.nroots,
verbose=verbose)
if myci.nroots == 1:
conv = conv[0]
ecisd = ecisd[0]
ci = ci[0]
return conv, ecisd, ci
def make_diagonal(myci, eris):
nocca = eris.nocca
noccb = eris.noccb
nmoa = eris.focka.shape[0]
nmob = eris.focka.shape[1]
nvira = nmoa - nocca
nvirb = nmob - noccb
jdiag_aa = numpy.zeros((nmoa,nmoa))
jdiag_ab = numpy.zeros((nmoa,nmob))
jdiag_bb = numpy.zeros((nmob,nmob))
jdiag_aa[:nocca,:nocca] = numpy.einsum('iijj->ij', eris.oooo)
jdiag_aa[:nocca,nocca:] = numpy.einsum('iijj->ji', eris.vvoo)
jdiag_aa[nocca:,:nocca] = jdiag_aa[:nocca,nocca:].T
jdiag_ab[:nocca,:noccb] = numpy.einsum('iijj->ij', eris.ooOO)
jdiag_ab[:nocca,noccb:] = numpy.einsum('iijj->ji', eris.VVoo)
jdiag_ab[nocca:,:noccb] = numpy.einsum('iijj->ij', eris.vvOO)
jdiag_bb[:noccb,:noccb] = numpy.einsum('iijj->ij', eris.OOOO)
jdiag_bb[:noccb,noccb:] = numpy.einsum('iijj->ji', eris.VVOO)
jdiag_bb[noccb:,:noccb] = jdiag_bb[:noccb,noccb:].T
kdiag_aa = numpy.zeros((nmoa,nmoa))
kdiag_bb = numpy.zeros((nmob,nmob))
kdiag_aa[:nocca,:nocca] = numpy.einsum('ijji->ij', eris.oooo)
kdiag_aa[:nocca,nocca:] = numpy.einsum('ijji->ji', eris.voov)
kdiag_aa[nocca:,:nocca] = kdiag_aa[:nocca,nocca:].T
kdiag_bb[:noccb,:noccb] = numpy.einsum('ijji->ij', eris.OOOO)
kdiag_bb[:noccb,noccb:] = numpy.einsum('ijji->ji', eris.VOOV)
kdiag_bb[noccb:,:noccb] = kdiag_bb[:noccb,noccb:].T
# if eris.vvvv is not None and eris.vvVV is not None and eris.VVVV is not None:
# def diag_idx(n):
# idx = numpy.arange(n)
# return idx * (idx + 1) // 2 + idx
# jdiag_aa[nocca:,nocca:] = eris.vvvv[diag_idx(nvira)[:,None],diag_idx(nvira)]
# jdiag_ab[nocca:,noccb:] = eris.vvVV[diag_idx(nvira)[:,None],diag_idx(nvirb)]
# jdiag_bb[noccb:,noccb:] = eris.VVVV[diag_idx(nvirb)[:,None],diag_idx(nvirb)]
# kdiag_aa[nocca:,nocca:] = lib.unpack_tril(eris.vvvv.diagonal())
# kdiag_bb[noccb:,noccb:] = lib.unpack_tril(eris.VVVV.diagonal())
jkdiag_aa = jdiag_aa - kdiag_aa
jkdiag_bb = jdiag_bb - kdiag_bb
mo_ea = eris.focka.diagonal()
mo_eb = eris.fockb.diagonal()
ehf = (mo_ea[:nocca].sum() + mo_eb[:noccb].sum()
- jkdiag_aa[:nocca,:nocca].sum() * .5
- jdiag_ab[:nocca,:noccb].sum()
- jkdiag_bb[:noccb,:noccb].sum() * .5)
dia_a = lib.direct_sum('a-i->ia', mo_ea[nocca:], mo_ea[:nocca])
dia_a -= jkdiag_aa[:nocca,nocca:]
dia_b = lib.direct_sum('a-i->ia', mo_eb[noccb:], mo_eb[:noccb])
dia_b -= jkdiag_bb[:noccb,noccb:]
e1diag_a = dia_a + ehf
e1diag_b = dia_b + ehf
e2diag_aa = lib.direct_sum('ia+jb->ijab', dia_a, dia_a)
e2diag_aa += ehf
e2diag_aa += jkdiag_aa[:nocca,:nocca].reshape(nocca,nocca,1,1)
e2diag_aa -= jkdiag_aa[:nocca,nocca:].reshape(nocca,1,1,nvira)
e2diag_aa -= jkdiag_aa[:nocca,nocca:].reshape(1,nocca,nvira,1)
e2diag_aa += jkdiag_aa[nocca:,nocca:].reshape(1,1,nvira,nvira)
e2diag_ab = lib.direct_sum('ia+jb->ijab', dia_a, dia_b)
e2diag_ab += ehf
e2diag_ab += jdiag_ab[:nocca,:noccb].reshape(nocca,noccb,1,1)
e2diag_ab += jdiag_ab[nocca:,noccb:].reshape(1,1,nvira,nvirb)
e2diag_ab -= jdiag_ab[:nocca,noccb:].reshape(nocca,1,1,nvirb)
e2diag_ab -= jdiag_ab[nocca:,:noccb].T.reshape(1,noccb,nvira,1)
e2diag_bb = lib.direct_sum('ia+jb->ijab', dia_b, dia_b)
e2diag_bb += ehf
e2diag_bb += jkdiag_bb[:noccb,:noccb].reshape(noccb,noccb,1,1)
e2diag_bb -= jkdiag_bb[:noccb,noccb:].reshape(noccb,1,1,nvirb)
e2diag_bb -= jkdiag_bb[:noccb,noccb:].reshape(1,noccb,nvirb,1)
e2diag_bb += jkdiag_bb[noccb:,noccb:].reshape(1,1,nvirb,nvirb)
return amplitudes_to_cisdvec(ehf, (e1diag_a, e1diag_b),
(e2diag_aa, e2diag_ab, e2diag_bb))
def contract(myci, civec, eris):
nocca = eris.nocca
noccb = eris.noccb
nmoa = eris.focka.shape[0]
nmob = eris.fockb.shape[0]
nvira = nmoa - nocca
nvirb = nmob - noccb
c0, (c1a,c1b), (c2aa,c2ab,c2bb) = \
cisdvec_to_amplitudes(civec, (nmoa,nmob), (nocca,noccb))
fooa = eris.focka[:nocca,:nocca]
foob = eris.fockb[:noccb,:noccb]
fova = eris.focka[:nocca,nocca:]
fovb = eris.fockb[:noccb,noccb:]
fvva = eris.focka[nocca:,nocca:]
fvvb = eris.fockb[noccb:,noccb:]
t0 = 0
t1a = 0
t1b = 0
t2aa = 0
t2ab = 0
t2bb = 0
eris_vvoo = _cp(eris.vvoo)
eris_VVoo = _cp(eris.VVoo)
eris_vvOO = _cp(eris.vvOO)
eris_VVOO = _cp(eris.VVOO)
eris_voov = _cp(eris.voov)
eris_voOV = _cp(eris.voOV)
eris_VOOV = _cp(eris.VOOV)
#:t2 += eris.oovv * c0
t2aa += .25 * c0 * eris_voov.transpose(1,2,0,3)
t2aa -= .25 * c0 * eris_voov.transpose(1,2,3,0)
t2bb += .25 * c0 * eris_VOOV.transpose(1,2,0,3)
t2bb -= .25 * c0 * eris_VOOV.transpose(1,2,3,0)
t2ab += c0 * eris_voOV.transpose(1,2,0,3)
#:t0 += numpy.einsum('ijab,ijab', eris.oovv, c2) * .25
t0 += numpy.einsum('aijb,ijab', eris.voov, c2aa) * .25
t0 -= numpy.einsum('ajib,ijab', eris.voov, c2aa) * .25
t0 += numpy.einsum('aijb,ijab', eris.VOOV, c2bb) * .25
t0 -= numpy.einsum('ajib,ijab', eris.VOOV, c2bb) * .25
t0 += numpy.einsum('aijb,ijab', eris.voOV, c2ab)
#:tmp = einsum('imae,mbej->ijab', c2, eris.ovvo)
#:tmp = tmp - tmp.transpose(0,1,3,2)
#:t2 += tmp - tmp.transpose(1,0,2,3)
voov = eris_voov - eris_vvoo.transpose(0,3,2,1)
VOOV = eris_VOOV - eris_VVOO.transpose(0,3,2,1)
t2aa += lib.einsum('imae,bjme->ijab', c2aa, voov)
t2aa += lib.einsum('iMaE,bjME->ijab', c2ab, eris_voOV)
t2bb += lib.einsum('imae,bjme->ijab', c2bb, VOOV)
t2bb += lib.einsum('mIeA,emJB->IJAB', c2ab, eris_voOV)
t2ab += lib.einsum('imae,emJB->iJaB', c2aa, eris_voOV)
t2ab += lib.einsum('iMaE,EMJB->iJaB', c2ab, VOOV)
t2ab += lib.einsum('IMAE,bjME->jIbA', c2bb, eris_voOV)
t2ab += lib.einsum('mIeA,bjme->jIbA', c2ab, voov)
t2ab -= lib.einsum('iMeA,ebJM->iJbA', c2ab, eris_vvOO)
t2ab -= lib.einsum('mIaE,EBjm->jIaB', c2ab, eris_VVoo)
#:t1 += einsum('nf,nafi->ia', c1, eris.ovvo)
t1a += numpy.einsum('nf,ainf->ia', c1a, eris_voov)
t1a -= numpy.einsum('nf,fani->ia', c1a, eris_vvoo)
t1b += numpy.einsum('nf,ainf->ia', c1b, eris_VOOV)
t1b -= numpy.einsum('nf,fani->ia', c1b, eris_VVOO)
t1b += numpy.einsum('nf,fnia->ia', c1a, eris_voOV)
t1a += numpy.einsum('nf,ainf->ia', c1b, eris_voOV)
#:t1 -= 0.5*einsum('mnae,mnie->ia', c2, eris.ooov)
eris_vooo = _cp(eris.vooo)
eris_VOOO = _cp(eris.VOOO)
eris_VOoo = _cp(eris.VOoo)
eris_voOO = _cp(eris.voOO)
t1a += lib.einsum('mnae,emni->ia', c2aa, eris_vooo)
t1b += lib.einsum('mnae,emni->ia', c2bb, eris_VOOO)
t1a -= lib.einsum('nMaE,EMni->ia', c2ab, eris_VOoo)
t1b -= lib.einsum('mNeA,emNI->IA', c2ab, eris_voOO)
#:tmp = einsum('ma,mbij->ijab', c1, eris.ovoo)
#:t2 -= tmp - tmp.transpose(0,1,3,2)
t2aa -= lib.einsum('ma,bjmi->jiba', c1a, eris_vooo)
t2bb -= lib.einsum('ma,bjmi->jiba', c1b, eris_VOOO)
t2ab -= lib.einsum('ma,BJmi->iJaB', c1a, eris_VOoo)
t2ab -= lib.einsum('MA,biMJ->iJbA', c1b, eris_voOO)
#:#:t1 -= 0.5*einsum('imef,maef->ia', c2, eris.ovvv)
#:eris_vovv = _cp(eris.vovv)
#:eris_VOVV = _cp(eris.VOVV)
#:eris_voVV = _cp(eris.voVV)
#:eris_VOvv = _cp(eris.VOvv)
#:t1a += lib.einsum('mief,emfa->ia', c2aa, eris_vovv)
#:t1b += lib.einsum('MIEF,EMFA->IA', c2bb, eris_VOVV)
#:t1a += lib.einsum('iMfE,EMaf->ia', c2ab, eris_VOvv)
#:t1b += lib.einsum('mIeF,emAF->IA', c2ab, eris_voVV)
#:#:tmp = einsum('ie,jeba->ijab', c1, numpy.asarray(eris.ovvv).conj())
#:#:t2 += tmp - tmp.transpose(1,0,2,3)
#:t2aa += lib.einsum('ie,bmae->imab', c1a, eris_vovv)
#:t2bb += lib.einsum('ie,bmae->imab', c1b, eris_VOVV)
#:t2ab += lib.einsum('ie,BMae->iMaB', c1a, eris_VOvv)
#:t2ab += lib.einsum('IE,amBE->mIaB', c1b, eris_voVV)
if nvira > 0 and nocca > 0:
mem_now = lib.current_memory()[0]
max_memory = myci.max_memory - mem_now
blksize = max(int(max_memory*1e6/8/(nvira**2*nocca*2)), 2)
for p0,p1 in lib.prange(0, nvira, blksize):
vovv = _cp(eris.vovv[p0:p1]).reshape((p1-p0)*nocca,-1)
vovv = lib.unpack_tril(vovv).reshape(p1-p0,nocca,nvira,nvira)
t1a += lib.einsum('mief,emfa->ia', c2aa[:,:,p0:p1], vovv)
t2aa[:,:,p0:p1] += lib.einsum('ie,bmae->miba', c1a, vovv)
vovv = None
if nvirb > 0 and noccb > 0:
mem_now = lib.current_memory()[0]
max_memory = myci.max_memory - mem_now
blksize = max(int(max_memory*1e6/8/(nvirb**2*noccb*2)), 2)
for p0,p1 in lib.prange(0, nvirb, blksize):
VOVV = _cp(eris.VOVV[p0:p1]).reshape((p1-p0)*noccb,-1)
VOVV = lib.unpack_tril(VOVV).reshape(p1-p0,noccb,nvirb,nvirb)
t1b += lib.einsum('MIEF,EMFA->IA', c2bb[:,:,p0:p1], VOVV)
t2bb[:,:,p0:p1] += lib.einsum('ie,bmae->miba', c1b, VOVV)
VOVV = None
if nvirb > 0 and nocca > 0:
mem_now = lib.current_memory()[0]
max_memory = myci.max_memory - mem_now
blksize = max(int(max_memory*1e6/8/(nvirb**2*nocca*2)), 2)
for p0,p1 in lib.prange(0, nvira, blksize):
voVV = _cp(eris.voVV[p0:p1]).reshape((p1-p0)*nocca,-1)
voVV = lib.unpack_tril(voVV).reshape(p1-p0,nocca,nvirb,nvirb)
t1b += lib.einsum('mIeF,emAF->IA', c2ab[:,:,p0:p1], voVV)
t2ab[:,:,p0:p1] += lib.einsum('IE,amBE->mIaB', c1b, voVV)
voVV = None
if nvira > 0 and noccb > 0:
mem_now = lib.current_memory()[0]
max_memory = myci.max_memory - mem_now
blksize = max(int(max_memory*1e6/8/(nvira**2*noccb*2)), 2)
for p0,p1 in lib.prange(0, nvirb, blksize):
VOvv = _cp(eris.VOvv[p0:p1]).reshape((p1-p0)*noccb,-1)
VOvv = lib.unpack_tril(VOvv).reshape(p1-p0,noccb,nvira,nvira)
t1a += lib.einsum('iMfE,EMaf->ia', c2ab[:,:,:,p0:p1], VOvv)
t2ab[:,:,:,p0:p1] += lib.einsum('ie,BMae->iMaB', c1a, VOvv)
VOvv = None
#:t1 = einsum('ie,ae->ia', c1, fvv)
t1a += einsum('ie,ae->ia', c1a, fvva)
t1b += einsum('ie,ae->ia', c1b, fvvb)
#:t1 -= einsum('ma,mi->ia', c1, foo)
t1a -=einsum('ma,mi->ia', c1a, fooa)
t1b -=einsum('ma,mi->ia', c1b, foob)
#:t1 += einsum('imae,me->ia', c2, fov)
t1a += numpy.einsum('imae,me->ia', c2aa, fova)
t1a += numpy.einsum('imae,me->ia', c2ab, fovb)
t1b += numpy.einsum('imae,me->ia', c2bb, fovb)
t1b += numpy.einsum('miea,me->ia', c2ab, fova)
#:tmp = einsum('ijae,be->ijab', c2, fvv)
#:t2 = tmp - tmp.transpose(0,1,3,2)
t2aa += lib.einsum('ijae,be->ijab', c2aa, fvva*.5)
t2bb += lib.einsum('ijae,be->ijab', c2bb, fvvb*.5)
t2ab += lib.einsum('iJaE,BE->iJaB', c2ab, fvvb)
t2ab += lib.einsum('iJeA,be->iJbA', c2ab, fvva)
#:tmp = einsum('imab,mj->ijab', c2, foo)
#:t2 -= tmp - tmp.transpose(1,0,2,3)
t2aa -= lib.einsum('imab,mj->ijab', c2aa, fooa*.5)
t2bb -= lib.einsum('imab,mj->ijab', c2bb, foob*.5)
t2ab -= lib.einsum('iMaB,MJ->iJaB', c2ab, foob)
t2ab -= lib.einsum('mIaB,mj->jIaB', c2ab, fooa)
#:tmp = numpy.einsum('ia,jb->ijab', c1, fov)
#:tmp = tmp - tmp.transpose(0,1,3,2)
#:t2 += tmp - tmp.transpose(1,0,2,3)
t2aa += numpy.einsum('ia,jb->ijab', c1a, fova)
t2bb += numpy.einsum('ia,jb->ijab', c1b, fovb)
t2ab += numpy.einsum('ia,jb->ijab', c1a, fovb)
t2ab += numpy.einsum('ia,jb->jiba', c1b, fova)
t2aa = t2aa - t2aa.transpose(0,1,3,2)
t2aa = t2aa - t2aa.transpose(1,0,2,3)
t2bb = t2bb - t2bb.transpose(0,1,3,2)
t2bb = t2bb - t2bb.transpose(1,0,2,3)
#:t2 += 0.5*einsum('mnab,mnij->ijab', c2, eris.oooo)
eris_oooo = _cp(eris.oooo)
eris_OOOO = _cp(eris.OOOO)
eris_ooOO = _cp(eris.ooOO)
t2aa += lib.einsum('mnab,minj->ijab', c2aa, eris_oooo)
t2bb += lib.einsum('mnab,minj->ijab', c2bb, eris_OOOO)
t2ab += lib.einsum('mNaB,miNJ->iJaB', c2ab, eris_ooOO)
#:t2 += 0.5*einsum('ijef,abef->ijab', c2, eris.vvvv)
#:eris_vvvv = ao2mo.restore(1, eris.vvvv, nvira)
#:eris_vvVV = ucisd_slow._restore(eris.vvVV, nvira, nvirb)
#:eris_VVVV = ao2mo.restore(1, eris.VVVV, nvirb)
#:t2aa += lib.einsum('ijef,aebf->ijab', c2aa, eris_vvvv)
#:t2bb += lib.einsum('ijef,aebf->ijab', c2bb, eris_VVVV)
#:t2ab += lib.einsum('iJeF,aeBF->iJaB', c2ab, eris_vvVV)
uccsd._add_vvvv_(myci, (c2aa,c2ab,c2bb), eris, (t2aa,t2ab,t2bb))
#:t1 += fov * c0
t1a += fova * c0
t1b += fovb * c0
#:t0 = numpy.einsum('ia,ia', fov, c1)
t0 += numpy.einsum('ia,ia', fova, c1a)
t0 += numpy.einsum('ia,ia', fovb, c1b)
return amplitudes_to_cisdvec(t0, (t1a,t1b), (t2aa,t2ab,t2bb))
def amplitudes_to_cisdvec(c0, c1, c2):
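    # Pack c0, the singles and the doubles into one flat CI vector; same-spin doubles
    # keep only the unique index pairs (strict lower triangle in occ and vir).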
c1a, c1b = c1
c2aa, c2ab, c2bb = c2
nocca, nvira = c1a.shape
noccb, nvirb = c1b.shape
def trilidx(n):
idx = numpy.tril_indices(n, -1)
return idx[0] * n + idx[1]
ooidxa = trilidx(nocca)
vvidxa = trilidx(nvira)
ooidxb = trilidx(noccb)
vvidxb = trilidx(nvirb)
size = (1, nocca*nvira, noccb*nvirb, nocca*noccb*nvira*nvirb,
len(ooidxa)*len(vvidxa), len(ooidxb)*len(vvidxb))
loc = numpy.cumsum(size)
civec = numpy.empty(loc[-1])
civec[0] = c0
civec[loc[0]:loc[1]] = c1a.ravel()
civec[loc[1]:loc[2]] = c1b.ravel()
civec[loc[2]:loc[3]] = c2ab.ravel()
lib.take_2d(c2aa.reshape(nocca**2,nvira**2), ooidxa, vvidxa, out=civec[loc[3]:loc[4]])
lib.take_2d(c2bb.reshape(noccb**2,nvirb**2), ooidxb, vvidxb, out=civec[loc[4]:loc[5]])
return civec
def cisdvec_to_amplitudes(civec, nmoa_nmob, nocca_noccb):
norba, norbb = nmoa_nmob
nocca, noccb = nocca_noccb
nvira = norba - nocca
nvirb = norbb - noccb
nooa = nocca * (nocca-1) // 2
nvva = nvira * (nvira-1) // 2
noob = noccb * (noccb-1) // 2
nvvb = nvirb * (nvirb-1) // 2
size = (1, nocca*nvira, noccb*nvirb, nocca*noccb*nvira*nvirb,
nooa*nvva, noob*nvvb)
loc = numpy.cumsum(size)
c0 = civec[0]
c1a = civec[loc[0]:loc[1]].reshape(nocca,nvira)
c1b = civec[loc[1]:loc[2]].reshape(noccb,nvirb)
c2ab = civec[loc[2]:loc[3]].reshape(nocca,noccb,nvira,nvirb)
c2aa = _unpack_4fold(civec[loc[3]:loc[4]], nocca, nvira)
c2bb = _unpack_4fold(civec[loc[4]:loc[5]], noccb, nvirb)
return c0, (c1a,c1b), (c2aa,c2ab,c2bb)
class UCISD(cisd.CISD):
@property
def nocc(self):
nocca, noccb = self.get_nocc()
return nocca + noccb
@property
def nmo(self):
nmoa, nmob = self.get_nmo()
return nmoa + nmob
get_nocc = uccsd.get_nocc
get_nmo = uccsd.get_nmo
def kernel(self, ci0=None, mo_coeff=None, eris=None):
if eris is None:
eris = self.ao2mo(mo_coeff)
self.converged, self.e_corr, self.ci = \
kernel(self, eris, ci0, max_cycle=self.max_cycle,
tol=self.conv_tol, verbose=self.verbose)
if numpy.all(self.converged):
logger.info(self, 'UCISD converged')
else:
logger.info(self, 'UCISD not converged')
if self.nroots > 1:
for i,e in enumerate(self.e_tot):
logger.note(self, 'UCISD root %d E = %.16g', i, e)
else:
logger.note(self, 'E(UCISD) = %.16g E_corr = %.16g',
self.e_tot, self.e_corr)
return self.e_corr, self.ci
def get_init_guess(self, eris=None):
if eris is None:
eris = self.ao2mo(self.mo_coeff)
nocca = eris.nocca
noccb = eris.noccb
mo_ea = eris.focka.diagonal()
mo_eb = eris.fockb.diagonal()
eia_a = mo_ea[:nocca,None] - mo_ea[None,nocca:]
eia_b = mo_eb[:noccb,None] - mo_eb[None,noccb:]
t1a = eris.focka[:nocca,nocca:] / eia_a
t1b = eris.fockb[:noccb,noccb:] / eia_b
eris_voov = _cp(eris.voov)
eris_voOV = _cp(eris.voOV)
eris_VOOV = _cp(eris.VOOV)
t2aa = eris_voov.transpose(1,2,0,3) - eris_voov.transpose(2,1,0,3)
t2bb = eris_VOOV.transpose(1,2,0,3) - eris_VOOV.transpose(2,1,0,3)
t2ab = eris_voOV.transpose(1,2,0,3).copy()
t2aa /= lib.direct_sum('ia+jb->ijab', eia_a, eia_a)
t2ab /= lib.direct_sum('ia+jb->ijab', eia_a, eia_b)
t2bb /= lib.direct_sum('ia+jb->ijab', eia_b, eia_b)
emp2 = numpy.einsum('ia,ia', eris.focka[:nocca,nocca:], t1a)
emp2 += numpy.einsum('ia,ia', eris.fockb[:noccb,noccb:], t1b)
emp2 += numpy.einsum('aijb,ijab', eris_voov, t2aa) * .25
emp2 -= numpy.einsum('ajib,ijab', eris_voov, t2aa) * .25
emp2 += numpy.einsum('aijb,ijab', eris_VOOV, t2bb) * .25
emp2 -= numpy.einsum('ajib,ijab', eris_VOOV, t2bb) * .25
emp2 += numpy.einsum('aijb,ijab', eris_voOV, t2ab)
self.emp2 = emp2
logger.info(self, 'Init t2, MP2 energy = %.15g', self.emp2)
return self.emp2, amplitudes_to_cisdvec(1, (t1a,t1b), (t2aa,t2ab,t2bb))
contract = contract
make_diagonal = make_diagonal
def ao2mo(self, mo_coeff=None):
nocc = self.nocc
nvir = self.nmo - nocc
mem_incore, mem_outcore, mem_basic = _mem_usage(nocc, nvir)
mem_now = lib.current_memory()[0]
if (self._scf._eri is not None and
(mem_incore+mem_now < self.max_memory) or self.mol.incore_anyway):
return _make_eris_incore(self, mo_coeff)
elif hasattr(self._scf, 'with_df'):
raise NotImplementedError
else:
return _make_eris_outcore(self, mo_coeff)
def to_fci(self, cisdvec, nmoa_nmob=None, nocca_noccb=None):
return to_fci(cisdvec, nmoa_nmob, nocca_noccb)
def from_fci(self, fcivec, nmoa_nmob=None, nocca_noccb=None):
return from_fci(fcivec, nmoa_nmob, nocca_noccb)
def make_rdm1(self, ci=None, nmoa_nmob=None, nocca_noccb=None):
if ci is None: ci = self.ci
if nmoa_nmob is None: nmoa_nmob = self.get_nmo()
if nocca_noccb is None: nocca_noccb = self.get_nocc()
return make_rdm1(ci, nmoa_nmob, nocca_noccb)
def make_rdm2(self, ci=None, nmoa_nmob=None, nocca_noccb=None):
if ci is None: ci = self.ci
if nmoa_nmob is None: nmoa_nmob = self.get_nmo()
if nocca_noccb is None: nocca_noccb = self.get_nocc()
return make_rdm2(ci, nmoa_nmob, nocca_noccb)
CISD = UCISD
class _UCISD_ERIs:
def __init__(self, myci, mo_coeff=None):
moidx = uccsd.get_umoidx(myci)
if mo_coeff is None:
mo_coeff = (myci.mo_coeff[0][:,moidx[0]], myci.mo_coeff[1][:,moidx[1]])
else:
mo_coeff = (mo_coeff[0][:,moidx[0]], mo_coeff[1][:,moidx[1]])
# Note: Always recompute the fock matrix in UCISD because the mf object may be
# converted from ROHF object in which orbital energies are eigenvalues of
# Roothaan Fock rather than the true alpha, beta orbital energies.
dm = myci._scf.make_rdm1(myci.mo_coeff, myci.mo_occ)
fockao = myci._scf.get_hcore() + myci._scf.get_veff(myci.mol, dm)
self.focka = reduce(numpy.dot, (mo_coeff[0].T, fockao[0], mo_coeff[0]))
self.fockb = reduce(numpy.dot, (mo_coeff[1].T, fockao[1], mo_coeff[1]))
self.mo_coeff = mo_coeff
self.nocca, self.noccb = myci.get_nocc()
self.oooo = None
self.vooo = None
self.voov = None
self.vvoo = None
self.vovv = None
self.vvvv = None
self.OOOO = None
self.VOOO = None
self.VOOV = None
self.VVOO = None
self.VOVV = None
self.VVVV = None
self.ooOO = None
self.voOO = None
self.voOV = None
self.vvOO = None
self.voVV = None
self.vvVV = None
self.VOoo = None
self.VVoo = None
self.VOvv = None
def _make_eris_incore(myci, mo_coeff=None):
eris = _UCISD_ERIs(myci, mo_coeff)
nocca = eris.nocca
noccb = eris.noccb
nmoa = eris.focka.shape[0]
nmob = eris.fockb.shape[0]
nvira = nmoa - nocca
nvirb = nmob - noccb
moa, mob = eris.mo_coeff
eri_aa = ao2mo.restore(1, ao2mo.full(myci._scf._eri, moa), nmoa)
eris.oooo = eri_aa[:nocca,:nocca,:nocca,:nocca].copy()
eris.vooo = eri_aa[nocca:,:nocca,:nocca,:nocca].copy()
eris.voov = eri_aa[nocca:,:nocca,:nocca,nocca:].copy()
eris.vvoo = eri_aa[nocca:,nocca:,:nocca,:nocca].copy()
vovv = eri_aa[nocca:,:nocca,nocca:,nocca:].reshape(-1,nvira,nvira)
eris.vovv = lib.pack_tril(vovv).reshape(nvira,nocca,nvira*(nvira+1)//2)
eris.vvvv = ao2mo.restore(4, eri_aa[nocca:,nocca:,nocca:,nocca:].copy(), nvira)
vovv = eri_aa = None
eri_bb = ao2mo.restore(1, ao2mo.full(myci._scf._eri, mob), nmob)
eris.OOOO = eri_bb[:noccb,:noccb,:noccb,:noccb].copy()
eris.VOOO = eri_bb[noccb:,:noccb,:noccb,:noccb].copy()
eris.VOOV = eri_bb[noccb:,:noccb,:noccb,noccb:].copy()
eris.VVOO = eri_bb[noccb:,noccb:,:noccb,:noccb].copy()
VOVV = eri_bb[noccb:,:noccb,noccb:,noccb:].reshape(-1,nvirb,nvirb)
eris.VOVV = lib.pack_tril(VOVV).reshape(nvirb,noccb,nvirb*(nvirb+1)//2)
eris.VVVV = ao2mo.restore(4, eri_bb[noccb:,noccb:,noccb:,noccb:].copy(), nvirb)
VOVV = eri_bb = None
eri_ab = ao2mo.general(myci._scf._eri, (moa,moa,mob,mob), compact=False)
eri_ab = eri_ab.reshape(nmoa,nmoa,nmob,nmob)
eris.ooOO = eri_ab[:nocca,:nocca,:noccb,:noccb].copy()
eris.voOO = eri_ab[nocca:,:nocca,:noccb,:noccb].copy()
eris.voOV = eri_ab[nocca:,:nocca,:noccb,noccb:].copy()
eris.vvOO = eri_ab[nocca:,nocca:,:noccb,:noccb].copy()
voVV = eri_ab[nocca:,:nocca,noccb:,noccb:].reshape(nocca*nvira,nvirb,nvirb)
eris.voVV = lib.pack_tril(voVV).reshape(nvira,nocca,nvirb*(nvirb+1)//2)
voVV = None
vvVV = eri_ab[nocca:,nocca:,noccb:,noccb:].reshape(nvira**2,nvirb**2)
idxa = numpy.tril_indices(nvira)
idxb = numpy.tril_indices(nvirb)
eris.vvVV = lib.take_2d(vvVV, idxa[0]*nvira+idxa[1], idxb[0]*nvirb+idxb[1])
eri_ba = lib.transpose(eri_ab.reshape(nmoa**2,nmob**2))
eri_ba = eri_ba.reshape(nmob,nmob,nmoa,nmoa)
eris.VOoo = eri_ba[noccb:,:noccb,:nocca,:nocca].copy()
eris.VVoo = eri_ba[noccb:,noccb:,:nocca,:nocca].copy()
VOvv = eri_ba[noccb:,:noccb,nocca:,nocca:].reshape(noccb*nvirb,nvira,nvira)
eris.VOvv = lib.pack_tril(VOvv).reshape(nvirb,noccb,nvira*(nvira+1)//2)
VOvv = None
eris.VVvv = eri_ba[noccb:,noccb:,nocca:,nocca:].copy() #X
return eris
def _make_eris_outcore(myci, mo_coeff=None):
cput0 = (time.clock(), time.time())
log = logger.Logger(myci.stdout, myci.verbose)
eris = _UCISD_ERIs(myci, mo_coeff)
nocca = eris.nocca
noccb = eris.noccb
nmoa = eris.focka.shape[0]
nmob = eris.fockb.shape[0]
nvira = nmoa - nocca
nvirb = nmob - noccb
moa, mob = eris.mo_coeff
mol = myci.mol
eris.feri = lib.H5TmpFile()
dtype = 'f8'
eris.oooo = eris.feri.create_dataset('oooo', (nocca,nocca,nocca,nocca), dtype)
eris.vooo = eris.feri.create_dataset('vooo', (nvira,nocca,nocca,nocca), dtype)
eris.voov = eris.feri.create_dataset('voov', (nvira,nocca,nocca,nvira), dtype)
eris.vvoo = eris.feri.create_dataset('vvoo', (nvira,nvira,nocca,nocca), dtype)
eris.vovv = eris.feri.create_dataset('vovv', (nvira,nocca,nvira*(nvira+1)//2), dtype)
#eris.vvvv = eris.feri.create_dataset('vvvv', (nvira,nvira,nvira,nvira), dtype)
eris.OOOO = eris.feri.create_dataset('OOOO', (noccb,noccb,noccb,noccb), dtype)
eris.VOOO = eris.feri.create_dataset('VOOO', (nvirb,noccb,noccb,noccb), dtype)
eris.VOOV = eris.feri.create_dataset('VOOV', (nvirb,noccb,noccb,nvirb), dtype)
eris.VVOO = eris.feri.create_dataset('VVOO', (nvirb,nvirb,noccb,noccb), dtype)
eris.VOVV = eris.feri.create_dataset('VOVV', (nvirb,noccb,nvirb*(nvirb+1)//2), dtype)
#eris.VVVV = eris.feri.create_dataset('VVVV', (nvirb,nvirb,nvirb,nvirb), dtype)
eris.ooOO = eris.feri.create_dataset('ooOO', (nocca,nocca,noccb,noccb), dtype)
eris.voOO = eris.feri.create_dataset('voOO', (nvira,nocca,noccb,noccb), dtype)
eris.voOV = eris.feri.create_dataset('voOV', (nvira,nocca,noccb,nvirb), dtype)
eris.vvOO = eris.feri.create_dataset('vvOO', (nvira,nvira,noccb,noccb), dtype)
eris.voVV = eris.feri.create_dataset('voVV', (nvira,nocca,nvirb*(nvirb+1)//2), dtype)
#eris.vvVV = eris.feri.create_dataset('vvVV', (nvira,nvira,nvirb,nvirb), dtype)
eris.VOoo = eris.feri.create_dataset('VOoo', (nvirb,noccb,nocca,nocca), dtype)
eris.VVoo = eris.feri.create_dataset('VVoo', (nvirb,nvirb,nocca,nocca), dtype)
eris.VOvv = eris.feri.create_dataset('VOvv', (nvirb,noccb,nvira*(nvira+1)//2), dtype)
cput1 = time.clock(), time.time()
# <ij||pq> = <ij|pq> - <ij|qp> = (ip|jq) - (iq|jp)
tmpfile2 = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
ao2mo.general(mol, (moa,moa[:,:nocca],moa,moa), tmpfile2.name, 'aa')
with h5py.File(tmpfile2.name) as f:
buf = lib.unpack_tril(f['aa'][:nocca*nocca])
buf = buf.reshape(nocca,nocca,nmoa,nmoa)
eris.oooo[:] = buf[:,:,:nocca,:nocca]
oovv = buf[:,:,nocca:,nocca:].reshape(nocca**2,nvira**2)
eris.vvoo[:] = lib.transpose(oovv).reshape(nvira,nvira,nocca,nocca)
buf = oovv = None
for i0, i1 in lib.prange(0, nvira, nocca):
buf = lib.unpack_tril(f['aa'][(nocca+i0)*nocca:(nocca+i1)*nocca])
eris.vovv[i0:i1] = lib.pack_tril(buf[:,nocca:,nocca:]).reshape(i1-i0,nocca,-1)
buf = buf.reshape(i1-i0,nocca,nmoa,nmoa)
eris.vooo[i0:i1] = buf[:,:nocca,:nocca,:nocca]
eris.voov[i0:i1] = buf[:,:nocca,:nocca,nocca:]
buf = None
del(f['aa'])
if noccb > 0:
ao2mo.general(mol, (mob,mob[:,:noccb],mob,mob), tmpfile2.name, 'bb')
with h5py.File(tmpfile2.name) as f:
buf = lib.unpack_tril(f['bb'][:noccb*noccb])
buf = buf.reshape(noccb,noccb,nmob,nmob)
eris.OOOO[:] = buf[:,:,:noccb,:noccb]
oovv = buf[:,:,noccb:,noccb:].reshape(noccb**2,nvirb**2)
eris.VVOO[:] = lib.transpose(oovv).reshape(nvirb,nvirb,noccb,noccb)
buf = oovv = None
for i0, i1 in lib.prange(0, nvirb, noccb):
buf = lib.unpack_tril(f['bb'][(noccb+i0)*noccb:(noccb+i1)*noccb])
eris.VOVV[i0:i1] = lib.pack_tril(buf[:,noccb:,noccb:]).reshape(i1-i0,noccb,-1)
buf = buf.reshape(i1-i0,noccb,nmob,nmob)
eris.VOOO[i0:i1] = buf[:,:noccb,:noccb,:noccb]
eris.VOOV[i0:i1] = buf[:,:noccb,:noccb,noccb:]
buf = None
del(f['bb'])
ao2mo.general(mol, (moa,moa[:,:nocca],mob,mob), tmpfile2.name, 'ab')
with h5py.File(tmpfile2.name) as f:
buf = lib.unpack_tril(f['ab'][:nocca*nocca])
buf = buf.reshape(nocca,nocca,nmob,nmob)
eris.ooOO[:] = buf[:,:,:noccb,:noccb]
oovv = buf[:,:,noccb:,noccb:].reshape(nocca**2,nvirb**2)
eris.VVoo[:] = lib.transpose(oovv).reshape(nvirb,nvirb,nocca,nocca)
buf = oovv = None
for i0, i1 in lib.prange(0, nvira, nocca):
buf = lib.unpack_tril(f['ab'][(nocca+i0)*nocca:(nocca+i1)*nocca])
eris.voVV[i0:i1] = lib.pack_tril(buf[:,noccb:,noccb:]).reshape(i1-i0,nocca,-1)
buf = buf.reshape(i1-i0,nocca,nmob,nmob)
eris.voOO[i0:i1] = buf[:,:nocca,:noccb,:noccb]
eris.voOV[i0:i1] = buf[:,:nocca,:noccb,noccb:]
buf = None
del(f['ab'])
if noccb > 0:
ao2mo.general(mol, (mob,mob[:,:noccb],moa,moa), tmpfile2.name, 'ba')
with h5py.File(tmpfile2.name) as f:
buf = lib.unpack_tril(f['ba'][:noccb*noccb])
buf = buf.reshape(noccb,noccb,nmoa,nmoa)
oovv = buf[:,:,nocca:,nocca:].reshape(noccb**2,nvira**2)
eris.vvOO[:] = lib.transpose(oovv).reshape(nvira,nvira,noccb,noccb)
buf = oovv = None
for i0, i1 in lib.prange(0, nvirb, noccb):
buf = lib.unpack_tril(f['ba'][(noccb+i0)*noccb:(noccb+i1)*noccb])
eris.VOvv[i0:i1] = lib.pack_tril(buf[:,nocca:,nocca:]).reshape(i1-i0,noccb,-1)
buf = buf.reshape(i1-i0,noccb,nmoa,nmoa)
eris.VOoo[i0:i1] = buf[:,:noccb,:nocca,:nocca]
buf = None
del(f['ba'])
cput1 = log.timer_debug1('transforming vopq', *cput1)
orbva = moa[:,nocca:]
orbvb = mob[:,noccb:]
ao2mo.full(mol, orbva, eris.feri, dataname='vvvv')
ao2mo.full(mol, orbvb, eris.feri, dataname='VVVV')
ao2mo.general(mol, (orbva,orbva,orbvb,orbvb), eris.feri, dataname='vvVV')
eris.vvvv = eris.feri['vvvv']
eris.VVVV = eris.feri['VVVV']
eris.vvVV = eris.feri['vvVV']
cput1 = log.timer_debug1('transforming vvvv', *cput1)
log.timer('CISD integral transformation', *cput0)
return eris
def _cp(a):
return numpy.array(a, copy=False, order='C')
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
from pyscf import fci
numpy.random.seed(12)
nocc = 3
nvir = 5
nmo = nocc + nvir
c1a = numpy.random.random((nocc,nvir))
c1b = numpy.random.random((nocc,nvir))
c2aa = numpy.random.random((nocc,nocc,nvir,nvir))
c2bb = numpy.random.random((nocc,nocc,nvir,nvir))
c2ab = numpy.random.random((nocc,nocc,nvir,nvir))
c1 = (c1a, c1b)
c2 = (c2aa, c2ab, c2bb)
cisdvec = amplitudes_to_cisdvec(1., c1, c2)
fcivec = to_fci(cisdvec, (nmo,nmo), (nocc,nocc))
cisdvec1 = from_fci(fcivec, (nmo,nmo), (nocc,nocc))
print(abs(cisdvec-cisdvec1).sum())
ci1 = to_fci(cisdvec1, (nmo,nmo), (nocc,nocc))
print(abs(fcivec-ci1).sum())
mol = gto.Mole()
mol.verbose = 0
mol.atom = [
['H', ( 1.,-1. , 0. )],
['H', ( 0.,-1. ,-1. )],
['H', ( 1.,-0.5 , 0. )],
['H', ( 0.,-1. , 1. )],
]
mol.charge = -2
mol.spin = 2
mol.basis = '3-21g'
mol.build()
mf = scf.UHF(mol).run(conv_tol=1e-14)
ehf0 = mf.e_tot - mol.energy_nuc()
myci = CISD(mf)
numpy.random.seed(10)
mo = numpy.random.random(myci.mo_coeff.shape)
eris0 = _make_eris_incore(myci, mo)
eris1 = _make_eris_outcore(myci, mo)
print('oooo', abs(eris0.oooo - eris1.oooo).max())
print('vooo', abs(eris0.vooo - eris1.vooo).max())
print('voov', abs(eris0.voov - eris1.voov).max())
print('vvoo', abs(eris0.vvoo - eris1.vvoo).max())
print('vovv', abs(eris0.vovv - eris1.vovv).max())
print('vvvv', abs(eris0.vvvv - eris1.vvvv).max())
print('OOOO', abs(eris0.OOOO - eris1.OOOO).max())
print('VOOO', abs(eris0.VOOO - eris1.VOOO).max())
print('VOOV', abs(eris0.VOOV - eris1.VOOV).max())
print('VVOO', abs(eris0.VVOO - eris1.VVOO).max())
print('VOVV', abs(eris0.VOVV - eris1.VOVV).max())
print('VVVV', abs(eris0.VVVV - eris1.VVVV).max())
print('ooOO', abs(eris0.ooOO - eris1.ooOO).max())
print('voOO', abs(eris0.voOO - eris1.voOO).max())
print('voOV', abs(eris0.voOV - eris1.voOV).max())
print('vvOO', abs(eris0.vvOO - eris1.vvOO).max())
print('voVV', abs(eris0.voVV - eris1.voVV).max())
print('vvVV', abs(eris0.vvVV - eris1.vvVV).max())
print('VOoo', abs(eris0.VOoo - eris1.VOoo).max())
print('VVoo', abs(eris0.VVoo - eris1.VVoo).max())
print('VOvv', abs(eris0.VOvv - eris1.VOvv).max())
eris = myci.ao2mo(mo)
print(lib.finger(myci.make_diagonal(eris)) - -838.45507742639279)
numpy.random.seed(12)
nocca, noccb = mol.nelec
nmo = mf.mo_occ[0].size
nvira = nmo - nocca
nvirb = nmo - noccb
c1a = .1 * numpy.random.random((nocca,nvira))
c1b = .1 * numpy.random.random((noccb,nvirb))
c2aa = .1 * numpy.random.random((nocca,nocca,nvira,nvira))
c2bb = .1 * numpy.random.random((noccb,noccb,nvirb,nvirb))
c2ab = .1 * numpy.random.random((nocca,noccb,nvira,nvirb))
cisdvec = amplitudes_to_cisdvec(1., (c1a, c1b), (c2aa, c2ab, c2bb))
hcisd0 = contract(myci, amplitudes_to_cisdvec(1., (c1a,c1b), (c2aa,c2ab,c2bb)), eris)
# from pyscf.ci import gcisd_slow
# res = cisdvec_to_amplitudes(hcisd0, nmoa_nmob, nocca_noccb)
# res = (res[0],
# uccsd.spatial2spin(res[1], eris.orbspin),
# uccsd.spatial2spin(res[2], eris.orbspin))
# print(lib.finger(gcisd_slow.amplitudes_to_cisdvec(*res)) - 187.10206473716548)
print(lib.finger(hcisd0) - 466.56620234351681)
eris = myci.ao2mo(mf.mo_coeff)
hcisd0 = contract(myci, cisdvec, eris)
eri_aa = ao2mo.kernel(mf._eri, mf.mo_coeff[0])
eri_bb = ao2mo.kernel(mf._eri, mf.mo_coeff[1])
eri_ab = ao2mo.kernel(mf._eri, [mf.mo_coeff[0], mf.mo_coeff[0],
mf.mo_coeff[1], mf.mo_coeff[1]])
h1a = reduce(numpy.dot, (mf.mo_coeff[0].T, mf.get_hcore(), mf.mo_coeff[0]))
h1b = reduce(numpy.dot, (mf.mo_coeff[1].T, mf.get_hcore(), mf.mo_coeff[1]))
h2e = fci.direct_uhf.absorb_h1e((h1a,h1b), (eri_aa,eri_ab,eri_bb),
h1a.shape[0], mol.nelec, .5)
nmo = (mf.mo_coeff[0].shape[1],mf.mo_coeff[1].shape[1])
fcivec = to_fci(cisdvec, nmo, mol.nelec)
hci1 = fci.direct_uhf.contract_2e(h2e, fcivec, h1a.shape[0], mol.nelec)
hci1 -= ehf0 * fcivec
hcisd1 = from_fci(hci1, nmo, mol.nelec)
print(numpy.linalg.norm(hcisd1-hcisd0) / numpy.linalg.norm(hcisd0))
ecisd = myci.kernel(eris=eris)[0]
efci = fci.direct_uhf.kernel((h1a,h1b), (eri_aa,eri_ab,eri_bb),
h1a.shape[0], mol.nelec)[0]
print(ecisd, ecisd - -0.037067274690894436, '> E(fci)', efci-ehf0)
mol = gto.Mole()
mol.verbose = 0
mol.atom = [
['H', ( 1.,-1. , 0. )],
['H', ( 0.,-1. ,-1. )],
['H', ( 1.,-0.5 , 0. )],
['H', ( 0.,-1. , 1. )],
]
mol.charge = 2
mol.spin = 2
mol.basis = '6-31g'
mol.build()
mf = scf.UHF(mol).run(conv_tol=1e-14)
ehf0 = mf.e_tot - mol.energy_nuc()
myci = CISD(mf)
eris = myci.ao2mo()
ecisd = myci.kernel(eris=eris)[0]
eri_aa = ao2mo.kernel(mf._eri, mf.mo_coeff[0])
eri_bb = ao2mo.kernel(mf._eri, mf.mo_coeff[1])
eri_ab = ao2mo.kernel(mf._eri, [mf.mo_coeff[0], mf.mo_coeff[0],
mf.mo_coeff[1], mf.mo_coeff[1]])
h1a = reduce(numpy.dot, (mf.mo_coeff[0].T, mf.get_hcore(), mf.mo_coeff[0]))
h1b = reduce(numpy.dot, (mf.mo_coeff[1].T, mf.get_hcore(), mf.mo_coeff[1]))
efci, fcivec = fci.direct_uhf.kernel((h1a,h1b), (eri_aa,eri_ab,eri_bb),
h1a.shape[0], mol.nelec)
print(ecisd, '== E(fci)', efci-ehf0)
dm1ref, dm2ref = fci.direct_uhf.make_rdm12s(fcivec, h1a.shape[0], mol.nelec)
rdm1 = make_rdm1(myci.ci, myci.get_nmo(), myci.get_nocc())
rdm2 = make_rdm2(myci.ci, myci.get_nmo(), myci.get_nocc())
print('dm1a', abs(dm1ref[0] - rdm1[0]).max())
print('dm1b', abs(dm1ref[1] - rdm1[1]).max())
print('dm2aa', abs(dm2ref[0] - rdm2[0]).max())
print('dm2ab', abs(dm2ref[1] - rdm2[1]).max())
print('dm2bb', abs(dm2ref[2] - rdm2[2]).max())
mol = gto.Mole()
mol.verbose = 0
mol.atom = [
['O', ( 0., 0. , 0. )],
['H', ( 0., -0.757, 0.587)],
['H', ( 0., 0.757 , 0.587)],]
mol.basis = {'H': 'sto-3g',
'O': 'sto-3g',}
mol.build()
mf = scf.UHF(mol).run(conv_tol=1e-14)
myci = CISD(mf)
eris = myci.ao2mo()
ecisd, civec = myci.kernel(eris=eris)
print(ecisd - -0.048878084082066106)
nmoa = mf.mo_energy[0].size
nmob = mf.mo_energy[1].size
rdm1 = myci.make_rdm1(civec)
rdm2 = myci.make_rdm2(civec)
eri_aa = ao2mo.kernel(mf._eri, mf.mo_coeff[0], compact=False).reshape([nmoa]*4)
eri_bb = ao2mo.kernel(mf._eri, mf.mo_coeff[1], compact=False).reshape([nmob]*4)
eri_ab = ao2mo.kernel(mf._eri, [mf.mo_coeff[0], mf.mo_coeff[0],
mf.mo_coeff[1], mf.mo_coeff[1]], compact=False)
eri_ab = eri_ab.reshape(nmoa,nmoa,nmob,nmob)
h1a = reduce(numpy.dot, (mf.mo_coeff[0].T, mf.get_hcore(), mf.mo_coeff[0]))
h1b = reduce(numpy.dot, (mf.mo_coeff[1].T, mf.get_hcore(), mf.mo_coeff[1]))
e2 = (numpy.einsum('ij,ji', h1a, rdm1[0]) +
numpy.einsum('ij,ji', h1b, rdm1[1]) +
numpy.einsum('ijkl,ijkl', eri_aa, rdm2[0]) * .5 +
numpy.einsum('ijkl,ijkl', eri_ab, rdm2[1]) +
numpy.einsum('ijkl,ijkl', eri_bb, rdm2[2]) * .5)
print(ecisd + mf.e_tot - mol.energy_nuc() - e2) # = 0
print(abs(rdm1[0] - (numpy.einsum('ijkk->ij', rdm2[0]) +
numpy.einsum('ijkk->ij', rdm2[1]))/(mol.nelectron-1)).sum())
print(abs(rdm1[1] - (numpy.einsum('ijkk->ij', rdm2[2]) +
numpy.einsum('kkij->ij', rdm2[1]))/(mol.nelectron-1)).sum())
| 42.309656 | 94 | 0.600824 |
7945521ce5072c11815bdb096868d7c7a220a095 | 4,485 | py | Python | db/import-reports.py | brainwane/active-geolocator | 725fdc26ee1b22f82a77516232e5b12b54d6d862 | [
"MIT"
] | null | null | null | db/import-reports.py | brainwane/active-geolocator | 725fdc26ee1b22f82a77516232e5b12b54d6d862 | [
"MIT"
] | null | null | null | db/import-reports.py | brainwane/active-geolocator | 725fdc26ee1b22f82a77516232e5b12b54d6d862 | [
"MIT"
] | null | null | null | #! /usr/bin/python3
import collections
import glob
import json
import os
import psycopg2
import subprocess
import sys
# Usage: import-reports <directory> <database>
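# For example (directory and database name are illustrative, not real defaults):
#   python3 import-reports.py /srv/geoloc/reports geolocator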
def read_one_report(fname, bname):
with subprocess.Popen(
["gpg2", "--decrypt", "--quiet", "--batch", "--no-tty", fname],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.DEVNULL) as proc:
data, msgs = proc.communicate()
rc = proc.wait()
if rc:
raise RuntimeError("{}: gpg: exit {}\n{}".format(fname, rc, msgs))
blob = json.loads(data.decode("utf-8"))
blob['blob'] = bname
results = [
(r[0], int(r[2]), float(r[3]))
for r in blob['results']
]
del blob['results']
meta = {}
meta['date'] = blob['timestamp']
meta['proxied'] = ('proxied_connection' in blob and
blob['proxied_connection'])
if meta['proxied']:
meta['proxy_addr'] = blob['client_ip']
meta['client_addr'] = '0.0.0.0'
if 'socks5' in blob:
blob['proxy_type'] = 'socks5'
else:
blob['proxy_type'] = 'ovpn'
else:
meta['client_addr'] = blob['client_ip']
meta['proxy_addr'] = '0.0.0.0'
    if 'location_unknown' in blob:
meta['client_lat'] = 0
meta['client_lon'] = 0
else:
meta['client_lat'] = blob['latitude']
meta['client_lon'] = blob['longitude']
if not meta['proxied'] or 'proxy_location_unknown' in blob:
meta['proxy_lat'] = 0
meta['proxy_lon'] = 0
else:
meta['proxy_lat'] = blob['proxy_latitude']
meta['proxy_lon'] = blob['proxy_longitude']
if 'core' in blob: del blob['core']
if 'timestamp' in blob: del blob['timestamp']
if 'proxied_connection' in blob: del blob['proxied_connection']
if 'client_ip' in blob: del blob['client_ip']
if 'latitude' in blob: del blob['latitude']
if 'longitude' in blob: del blob['longitude']
if 'location_unknown' in blob: del blob['location_unknown']
if 'proxy_latitude' in blob: del blob['proxy_latitude']
if 'proxy_longitude' in blob: del blob['proxy_longitude']
if 'proxy_location_unknown' in blob: del blob['proxy_location_unknown']
if 'socks5' in blob: del blob['socks5']
meta['annot'] = json.dumps(blob)
meta['cid'] = blob['blob']
return meta, results
def get_already(db):
with db.cursor() as cur:
cur.execute("select distinct annot->>'blob' from batches")
return frozenset(r[0] for r in cur)
def record_one_batch(db, meta, results):
with db, db.cursor() as cur:
cur.execute("""
insert into batches
(date, proxied,
client_lat, client_lon, client_addr,
proxy_lat, proxy_lon, proxy_addr,
annot)
values
(%(date)s, %(proxied)s,
%(client_lat)s, %(client_lon)s, %(client_addr)s,
%(proxy_lat)s, %(proxy_lon)s, %(proxy_addr)s,
%(annot)s::jsonb)
returning id
""", meta)
batchid = cur.fetchone()
if meta['proxied']:
cip = meta['proxy_addr']
clat = meta['proxy_lat']
clon = meta['proxy_lon']
else:
cip = meta['client_addr']
clat = meta['client_lat']
clon = meta['client_lon']
cur.execute("""select label from hosts where ipv4 = %s""", (cip,))
if len(cur.fetchall()) == 0:
cur.execute("""
insert into hosts
values ('client', %s, %s, -1, default, %s, %s, default)
""",
(meta['cid'], cip, clat, clon))
meas = b",".join(
cur.mogrify("(%s,%s,%s,%s,%s)",
(batchid, cip, dest_ip, rtt, status))
for dest_ip, status, rtt in results
)
cur.execute(b"insert into measurements values " + meas)
def main():
reportdir = sys.argv[1]
db = psycopg2.connect(dbname=sys.argv[2])
already = get_already(db)
for fn in glob.glob(os.path.join(reportdir, "blob*")):
bn = os.path.basename(fn)
if bn in already: continue
sys.stderr.write(bn + "\n")
meta, results = read_one_report(fn, bn)
record_one_batch(db, meta, results)
main()
| 32.5 | 78 | 0.540245 |
7945546ca564cfbeb0d039736c5080a85ae08be6 | 2,378 | py | Python | docs/conf.py | vanheeringen-lab/snakemake-workflows | 5c13d49417ccd76cde90aace2279effdf4928726 | [
"MIT"
] | 29 | 2019-08-21T11:47:19.000Z | 2020-05-29T09:27:58.000Z | docs/conf.py | vanheeringen-lab/snakemake-workflows | 5c13d49417ccd76cde90aace2279effdf4928726 | [
"MIT"
] | 192 | 2019-07-30T12:02:32.000Z | 2020-06-01T14:25:01.000Z | docs/conf.py | vanheeringen-lab/snakemake-workflows | 5c13d49417ccd76cde90aace2279effdf4928726 | [
"MIT"
] | 4 | 2019-09-02T13:41:18.000Z | 2020-05-04T15:11:18.000Z | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
# -- Project information -----------------------------------------------------
project = "seq2science"
author = "Maarten van der Sande & Siebren Frölich"
copyright = "Maarten van der Sande, Siebren Frölich, Jos Smits, Rebecca Snabel, Tilman Schäfers, & " \
"Simon van Heeringen."
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "m2r2", "sphinxarg.ext"]
source_suffix = [".rst", ".md"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
html_sidebars = {
"**": ["globaltoc.html", "relations.html", "sourcelink.html", "searchbox.html"]
}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["resources"]
html_context = {
"display_github": True,
"github_host": "github.com",
"github_user": "vanheeringen-lab",
"github_repo": "seq2science",
"github_version": "master",
"conf_py_path": "/docs/",
"source_suffix": ".rst",
}
| 36.584615 | 102 | 0.659798 |
794554ae036938ab80e69428576ae14751b2d48d | 9,652 | py | Python | src/ch4/searchengine.py | amolnayak311/Programming-Collective-Intelligence | eaa55c3989a8d36e7b766fbaba267b4cbaedf5be | [
"Apache-2.0"
] | null | null | null | src/ch4/searchengine.py | amolnayak311/Programming-Collective-Intelligence | eaa55c3989a8d36e7b766fbaba267b4cbaedf5be | [
"Apache-2.0"
] | null | null | null | src/ch4/searchengine.py | amolnayak311/Programming-Collective-Intelligence | eaa55c3989a8d36e7b766fbaba267b4cbaedf5be | [
"Apache-2.0"
] | null | null | null | #
#
#
from urllib2 import urlopen
from BeautifulSoup import BeautifulSoup
from urlparse import urljoin
from pysqlite2 import dbapi2 as sqlite
import re
from itertools import groupby
ignorewords=frozenset(['the','of','to','and','a','in','is','it'])
class crawler:
def __init__(self, dbname):
self.con = sqlite.connect(dbname)
def __del__(self):
self.con.close()
def dbcommit(self):
self.con.commit()
#
#
#
def getentryid(self, table, field, value, createnew=True):
res = self.con.execute("select rowid from %s where %s = '%s'" % (table, field, value)).fetchone()
if res == None:
return self.con.execute("insert into %s (%s) values ('%s')" % (table, field, value)).lastrowid
else:
return res[0]
def addtoindex(self, url, soup):
if self.isindexed(url):
print "%s already indexed, skipping" % url
return
print "Indexing %s"%url
text = self.gettextonly(soup)
words = self.separatewords(text)
#Create and get the id from the DB for this URL
urlid = self.getentryid('urllist', 'url', url)
for i in range(len(words)):
word = words[i]
if word in ignorewords: continue
wordid = self.getentryid('wordlist', 'word', word)
self.con.execute('insert into wordlocation(urlid, wordid, location) \
values (%d, %d, %d)' % (urlid, wordid, i))
#
#
#
def gettextonly(self, soup):
v = soup.string
if v == None:
contents = soup.contents
resulttext = ''
for c in contents:
resulttext += self.gettextonly(c) + "\n"
return resulttext
else:
return v.strip()
#
    # Not a trivial operation in reality, and a lot of research is being done into this.
    # For the sake of this example we simply split on anything that is not a word or number
#
def separatewords(self, text):
splitter = re.compile("\\W*")
return [w.lower() for w in splitter.split(text) if w != '']
#
#
#
def isindexed(self, url):
val = self.con.execute("select rowid from urllist where url = '%s'" %url).fetchone()
        #The page exists in urllist, but has it actually been crawled (words indexed)?
        if val != None:
            wordcount = self.con.execute("select count(1) from wordlocation where urlid = %d" % val[0]).fetchone()
            return False if wordcount[0] == 0 else True
else:
return False
def addlinkref(self, urlFrom, urlTo, linkText):
pass
#Breadth first search crawl
def crawl(self, pages, depth = 2):
for _ in range(depth):
newpages = set()
for page in pages:
try:
c = urlopen(page)
except Exception, err:
print "Exception while opening page %s"%page, err
continue
soup = BeautifulSoup(c.read())
self.addtoindex(page, soup)
links = soup('a')
for link in links:
link_attributes = dict(link.attrs)
if 'href' in link_attributes:
url = urljoin(page, link['href'])
#TODO: What is this check for
if url.find("'") != -1: continue
url = url.split("#")[0] #Remove the # in the URL and keep the base URL only
#Index http/https only
if url[0:4] == 'http' and not self.isindexed(url):
newpages.add(url)
linkText = self.gettextonly(link)
self.addlinkref(page, url, linkText)
self.dbcommit()
pages = newpages
def createindextable(self, drop_existing = False):
#TODO: Add logic to check if tables exist and accordingly create/drop first
if drop_existing:
self.con.execute('drop table if exists urllist')
self.con.execute('drop table if exists wordlist')
self.con.execute('drop table if exists wordlocation')
self.con.execute('drop table if exists link')
self.con.execute('drop table if exists linkwords')
self.con.execute('create table urllist(url)')
self.con.execute('create table wordlist(word)')
self.con.execute('create table wordlocation(urlid, wordid, location)')
self.con.execute('create table link(fromid integer, toid integer)')
self.con.execute('create table linkwords(wordid, linkid)')
self.con.execute('create index wordidx on wordlist(word)')
self.con.execute('create index urlidx on urllist(url)')
self.con.execute('create index wordurlidx on wordlocation(wordid)')
self.con.execute('create index urltoidx on link(toid)')
self.con.execute('create index urlfromidx on link(fromid)')
print "Schema Successfully (Re)created"
class searcher:
#
#
#
def __init__(self, dbname):
self.con=sqlite.connect(dbname)
#
#
#
def __del__(self):
self.con.close()
#
#
#
def getmatchrows(self, q, n = 100):
        #Added n here to make querying faster; not ideal though, as it will miss out on some results
#Split the words by space
words = q.split(' ')
in_values = ", ".join(["'%s'" % word.lower() for word in words])
cursor = self.con.execute("select word, rowid from wordlist where word in (%s)" % in_values)
available_words = [(elem[0], elem[1]) for elem in cursor]
if len(available_words) > 0:
fields_tables_conditions = [
("w%d.location" % i,
"wordlocation w%d" % i,
"w%d.wordid = %s" % (i, available_words[i][1])
) for i in range(len(available_words))]
joins = " and ".join(["w%d.urlid = w%d.urlid" % (i - 1, i) for i in range(1, len(available_words))])
(field_list, table_list, condition_list) = zip(*fields_tables_conditions)
tables = ", ".join(table_list)
fields = "w0.urlid, " + ", ".join(field_list)
conditions = " and ".join(condition_list)
conditions = conditions if len(joins) == 0 else joins + " and " + conditions
query = "select %s from %s where %s limit %d" % (fields, tables, conditions, n)
(_, word_ids) = zip(*available_words)
return [row for row in self.con.execute(query)], word_ids
else:
return None
def geturlname(self, urlid):
return self.con.execute("select url from urllist where rowid = %d" % urlid).fetchone()[0]
#
# Value 0 indicates min score and 1 indicates max score
#
def normalize(self, scores, smallIsBetter = True):
vsmall = 0.00001
if smallIsBetter:
min_score = min(scores.values())
return dict([(k, float(min_score) / max(v, vsmall)) for (k, v) in scores.items()])
else:
max_score = max(scores.values())
if max_score == 0 : max_score = vsmall
return dict([ (k, float(v) / max_score) for (k, v) in scores.items()])
#
#
#
def frequencyscore(self, rows):
return self.normalize(dict(
[
(key, len(list(values)) ) for (key, values) in groupby(sorted(rows), lambda (url, _1, _2) : url)
]
), smallIsBetter = False)
#
#
#
def getscoredlist(self, rows, wordids):
totalscores = dict([(row[0], 0) for row in rows])
weights = [(1.0, self.frequencyscore(rows))]
#Complex
for (weight, score) in weights:
for url in totalscores:
totalscores[url] += weight * score[url]
return totalscores
#
#
#
def query(self, q, n = 10):
rows, wordids = self.getmatchrows(q)
scores = self.getscoredlist(rows, wordids)
rankedscores = sorted([(score, urlid) for (urlid, score) in scores.items()], reverse =True)
#Why sort all when top n needed?
for (score, urlid) in rankedscores[0:n]:
print "%f\t%s" % (score, self.geturlname(urlid))
#crawler = crawler("searchindex.db")
#crawler.createindextable(True)
#crawler.createindextable()
#crawler.crawl(["https://en.wikipedia.org/wiki/Programming_language", "https://en.wikipedia.org/wiki/Functional_programming"])
searcher = searcher('searchindex.db')
#crawler.con.execute('create index wordurlidx_1 on wordlocation(urlid)')
#Works badly for long queries, following for instance screws
#results = searcher.getmatchrows("Functional programming with Scala and python")
#Following doesn't work too well and returns 123689 results
q = 'Programming in Scala'
print "Searching for text '%s'" % q
searcher.query(q)
| 34.22695 | 126 | 0.533465 |
7945559bc52819429ee5f7362205cece2fc48279 | 2,550 | py | Python | zun/tests/unit/common/test_profiler.py | wanghuiict/zun | 2f4a3a2ba06d7ca83002418d4003ee5dece70952 | [
"Apache-2.0"
] | 83 | 2016-09-14T22:06:26.000Z | 2022-01-27T03:49:52.000Z | zun/tests/unit/common/test_profiler.py | wanghuiict/zun | 2f4a3a2ba06d7ca83002418d4003ee5dece70952 | [
"Apache-2.0"
] | 2 | 2017-06-22T21:58:47.000Z | 2019-04-10T03:17:44.000Z | zun/tests/unit/common/test_profiler.py | wanghuiict/zun | 2f4a3a2ba06d7ca83002418d4003ee5dece70952 | [
"Apache-2.0"
] | 54 | 2016-09-29T10:16:02.000Z | 2022-01-28T19:12:49.000Z | # Copyright 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import importlib
import inspect
from unittest import mock
from oslo_utils import importutils
from osprofiler import initializer as profiler_init
from osprofiler import opts as profiler_opts
from zun.common import profiler
from zun import conf
from zun.tests import base
class TestProfiler(base.TestCase):
def test_all_public_methods_are_traced(self):
profiler_opts.set_defaults(conf.CONF)
self.config(enabled=True,
group='profiler')
classes = [
'zun.compute.api.API',
'zun.compute.rpcapi.API',
]
for clsname in classes:
# give the metaclass and trace_cls() decorator a chance to patch
# methods of the classes above
importlib.reload(
importutils.import_module(clsname.rsplit('.', 1)[0]))
cls = importutils.import_class(clsname)
for attr, obj in cls.__dict__.items():
# only public methods are traced
if attr.startswith('_'):
continue
# only checks callables
if not (inspect.ismethod(obj) or inspect.isfunction(obj)):
continue
# osprofiler skips static methods
if isinstance(obj, staticmethod):
continue
self.assertTrue(getattr(obj, '__traced__', False), obj)
@mock.patch.object(profiler_init, 'init_from_conf')
def test_setup_profiler(self, mock_init):
self.config(enabled=True,
group='profiler')
profiler.setup('foo', 'localhost')
mock_init.assert_called_once_with(conf=conf.CONF,
context=mock.ANY,
project="zun",
service='foo',
host='localhost')
| 35.915493 | 78 | 0.603922 |
794555fb4ed39e36cfe93277f3adaba6169b9570 | 13,575 | py | Python | ctapipe/core/tool.py | capaths/ctapipe | be45ff531425a538240c984243df4f8ebc82d2f6 | [
"BSD-3-Clause"
] | null | null | null | ctapipe/core/tool.py | capaths/ctapipe | be45ff531425a538240c984243df4f8ebc82d2f6 | [
"BSD-3-Clause"
] | null | null | null | ctapipe/core/tool.py | capaths/ctapipe | be45ff531425a538240c984243df4f8ebc82d2f6 | [
"BSD-3-Clause"
] | null | null | null | """ Classes to handle configurable command-line user interfaces """
import logging
import textwrap
from abc import abstractmethod
from traitlets import Unicode
from traitlets.config import Application, Configurable
from .. import __version__ as version
from .traits import Path
from . import Provenance
from .logging import ColoredFormatter
class ToolConfigurationError(Exception):
def __init__(self, message):
# Call the base class constructor with the parameters it needs
self.message = message
class Tool(Application):
"""A base class for all executable tools (applications) that handles
configuration loading/saving, logging, command-line processing,
and provenance meta-data handling. It is based on
`traitlets.config.Application`. Tools may contain configurable
`ctapipe.core.Component` classes that do work, and their
configuration parameters will propagate automatically to the
`Tool`.
Tool developers should create sub-classes, and a name,
description, usage examples should be added by defining the
`name`, `description` and `examples` class attributes as
strings. The `aliases` attribute can be set to cause a lower-level
`Component` parameter to become a high-level command-line
parameter (See example below). The `setup()`, `start()`, and
`finish()` methods should be defined in the sub-class.
Additionally, any `ctapipe.core.Component` used within the `Tool`
should have their class in a list in the `classes` attribute,
which will automatically add their configuration parameters to the
tool.
Once a tool is constructed and the virtual methods defined, the
user can call the `run()` method to setup and start it.
.. code:: python
from ctapipe.core import Tool
from traitlets import (Integer, Float, List, Dict, Unicode)
class MyTool(Tool):
name = "mytool"
description = "do some things and stuff"
aliases = Dict({'infile': 'AdvancedComponent.infile',
'iterations': 'MyTool.iterations'})
# Which classes are registered for configuration
classes = List([MyComponent, AdvancedComponent,
SecondaryMyComponent])
# local configuration parameters
iterations = Integer(5,help="Number of times to run",
allow_none=False).tag(config=True)
def setup_comp(self):
self.comp = MyComponent(self, config=self.config)
self.comp2 = SecondaryMyComponent(self, config=self.config)
def setup_advanced(self):
self.advanced = AdvancedComponent(self, config=self.config)
def setup(self):
self.setup_comp()
self.setup_advanced()
def start(self):
self.log.info("Performing {} iterations..."\
.format(self.iterations))
for ii in range(self.iterations):
self.log.info("ITERATION {}".format(ii))
self.comp.do_thing()
self.comp2.do_thing()
sleep(0.5)
def finish(self):
self.log.warning("Shutting down.")
def main():
tool = MyTool()
tool.run()
if __name__ == "main":
main()
If this `main()` method is registered in `setup.py` under
*entry_points*, it will become a command-line tool (see examples
in the `ctapipe/tools` subdirectory).
"""
config_file = Path(
exists=True,
directory_ok=False,
help=(
"name of a configuration file with "
"parameters to load in addition to "
"command-line parameters"
),
).tag(config=True)
log_format = Unicode(
"%(levelname)s [%(name)s] (%(module)s/%(funcName)s): %(message)s",
help="The Logging format template",
).tag(config=True)
_log_formatter_cls = ColoredFormatter
def __init__(self, **kwargs):
# make sure there are some default aliases in all Tools:
if self.aliases:
self.aliases["log-level"] = "Application.log_level"
self.aliases["config"] = "Tool.config_file"
super().__init__(**kwargs)
self.log_level = logging.INFO
self.is_setup = False
self._registered_components = []
self.version = version
self.raise_config_file_errors = True # override traitlets.Application default
def initialize(self, argv=None):
""" handle config and any other low-level setup """
self.parse_command_line(argv)
if self.config_file is not None:
self.log.debug(f"Loading config from '{self.config_file}'")
try:
self.load_config_file(self.config_file)
except Exception as err:
raise ToolConfigurationError(f"Couldn't read config file: {err}")
self.log.info(f"ctapipe version {self.version_string}")
# ensure command-line takes precedence over config file options:
self.update_config(self.cli_config)
def add_component(self, component_instance):
"""
constructs and adds a component to the list of registered components,
so that later we can ask for the current configuration of all instances,
e.g. in`get_full_config()`. All sub-components of a tool should be
constructed using this function, in order to ensure the configuration is
properly traced.
Parameters
----------
component_instance: Component
constructed instance of a component
Returns
-------
Component:
the same component instance that was passed in, so that the call
can be chained.
Examples
--------
.. code-block:: python3
self.mycomp = self.add_component(MyComponent(parent=self))
"""
self._registered_components.append(component_instance)
return component_instance
@abstractmethod
def setup(self):
"""set up the tool (override in subclass). Here the user should
construct all `Components` and open files, etc."""
pass
@abstractmethod
def start(self):
"""main body of tool (override in subclass). This is automatically
called after `initialize()` when the `run()` is called.
"""
pass
@abstractmethod
def finish(self):
"""finish up (override in subclass). This is called automatically
after `start()` when `run()` is called."""
self.log.info("Goodbye")
def run(self, argv=None):
"""Run the tool. This automatically calls `initialize()`,
`start()` and `finish()`
Parameters
----------
argv: list(str)
command-line arguments, or None to get them
from sys.argv automatically
"""
# return codes are taken from:
# http://tldp.org/LDP/abs/html/exitcodes.html
exit_status = 0
try:
self.initialize(argv)
self.log.info(f"Starting: {self.name}")
Provenance().start_activity(self.name)
self.setup()
self.is_setup = True
self.log.debug(f"CONFIG: {self.get_current_config()}")
Provenance().add_config(self.get_current_config())
self.start()
self.finish()
self.log.info(f"Finished: {self.name}")
Provenance().finish_activity(activity_name=self.name)
except ToolConfigurationError as err:
self.log.error(f"{err}. Use --help for more info")
exit_status = 2 # wrong cmd line parameter
except KeyboardInterrupt:
self.log.warning("WAS INTERRUPTED BY CTRL-C")
Provenance().finish_activity(activity_name=self.name, status="interrupted")
exit_status = 130 # Script terminated by Control-C
except Exception as err:
self.log.exception(f"Caught unexpected exception: {err}")
Provenance().finish_activity(activity_name=self.name, status="error")
exit_status = 1 # any other error
finally:
for activity in Provenance().finished_activities:
output_str = " ".join([x["url"] for x in activity.output])
self.log.info("Output: %s", output_str)
self.log.debug("PROVENANCE: '%s'", Provenance().as_json(indent=3))
with open("provenance.log", mode="w+") as provlog:
provlog.write(Provenance().as_json(indent=3))
self.exit(exit_status)
@property
def version_string(self):
""" a formatted version string with version, release, and git hash"""
return f"{version}"
def get_current_config(self):
""" return the current configuration as a dict (e.g. the values
of all traits, even if they were not set during configuration)
"""
conf = {
self.__class__.__name__: {
k: v.get(self) for k, v in self.traits(config=True).items()
}
}
for component in self._registered_components:
conf.update(component.get_current_config())
return conf
def _repr_html_(self):
""" nice HTML rep, with blue for non-default values"""
traits = self.traits()
name = self.__class__.__name__
lines = [
f"<b>{name}</b>",
f"<p> {self.__class__.__doc__ or self.description} </p>",
"<table>",
]
for key, val in self.get_current_config()[name].items():
default = traits[key].default_value
thehelp = f"{traits[key].help} (default: {default})"
lines.append(f"<tr><th>{key}</th>")
if val != default:
lines.append(f"<td><span style='color:blue'>{val}</span></td>")
else:
lines.append(f"<td>{val}</td>")
lines.append(f'<td style="text-align:left"><i>{thehelp}</i></td></tr>')
lines.append("</table>")
lines.append("<p><i>Components:</i>")
lines.append(", ".join([x.__name__ for x in self.classes]))
lines.append("</p>")
return "\n".join(lines)
def export_tool_config_to_commented_yaml(tool_instance: Tool, classes=None):
"""
Turn the config of a single Component into a commented YAML string.
This is a hacked version of
traitlets.config.Configurable._class_config_section() changed to
output a YAML file with defaults *and* current values filled in.
Parameters
----------
tool_instance: Tool
a constructed Tool instance
classes: list, optional
The list of other classes in the config file.
Used to reduce redundant information.
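    Examples
    --------
    Illustrative sketch; ``my_tool`` stands for an already-constructed Tool
    instance (hypothetical name, not defined in this module).
    >>> yaml_text = export_tool_config_to_commented_yaml(my_tool)
    >>> print(yaml_text.splitlines()[0])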
"""
tool = tool_instance.__class__
config = tool_instance.get_current_config()[tool_instance.__class__.__name__]
def commented(text, indent_level=2, width=70):
"""return a commented, wrapped block."""
return textwrap.fill(
text,
width=width,
initial_indent=" " * indent_level + "# ",
subsequent_indent=" " * indent_level + "# ",
)
# section header
breaker = "#" + "-" * 78
parent_classes = ", ".join(
p.__name__ for p in tool.__bases__ if issubclass(p, Configurable)
)
section_header = f"# {tool.__name__}({parent_classes}) configuration"
lines = [breaker, section_header]
# get the description trait
desc = tool.class_traits().get("description")
if desc:
desc = desc.default_value
if not desc:
# no description from trait, use __doc__
desc = getattr(tool, "__doc__", "")
if desc:
lines.append(commented(desc, indent_level=0))
lines.append(breaker)
lines.append(f"{tool.__name__}:")
for name, trait in sorted(tool.class_traits(config=True).items()):
default_repr = trait.default_value_repr()
current_repr = config.get(name, "")
if isinstance(current_repr, str):
current_repr = f'"{current_repr}"'
if classes:
defining_class = tool._defining_class(trait, classes)
else:
defining_class = tool
if defining_class is tool:
# cls owns the trait, show full help
if trait.help:
lines.append(commented(trait.help))
if "Enum" in type(trait).__name__:
# include Enum choices
lines.append(commented(f"Choices: {trait.info()}"))
lines.append(commented(f"Default: {default_repr}"))
else:
# Trait appears multiple times and isn't defined here.
# Truncate help to first line + "See also Original.trait"
if trait.help:
lines.append(commented(trait.help.split("\n", 1)[0]))
lines.append(f" # See also: {defining_class.__name__}.{name}")
lines.append(f" {name}: {current_repr}")
lines.append("")
return "\n".join(lines)
def run_tool(tool: Tool, argv=None):
"""
    Utility to run a tool in a Python session without exiting
Returns
-------
exit_code: int
The return code of the tool, 0 indicates success, everything else an error
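    Examples
    --------
    Illustrative sketch; ``MyTool`` stands for any concrete ``Tool`` subclass
    such as the one in the class docstring above.
    >>> exit_code = run_tool(MyTool(), argv=['--log-level', 'INFO'])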
"""
try:
tool.run(argv or [])
return 0
except SystemExit as e:
return e.code
| 35.443864 | 87 | 0.602284 |
79455691ec7239ee3af7412e8cb7a00b6d2af777 | 2,661 | py | Python | setup.py | lpawluczuk/foodwebviz | 11dc6d49e33634ca074f597b9eef0f146c350bcf | [
"BSD-3-Clause"
] | 1 | 2022-03-08T13:53:26.000Z | 2022-03-08T13:53:26.000Z | setup.py | lpawluczuk/foodwebviz | 11dc6d49e33634ca074f597b9eef0f146c350bcf | [
"BSD-3-Clause"
] | 2 | 2021-11-15T14:05:09.000Z | 2022-02-19T10:27:57.000Z | setup.py | lpawluczuk/foodwebviz | 11dc6d49e33634ca074f597b9eef0f146c350bcf | [
"BSD-3-Clause"
] | null | null | null | import sys
from setuptools import setup
if sys.version_info[:2] < (3, 7):
sys.stderr.write(f'foodwebviz requires Python 3.7 or later ({sys.version_info[:2]} detected).\n')
sys.exit(1)
name = "foodwebviz"
description = "Python package for creating and visualizing foodwebs"
authors = {
"Pawluczuk": ("Łukasz Pawluczuk", ""),
"Iskrzyński": ("Mateusz Ikrzyński", ""),
}
maintainer = ""
maintainer_email = ""
url = ""
project_urls = {
"Bug Tracker": "https://github.com/lpawluczuk/foodwebviz/issues",
"Documentation": "https://github.com/lpawluczuk/foodwebviz",
"Source Code": "https://github.com/lpawluczuk/foodwebviz",
}
platforms = ["Linux", "Mac OSX", "Windows", "Unix"]
keywords = [
"foodwebs",
]
classifiers = [ # TODO
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Physics",
]
with open("foodwebviz/__init__.py") as fid:
for line in fid:
if line.startswith("__version__"):
version = line.strip().split()[-1][1:-1]
break
packages = [
"foodwebviz",
"foodwebviz.animation"
]
def parse_requirements_file(filename):
with open(filename) as f:
requires = [x.strip() for x in f.readlines() if not x.startswith("#")]
return requires
install_requires = parse_requirements_file("requirements.txt")
with open("README.md", "r", encoding='utf-8') as fh:
long_description = fh.read()
if __name__ == "__main__":
setup(
name=name,
version=version,
maintainer=maintainer,
maintainer_email=maintainer_email,
author=authors["Pawluczuk"][0],
author_email=authors["Pawluczuk"][1],
description=description,
keywords=keywords,
long_description=long_description,
platforms=platforms,
packages=packages,
url=url,
project_urls=project_urls,
classifiers=classifiers,
install_requires=install_requires,
python_requires=">=3.7",
zip_safe=False,
)
| 29.898876 | 101 | 0.645998 |
79455697737ebe489a36415bafa2e41eac76d05e | 6,426 | py | Python | examples/pwr_run/checkpointing/debug/ovhd_profile/job28.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/debug/ovhd_profile/job28.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/debug/ovhd_profile/job28.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
load_start = time.time()
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.004
args_model = 'densenet169'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_test/' + job_name + '*'
total_epochs = 46
starting_epoch = 0
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
if '121' in args_model:
base_model = DenseNet121(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '169' in args_model:
base_model = DenseNet169(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '201' in args_model:
base_model = DenseNet201(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
model.add(base_model)
#model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber=None, frame=None):
    # signal handlers are called with (signum, frame); the defaults also allow a direct call with no arguments
save_start = time.time()
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_test/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
save_time = int(time.time() - save_start)
message = job_name + ' save ' + str(save_time)
send_signal.send(args.node, 10002, message)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
load_time = int(time.time() - load_start)
if args.resume:
message = job_name + ' load ' + str(load_time)
send_signal.send(args.node, 10002, message)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
sys.exit()
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=1,
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
if not args.resume:
terminateProcess()
| 31.5 | 118 | 0.698724 |
794556a69624c19347e17e808a86077f4e9911e8 | 3,626 | py | Python | bin/user/weatherlink_live/davis_broadcast.py | nickbp/weewx-weatherlink-live | 2c18b5da578e175cec3c893bf73450e53da71fd4 | [
"MIT"
] | 12 | 2021-01-10T12:00:25.000Z | 2022-01-22T09:20:05.000Z | bin/user/weatherlink_live/davis_broadcast.py | nickbp/weewx-weatherlink-live | 2c18b5da578e175cec3c893bf73450e53da71fd4 | [
"MIT"
] | 19 | 2020-10-21T12:38:27.000Z | 2022-03-02T13:33:19.000Z | bin/user/weatherlink_live/davis_broadcast.py | nickbp/weewx-weatherlink-live | 2c18b5da578e175cec3c893bf73450e53da71fd4 | [
"MIT"
] | 2 | 2021-02-19T13:09:04.000Z | 2022-03-01T20:16:55.000Z | # Copyright © 2020-2021 Michael Schantl and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import logging
import select
import threading
from json import JSONDecodeError
from socket import socket, AF_INET, SOCK_DGRAM, SOL_SOCKET, SO_BROADCAST, SO_REUSEADDR
from user.weatherlink_live.callback import PacketCallback
from user.weatherlink_live.packets import WlUdpBroadcastPacket
from weewx import WeeWxIOError
log = logging.getLogger(__name__)
class WllBroadcastReceiver(object):
"""Receive UDP broadcasts from WeatherLink Live"""
def __init__(self, broadcasting_wl_host: str, port: int, callback: PacketCallback):
self.broadcasting_wl_host = broadcasting_wl_host
self.port = port
self.callback = callback
self.wait_timeout = 5
self.sock = None
self.stop_signal = threading.Event()
self.thread = threading.Thread(name='WLL-BroadcastReception', target=self._reception)
self.thread.daemon = True
self.thread.start()
def _reception(self):
log.debug("Starting broadcast reception")
try:
self.sock = socket(AF_INET, SOCK_DGRAM)
self.sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.sock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
self.sock.bind(('', self.port))
while not self.stop_signal.is_set():
r, _, _ = select.select([self.sock], [], [], self.wait_timeout)
if not r:
continue
data, source_addr = self.sock.recvfrom(2048)
log.debug("Received %d bytes from %s" % (len(data), source_addr))
try:
json_data = json.loads(data.decode("utf-8"))
except JSONDecodeError as e:
raise WeeWxIOError("Error decoding broadcast packet JSON") from e
packet = WlUdpBroadcastPacket.try_create(json_data, self.broadcasting_wl_host)
self.callback.on_packet_received(packet)
except Exception as e:
self.callback.on_packet_receive_error(e)
raise e
def close(self):
log.debug("Stopping broadcast reception")
self.stop_signal.set()
self.thread.join(self.wait_timeout * 3)
if self.thread.is_alive():
log.warn("Broadcast reception thread still alive. Force closing socket")
if self.sock is not None:
self.sock.close()
self.sock = None
log.debug("Closed broadcast receiving socket")
log.debug("Stopped broadcast reception")
| 38.989247 | 94 | 0.683673 |
7945584c8d1a5a1e7719d9fc20aa9488973ce3a2 | 1,192 | py | Python | src/model/model_creator.py | gauthamp10/mldeploy | 556fe3ff5d19891f73bd9fb1295c4acc4ea6af8e | [
"MIT"
] | null | null | null | src/model/model_creator.py | gauthamp10/mldeploy | 556fe3ff5d19891f73bd9fb1295c4acc4ea6af8e | [
"MIT"
] | null | null | null | src/model/model_creator.py | gauthamp10/mldeploy | 556fe3ff5d19891f73bd9fb1295c4acc4ea6af8e | [
"MIT"
] | 1 | 2020-10-08T23:07:40.000Z | 2020-10-08T23:07:40.000Z | #!/usr/bin/env python
"""Linear Regression model"""
# Importing libraries
import pandas as pd
import pickle
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
# Loding the dataset
data = pd.read_csv('./dataset/weight-height.csv')
# Preview of the data
data.head()
# Selecting x's and y's
X = data[['Weight']].values
y = data[['Height']].values
# Splitting the data to test and train
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1/3)
# Creating a LinearRegression object and fitting the data
regressor = LinearRegression()
regressor.fit(X_train, y_train)
# Testing the model
heights = [161,187,195,156]
prediction = [regressor.predict([[height]]) for height in heights]
prediction = [round(float(weight[0][0]),2) for weight in prediction]
print("Predicted weights:",end='')
print(prediction)
# Saving the model to a file
try:
filename = 'model.pkl'
pickle.dump(regressor, open(filename, 'wb'))
print('Model saved as {}'.format(filename))
except Exception as e:
print("Something went wrong when writing to the file")
print(e)
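# Loading the saved model back later (illustrative sketch):
#   with open('model.pkl', 'rb') as f:
#       loaded_model = pickle.load(f)
#   print(loaded_model.predict([[80]]))  # predict for a weight of 80 (same units as the training data)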
| 34.057143 | 72 | 0.741611 |
794558df38966a24976969d67b03ba34f8c9a61d | 3,859 | py | Python | vkapi/groups.py | effordsbeard/vk-sdk | 719ef5a1ffa2a0c067dbea5014f40da54f86646b | [
"MIT"
] | null | null | null | vkapi/groups.py | effordsbeard/vk-sdk | 719ef5a1ffa2a0c067dbea5014f40da54f86646b | [
"MIT"
] | null | null | null | vkapi/groups.py | effordsbeard/vk-sdk | 719ef5a1ffa2a0c067dbea5014f40da54f86646b | [
"MIT"
] | null | null | null | from vk import VKAPI
class Groups(VKAPI):
method_class = 'groups'
def __init__(self, access_token=''):
super(Groups, self).__init__(access_token=access_token)
def add_link(self, **params):
self.set_method('addLink')
return self.send(params)
def approve_request(self, **params):
self.set_method('approveRequest')
return self.send(params)
def ban_user(self, **params):
self.set_method('banUser')
return self.send(params)
def create(self, **params):
self.set_method('create')
return self.send(params)
def delete_link(self, **params):
self.set_method('deleteLink')
return self.send(params)
def edit(self, **params):
self.set_method('edit')
return self.send(params)
def edit_link(self, **params):
self.set_method('editLink')
return self.send(params)
def edit_manager(self, **params):
self.set_method('editManager')
return self.send(params)
def edit_place(self, **params):
self.set_method('editPlace')
return self.send(params)
def get(self, **params):
self.set_method('get')
return self.send(params)
def get_banned(self, **params):
self.set_method('getBanned')
return self.send(params)
def get_by_id(self, **params):
self.set_method('getById')
return self.send(params)
def get_callback_confirmation_code(self, **params):
self.set_method('getCallbackConfirmationCode')
return self.send(params)
def get_callback_server_settings(self, **params):
self.set_method('getCallbackServerSettings')
return self.send(params)
def get_callback_settings(self, **params):
self.set_method('getCallbackSettings')
return self.send(params)
def get_catalog(self, **params):
self.set_method('getCatalog')
return self.send(params)
def get_catalog_info(self, **params):
self.set_method('getCatalogInfo')
return self.send(params)
def get_invited_users(self, **params):
self.set_method('getInvitedUsers')
return self.send(params)
def get_invites(self, **params):
self.set_method('getInvites')
return self.send(params)
def get_members(self, **params):
self.set_method('getMembers')
return self.send(params)
def get_requests(self, **params):
self.set_method('getRequests')
return self.send(params)
def get_settings(self, **params):
self.set_method('getSettings')
return self.send(params)
def invite(self, **params):
self.set_method('invite')
return self.send(params)
def is_member(self, **params):
self.set_method('isMember')
return self.send(params)
def join(self, **params):
self.set_method('join')
return self.send(params)
def leave(self, **params):
self.set_method('leave')
return self.send(params)
def remove_user(self, **params):
self.set_method('removeUser')
return self.send(params)
def reorder_link(self, **params):
self.set_method('reorderLink')
return self.send(params)
def search(self, **params):
self.set_method('search')
return self.send(params)
def set_callback_server(self, **params):
self.set_method('setCallbackServer')
return self.send(params)
def set_callback_server_settings(self, **params):
self.set_method('setCallbackServerSettings')
return self.send(params)
def set_callback_settings(self, **params):
self.set_method('setCallbackSettings')
return self.send(params)
def unban_user(self, **params):
self.set_method('unbanUser')
return self.send(params)
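# Example usage (illustrative; requires a valid VK access token and group/user ids):
#   groups_api = Groups(access_token='YOUR_TOKEN')
#   response = groups_api.get(user_id=1, extended=1)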
| 27.176056 | 63 | 0.634361 |
7945596caa790b97d7a243a15e9bbc8c42f6b623 | 991 | py | Python | Automated_Testing/Selenium/Raymour Flannigan/ray-flan-txt.py | bnonni/Python | 9ebd18caa4e2d805028b557e8b77ea65a9ee1a3d | [
"Apache-2.0"
] | 4 | 2019-10-05T03:41:20.000Z | 2020-11-04T00:39:13.000Z | Automated_Testing/Selenium/Raymour Flannigan/ray-flan-txt.py | bnonni/Python | 9ebd18caa4e2d805028b557e8b77ea65a9ee1a3d | [
"Apache-2.0"
] | null | null | null | Automated_Testing/Selenium/Raymour Flannigan/ray-flan-txt.py | bnonni/Python | 9ebd18caa4e2d805028b557e8b77ea65a9ee1a3d | [
"Apache-2.0"
] | 2 | 2019-10-02T14:08:51.000Z | 2019-10-03T20:49:09.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 2 16:20:59 2018
@author: bryan.nonni
"""
import os
#import time
from datetime import datetime
from selenium import webdriver
#
# Geckodriver location: path to the Firefox driver executable in the current folder
firefox_path = os.path.join('.', 'geckodriver.exe')
#print(firefox_path)
def ray_flan_get_txt(screen_folder):
#not passing driver in function bc we're only looking at one Driver: FF
"""
"""
driver = webdriver.Firefox()
driver.get("https://webapi.raymourflanigan.com/api/product/feed?type=google&delimiter=%7C&encoding=UTF-8")
driver.implicitly_wait(10)
#time.sleep(10)
driver.find_element_by_css_selector('pre').get(text)
driver.close()
## Checking
data_path = datetime.strftime(datetime.today(), '%Y-%m-%d %H-%M')
if data_path in os.listdir(os.getcwd()):
pass
else:
print("File not found, Creating Now . . .")
os.mkdir(data_path)
ray_flan_get_txt(data_path)
| 19.057692 | 110 | 0.677094 |
79455a3ef6d0ddf5b06f7c1e571a1dfc26ea8bfa | 196 | py | Python | stilio/crawler/main.py | fakegit/stilio | cf198b8ccadc7dcadc462ce83b801af00ef4e2f2 | [
"Apache-2.0"
] | 71 | 2019-10-09T17:18:12.000Z | 2022-02-26T12:15:53.000Z | stilio/crawler/main.py | fakegit/stilio | cf198b8ccadc7dcadc462ce83b801af00ef4e2f2 | [
"Apache-2.0"
] | 3 | 2019-10-16T17:52:48.000Z | 2021-12-01T16:50:18.000Z | stilio/crawler/main.py | fakegit/stilio | cf198b8ccadc7dcadc462ce83b801af00ef4e2f2 | [
"Apache-2.0"
] | 11 | 2020-01-21T09:09:14.000Z | 2022-03-27T12:05:36.000Z | from stilio.crawler.dht.crawling import CrawlingService
from stilio.persistence import database
if __name__ == "__main__":
# database.init()
crawler = CrawlingService()
crawler.run()
| 24.5 | 55 | 0.744898 |
79455ba8d7027cf84131d0e0e9714af51335d990 | 16,511 | py | Python | courses/modsim2018/tasks/Tasks_DuringLecture18/BMC-master/functions/io_cortexmac.py | raissabthibes/bmc | 840800fb94ea3bf188847d0771ca7197dfec68e3 | [
"MIT"
] | null | null | null | courses/modsim2018/tasks/Tasks_DuringLecture18/BMC-master/functions/io_cortexmac.py | raissabthibes/bmc | 840800fb94ea3bf188847d0771ca7197dfec68e3 | [
"MIT"
] | null | null | null | courses/modsim2018/tasks/Tasks_DuringLecture18/BMC-master/functions/io_cortexmac.py | raissabthibes/bmc | 840800fb94ea3bf188847d0771ca7197dfec68e3 | [
"MIT"
] | null | null | null | """Read and write Cortex Motion Analysis Corporation ASCII related files.
read_trc(fname, fname2='_2', units='', df_multi=True): Read .trc file.
read_anc(fname): Read .anc file.
read_cal(fname): Read .cal file.
read_forces(fname): Read .forces file.
write_trc(fname, header, df): Write .trc file.
write_v3dtxt(fname, trc, forces, freq=0): Write Visual3d text file
from .trc and .forces files or dataframes.
grf_moments(data, O): Calculate force plate moments around its origin
given 3 forces, 2 COPs, 1 free moment, and its geometric position.
"""
__author__ = "Marcos Duarte, https://github.com/demotu/BMC"
__version__ = "1.0.1"
__license__ = "MIT"
import os
import csv
import numpy as np
import pandas as pd
def read_trc(fname, fname2='_2', units='', df_multi=True):
"""Read .trc file format from Cortex MAC.
    This function: 1. Deletes markers (columns) with no data; 2. Corrects the
    number of markers in the header according to the actual number of
    non-empty markers; 3. Saves a '.trc' file with the updated information and
    data; 4. Returns the header information and data.
The .trc (Track Row Column) file in ASCII contains X-Y-Z position
data for the reflective markers from a motion capture trial. The
position data for each marker is organized into 3 columns per marker
(X, Y and Z position) with each row being a new frame. The position
data is relative to the global coordinate system of the capture volume
and the position values are in the units used for calibration.
Parameters
----------
fname : string
Full file name of the .trc file to be opened.
fname2 : string (default = '_2')
Full file name of the .trc file to be saved with updated information
and data if desired.
If fname2 is '', no file is saved.
If fname2 is '=', the original file name will be used.
If fname2 is a string with length between 1 and 3, this string (other
than '=') is appended to the original file name.
units : string (default = '')
Change the units of the data if desired.
Accepted output units are 'm' or 'mm'.
df_multi : bool (default = True)
Whether to output data as pandas multiindex dataframe with "Marker"
and "Coordinate" as labels and "Time" as index (True) or simple
pandas dataframe with "Frame#" and "Time" as columns (False).
Returns
-------
h : Python dictionary with .trc header information
keys: header (the .trc full header), data_rate (Hz), camera_rate (Hz),
nframes, nmarkers, markers (names), xyz (X1,Y1,Z1...), units.
data : pandas dataframe
Two possible output formats according to the `df_multi` option:
Dataframe with shape (nframes, 2+3*nmarkers) with markerxyz as label
and columns: Frame#, time and position data.
Dataframe with shape (nframes, 3*nmarkers) with "Marker" and
"Coordinate" as labels, "Time" as index, and data position as columns.
"""
with open(file=fname, mode='rt', encoding='utf-8', newline='') as f:
print('Opening file "{}"'.format(fname))
# get header information
read = csv.reader(f, delimiter='\t')
header = [next(read) for x in range(5)]
# actual number of markers
nmarkers = int((len(header[3])-2)/3)
# column labels
markers = np.asarray(header[3])[np.arange(2, 2+3*nmarkers, 3)].tolist()
markers3 = [m for m in markers for i in range(3)]
markersxyz = [a+b for a, b in zip(markers3, ['x', 'y', 'z']*nmarkers)]
# read data
df = pd.read_csv(f, sep='\t', names=['Frame#', 'Time'] + markersxyz,
index_col=False, encoding='utf-8', engine='c')
# drop markers with no data
df.dropna(axis=1, how='all', inplace=True)
# update header
nmarkers = int((df.shape[1]-2)/3)
if header[2][3] != str(nmarkers):
print(' Number of markers changed from {} to {}.'
.format(header[2][3], nmarkers))
header[2][3] = str(nmarkers)
header[3] = ['' if c[-1] in ['y', 'z'] else c[:-1] if c[-1] in ['x']
else c for c in df.columns.values.tolist()] + ['']
markers = np.asarray(header[3])[np.arange(2, 2+3*nmarkers, 3)].tolist()
n3 = np.repeat(range(1, nmarkers+1), 3).tolist()
xyz = [a+str(b) for a, b in zip(['X', 'Y', 'Z']*nmarkers, n3)]
header[4] = ['', ''] + xyz
if units == 'm':
if header[2][4] == 'mm':
df.iloc[:, 2:] = df.iloc[:, 2:]/1000
header[2][4] = 'm'
print(' Units changed from {} to {}'.format('"mm"', '"m"'))
elif units == 'mm':
if header[2][4] == 'm':
df.iloc[:, 2:] = df.iloc[:, 2:]*1000
header[2][4] = 'mm'
print(' Units changed from {} to {}'.format('"m"', '"mm"'))
# save file
if len(fname2):
if fname2 == '=':
fname2 = fname
elif len(fname2) <= 3:
name, extension = os.path.splitext(fname)
fname2 = name + fname2 + extension
write_trc(fname2, header, df)
# outputs
h = {'header': header,
'data_rate': float(header[2][0]),
'camera_rate': float(header[2][1]),
'nframes': int(header[2][2]),
'nmarkers': int(header[2][3]),
'markers': markers,
'xyz': xyz,
'units': header[2][4],
'fname': fname,
'fname2': fname2}
if df_multi:
df.drop(labels='Frame#', axis=1, inplace=True)
df.set_index('Time', inplace=True)
df.index.name = 'Time'
cols = [s[:-1] for s in df.columns.str.replace(r'.', r'_')]
df.columns = [cols, list('XYZ')*int(df.shape[1]/3), xyz]
df.columns.set_names(names=['Marker', 'Coordinate', 'XYZ'], level=[0, 1, 2], inplace=True)
return h, df
def read_anc(fname):
"""Read .anc file format from Cortex MAC.
The .anc (Analog ASCII Row Column) file contain ASCII analog data
in row-column format. The data is derived from *.anb analog binary
files. These binary *.anb files are generated simultaneously with
video *.vc files if an optional analog input board is used in
conjunction with video data capture.
Parameters
----------
fname : string
full file name of the .anc file to be opened
Returns
-------
h : Python dictionary
.anc header information
keys: nbits, polarity, nchannels, data_rate, ch_names, ch_ranges
data : pandas dataframe
analog data with shape (nframes, nchannels)
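    Examples
    --------
    Illustrative only; the file name below is a placeholder.
    >>> h, data = read_anc('walk01.anc')
    >>> data[h['ch_names'][0]].mean()  # mean of the first analog channel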
"""
with open(file=fname, mode='rt', encoding='utf-8', newline='') as f:
# get header information
read = csv.reader(f, delimiter='\t')
header = [next(read) for x in range(11)]
h = {'nbits': int(header[3][1]),
'polarity': header[1][3],
'nchannels': int(header[2][7]),
'data_rate': float(header[3][3]),
'ch_names': header[8],
'ch_ranges': header[10]}
h['ch_names'] = h['ch_names'][1:-1]
h['ch_ranges'] = np.asarray(h['ch_ranges'][1:-1], dtype=np.float)
# analog data
data = pd.read_csv(f, sep='\t', names=h['ch_names'], engine='c',
usecols=np.arange(1, 1+h['nchannels']))
# convert ADC (bit) values to engineering units:
data *= h['ch_ranges']/(2**h['nbits']/2 - 2)
return h, data
def read_cal(fname):
"""Read .cal file format from Cortex MAC.
The .cal (force plate calibration parameters) file in ASCII contains:
<forceplate number> {1}
<scale> <length (cm)> <width (cm)> {2}
<N x N calibration matrix (the inverse sensitivity matrix)> {3}
<true origin in relation to the geometric center (cm)>
<geometric center in relation to LCS origin (cm)>
<3 x 3 orientation matrix>
...repeat for next force plate...
{1}: for a Kistler force plate, there is a 'K' after the number
{2}: the scale is the inverse of the gain
{3}: N equal 8 for Kistler and equal 6 for all AMTI and Bertec
Parameters
----------
fname : string
full file name of the .trc file to be opened
Returns
-------
forcepla : Python dictionary
parameter from the froce plate calibration file
keys: 'fp', 'scale', 'size', 'cal_matrix', 'origin', 'center', 'orientation'
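    Examples
    --------
    Illustrative only; the file name below is a placeholder.
    >>> forcepla = read_cal('forcepla.cal')
    >>> forcepla['cal_matrix'][0].shape  # calibration matrix of the first force plate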
"""
fp, scale, size, cal_matrix, origin, center, orientation = [], [], [], [], [], [], []
with open(file=fname, mode='rt', encoding='utf-8', newline='') as f:
reader = csv.reader(f, delimiter=' ')
for row in reader:
# force plate number
fp.append(int(row[0][0]))
# number of rows for Kistler or AMTI/Bertec force plate
n = 8 if row[0][-1] == 'K' else 6
# scale (inverse of the gain)
            scale_size = np.array(next(reader)).astype(float)  # np.float is deprecated
scale.append(scale_size[0])
# force plate length (cm) and width (cm)
size.append(scale_size[1:])
# calibration matrix (the inverse sensitivity matrix)
matrix = [next(reader) for x in range(n)]
            cal_matrix.append(np.array(matrix).astype(float))
            # true origin in relation to the geometric center (cm)
            origin.append(np.array(next(reader)).astype(float))
            # geometric center in relation to LCS origin (cm)
            center.append(np.array(next(reader)).astype(float))
            # 3 x 3 orientation matrix
            orienta = [next(reader) for x in range(3)]
            orientation.append(np.array(orienta).astype(float))
forcepla = {'fp': fp, 'scale': scale, 'size': size, 'cal_matrix': cal_matrix,
'origin': origin, 'center': center, 'orientation': orientation}
return forcepla
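# Example usage of read_cal (illustrative; the file name is hypothetical):
#   cal = read_cal('forcepla.cal')
#   cal['cal_matrix'][0]   # inverse sensitivity matrix of the first force plate
#   cal['origin'][0]       # its true origin w.r.t. the geometric center (cm)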
def read_forces(fname):
"""Read .forces file format from Cortex MAC.
The .forces file in ASCII contains force plate data. The data is saved
based on the forcepla.cal file of the trial and converts the raw force
plate data into calibrated forces. The units used are Newtons and
Newton-meters and each line in the file equates to one analog sample.
Parameters
----------
fname : string
full file name of the .forces file to be opened
Returns
-------
h : Python dictionary
.forces header information
keys: name, nforceplates, data_rate, nsamples, ch_names
data : pandas dataframe
force plate data with shape (nsamples, 7*nforceplates)
"""
with open(file=fname, mode='rt', encoding='utf-8', newline='') as f:
# get header information
read = csv.reader(f, delimiter='\t')
header = [next(read) for x in range(5)]
h = {'name': header[0][0],
'nforceplates': int(header[1][0].split('=')[1]),
'data_rate': float(header[2][0].split('=')[1]),
'nsamples': int(header[3][0].split('=')[1]),
'ch_names': header[4][1:]}
# force plate data
data = pd.read_csv(f, sep='\t', names=h['ch_names'], index_col=False,
usecols=np.arange(1, 1+7*h['nforceplates']), engine='c')
return h, data
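# Example usage of read_forces (illustrative; the file name is hypothetical):
#   h, grf = read_forces('trial01.forces')
#   h['nforceplates']   # number of force plates in the file
#   grf.iloc[:, :7]     # the 7 channels of the first force plate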
def write_trc(fname, header, df):
"""Write .trc file format from Cortex MAC.
    See the read_trc function above for the file format details.
Parameters
----------
fname : string
Full file name of the .trc file to be saved.
header : list of lists
header for the .trc file
df : pandas dataframe
dataframe with data for the .trc file (with frame and time columns)
"""
with open(file=fname, mode='wt', encoding='utf-8', newline='') as f:
print('Saving file "{}"'.format(fname))
for line in header:
f.write('\t'.join(line) + '\n')
f.write('\n')
df.to_csv(f, header=None, index=None, sep='\t',
line_terminator='\t\n') # float_format='%.8f'
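# Example round trip with read_trc/write_trc (illustrative; file names are
# hypothetical):
#   h, df = read_trc('walk01.trc', fname2='', units='m', df_multi=False)
#   write_trc('walk01_m.trc', h['header'], df)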
def write_v3dtxt(fname, trc, forces, freq=0):
"""Write Visual3d text file from .trc and .forces files or dataframes.
The .trc and .forces data are assumed to correspond to the same time
interval. If the data have different number of samples (different
frequencies), the data will be resampled to the highest frequency (or to
    the input frequency if it is higher than the former two) using the tnorm
function.
Parameters
----------
fname : string
Full file name of the Visual3d text file to be saved.
trc : pandas dataframe or string
If string, it is a full file name of the .trc file to read.
If dataframe, data of the .trc file has shape (nsamples, 2 + 3*nmarkers)
where the first two columns are from the Frame and Time values.
Input an empty string '' if there is no .trc file/dataframe (in this
case there must be forces and the input freq is the forces frequency).
forces : pandas dataframe or string
If string, it is a full file name of the .forces file to read.
If dataframe, data of the .forces file has shape (nsamples, 7*nforceplates)
Input an empty string '' if there is no forces file/dataframe (in this
case there must be a trc file/dataframe).
    freq : float (optional, default=0)
Sampling frequency in Hz to resample data if desired.
Data will be resampled to the highest frequency between freq, trc, forces.
"""
if isinstance(trc, str):
if trc:
_, trc = read_trc(trc, fname2='', units='', df_multi=False)
else:
trc = pd.DataFrame()
if isinstance(forces, str):
if forces:
_, forces = read_forces(forces)
else:
forces = pd.DataFrame()
if trc.shape[0] != forces.shape[0] or freq:
from tnorm import tnorm
freq_trc = 0 if trc.empty else 1/np.nanmean(np.diff(trc.iloc[:, 1].values))
if freq_trc:
freq_forces = 0 if forces.empty else freq_trc*(forces.shape[0]/trc.shape[0])
else:
freq_forces = freq
freq = np.max([freq, freq_trc, freq_forces])
nsample = np.max([trc.shape[0], forces.shape[0]]) * freq/(np.max([freq_trc, freq_forces]))
frame_time = np.vstack((np.arange(1, nsample+1, 1), np.arange(0, nsample, 1)/freq)).T
if freq_trc:
trc2, _, _ = tnorm(trc.iloc[:, 2:].values, step=-nsample)
trc2 = np.hstack((frame_time, trc2))
trc = pd.DataFrame(trc2, index=None, columns=trc.columns)
else:
trc = pd.DataFrame(frame_time, index=None, columns=['Frame#', 'Time'])
if freq_forces:
forces2, _, _ = tnorm(forces.values, step=-nsample)
forces = pd.DataFrame(forces2, index=None, columns=forces.columns)
ntrc = trc.shape[1]
nforces = forces.shape[1]
if nforces:
data = pd.concat([trc, forces], axis=1)
else:
data = trc
with open(file=fname, mode='wt', encoding='utf-8', newline='') as f:
rows = [[''] + ['default']*(ntrc + nforces - 1),
[''] + data.columns.tolist()[1:],
[''] + ['FRAME_NUMBERS'] + ['TARGET']*(ntrc - 2) + ['ANALOG']*nforces,
[''] + ['ORIGINAL']*(ntrc + nforces -1),
[data.columns[0]] + ['0'] + ['X', 'Y', 'Z']*int((ntrc - 2)/3) + ['0']*nforces]
write = csv.writer(f, delimiter='\t')
write.writerows(rows)
write.writerows(data.values)
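# Example usage of write_v3dtxt (illustrative; file names are hypothetical):
#   write_v3dtxt('trial01_v3d.txt', 'trial01.trc', 'trial01.forces', freq=0)
# Passing freq=0 keeps the highest of the two native sampling rates; the lower
# rate signal is resampled with tnorm (see the docstring above).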
def grf_moments(data, O):
"""Calculate force plate moments around its origin given
3 forces, 2 COPs, 1 free moment, and its geometric position.
Parameters
----------
data : Numpy array (n, 7)
array with [Fx, Fy, Fz, COPx, COPy, COPz, Tz].
O : Numpy array-like or list
origin [x,y,z] of the force plate in the motion capture coordinate system [in meters].
Returns
-------
    grf : Numpy array (n, 6)
array with [Fx, Fy, Fz, Mx, My, Mz]
"""
Fx, Fy, Fz, COPx, COPy, COPz, Tz = np.hsplit(data, 7)
    COPz = np.nanmean(COPz)  # in most cases this is zero
Mx = COPy*Fz + COPz*Fy
My = -COPx*Fz - COPz*Fx
Mz = Tz + COPx*Fy - COPy*Fx
Mx = Mx - Fy*O[2] + Fz*O[1]
My = My - Fz*O[0] + Fx*O[2]
Mz = Mz - Fx*O[1] + Fy*O[0]
grf = np.hstack((Fx, Fy, Fz, Mx, My, Mz))
return grf
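# Note on the math above: the last three assignments add the cross product
# O x F to the moments, i.e. they transfer the moments from the force-plate
# position O to the laboratory origin (M_lab = M_plate + O x F).
# Example (illustrative; `data` is an (n, 7) array as described in the docstring):
#   grf = grf_moments(data, O=[0.2, 0.3, 0.0])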
| 38.13164 | 98 | 0.586034 |
79455c69cabbb2a6f70f4346f4c496791e5bb580 | 23,353 | py | Python | Code/BNF.py | USTCEarthDefense/BNF_code | 4735085fce900f1230a623a0d3db16e8eff4d185 | [
"MIT"
] | null | null | null | Code/BNF.py | USTCEarthDefense/BNF_code | 4735085fce900f1230a623a0d3db16e8eff4d185 | [
"MIT"
] | null | null | null | Code/BNF.py | USTCEarthDefense/BNF_code | 4735085fce900f1230a623a0d3db16e8eff4d185 | [
"MIT"
] | null | null | null | import utils_funcs
import tensorflow as tf
import numpy as np
from sklearn.cluster import KMeans
import joblib as jb
from utils_funcs import FLOAT_TYPE, MATRIX_JITTER, DELTA_JITTER
import sys
import os
from tensorflow import keras
#np.random.seed(47)
#tf.set_random_seed(47)
# run as
print("usage : python *.py gpu=0 rank=5 dataset=article lr=0.001")
print('start')
print(sys.argv)
# parse args
py_name = sys.argv[0]
args = sys.argv[1:]
args_dict = {}
for arg_pair in args:
arg, val_str = arg_pair.split('=')
val_str = val_str.strip()
args_dict[arg] = val_str
arg_gpu_idx_str = args_dict['gpu']
arg_rank = int(args_dict['rank'])
arg_data_name = args_dict['dataset']
arg_lr = float(args_dict['lr'])
print('gpu index = %s' % arg_gpu_idx_str)
print('rank = %d' % arg_rank)
print('learning rate = %e' % arg_lr)
class NTF_HP:
def __init__(self, train_ind, train_y, init_config):
self.train_ind = train_ind
self.train_y = train_y
self.nmod = train_ind.shape[1]
self.uniq_ind = np.unique(self.train_ind, axis=0)
self.num_events = len(self.train_ind)
self.num_entries = len(self.uniq_ind)
# self.log_file_name = init_config['log_file_name']
self.init_U = init_config['U']
self.rank = self.init_U[0].shape[1]
self.batch_size_event = init_config['batch_size_event']
self.batch_size_inner_event = init_config['batch_size_inner_event']
self.batch_size_entry = init_config['batch_size_entry']
self.batch_size_entryEvent = init_config['batch_size_entryEvent']
self.learning_rate = init_config['learning_rate']
# VI Sparse GP
self.B = init_config['inducing_B'] # init with k-means, [len_B, rank]
self.len_B = len(self.B)
self.GP_SCOPE_NAME = "gp_params"
# GP parameters
with tf.variable_scope(self.GP_SCOPE_NAME):
# Embedding params
self.tf_U = [tf.Variable(self.init_U[k], dtype=FLOAT_TYPE,) for k in range(self.nmod)]
#keras.initializers.
# pseudo inputs
self.tf_B = tf.Variable(self.B, dtype=FLOAT_TYPE)
# pseudo outputs
self.tf_mu_alpha = tf.Variable(np.random.randn(self.len_B, 1) * 0.1, dtype=FLOAT_TYPE)
#self.tf_Ltril_alpha = tf.Variable(np.eye(self.len_B), dtype=FLOAT_TYPE)
self.tf_Ltril_alpha = tf.linalg.band_part( tf.Variable(np.eye(self.len_B) * 0.5, dtype=FLOAT_TYPE), -1, 0)
self.tf_log_lengthscale_alpha = tf.Variable(np.zeros([self.B.shape[1], 1]), dtype=FLOAT_TYPE)
self.tf_log_amp_alpha = tf.Variable( init_config[ 'log_amp_alpha'], dtype=FLOAT_TYPE)
self.tf_log_lengthscale_delta = tf.Variable(np.zeros([self.B.shape[1], 1]), dtype=FLOAT_TYPE)
self.tf_log_amp_delta = tf.Variable(init_config['log_amp_delta'], dtype=FLOAT_TYPE)
#self.tf_log_amp_delta = self.tf_log_amp_alpha
self.tf_log_lengthscale_trig = tf.Variable(np.zeros([self.B.shape[1], 1]), dtype=FLOAT_TYPE)
self.tf_log_amp_trig = tf.Variable(init_config['log_amp_trig'], dtype=FLOAT_TYPE)
#self.tf_log_amp_trig = tf.constant( -50, dtype=FLOAT_TYPE)
self.Kmm_alpha = utils_funcs.kernel_cross_tf(self.tf_B, self.tf_B, self.tf_log_amp_alpha,
self.tf_log_lengthscale_alpha)
self.Kmm_alpha = self.Kmm_alpha + MATRIX_JITTER * tf.linalg.eye(self.len_B, dtype=FLOAT_TYPE)
self.Var_alpha = self.tf_Ltril_alpha @ tf.transpose(self.tf_Ltril_alpha)
# KL terms
self.KL_alpha = utils_funcs.KL_q_p_tf(self.Kmm_alpha, self.tf_Ltril_alpha, self.tf_mu_alpha, self.len_B)
# Integral Term
# sum_i < int_0^T lam_i>
# placeholders
self.batch_entry_ind = tf.placeholder(dtype=tf.int32, shape=[self.batch_size_entry, self.nmod])
self.batch_entryEvent_ind = tf.placeholder(dtype=tf.int32, shape=[self.batch_size_entryEvent, self.nmod])
self.batch_entryEvent_y = tf.placeholder(dtype=FLOAT_TYPE, shape=[self.batch_size_entryEvent, 1])
self.batch_event_ind = tf.placeholder(dtype=tf.int32, shape=[self.batch_size_event, self.nmod])
self.batch_event_y = tf.placeholder(dtype=FLOAT_TYPE, shape=[self.batch_size_event, 1])
self.batch_inner_event_ind = tf.placeholder(dtype=tf.int32, shape=[self.batch_size_inner_event, self.nmod])
self.batch_inner_event_y = tf.placeholder(dtype=FLOAT_TYPE, shape=[self.batch_size_inner_event, 1])
self.X_entries = utils_funcs.concat_embeddings(self.tf_U, self.batch_entry_ind)
self.X_entryEvents = utils_funcs.concat_embeddings(self.tf_U, self.batch_entryEvent_ind)
self.X_events = utils_funcs.concat_embeddings(self.tf_U, self.batch_event_ind)
self.X_inner_events = utils_funcs.concat_embeddings(self.tf_U, self.batch_inner_event_ind)
self.tf_T = tf.constant(self.train_y[-1][0] - self.train_y[0][0], dtype=FLOAT_TYPE)
self.tf_T0 = tf.constant(self.train_y[0][0], dtype=FLOAT_TYPE)
self.tf_T1 = tf.constant(self.train_y[-1][0], dtype=FLOAT_TYPE)
# sample posterior base rate ( f )
self.Knm_entries = utils_funcs.kernel_cross_tf(self.X_entries, self.tf_B, self.tf_log_amp_alpha,
self.tf_log_lengthscale_alpha)
self.gp_base_rate_entries = utils_funcs.sample_pst_f_tf(self.tf_mu_alpha, self.tf_Ltril_alpha, self.Kmm_alpha,
self.Knm_entries,
self.tf_log_amp_alpha, MATRIX_JITTER)
self.base_rate_entries = tf.exp(self.gp_base_rate_entries)
# int term 1, using entryEvent
self.int_part1 = self.num_entries / self.batch_size_entry * self.tf_T * tf.reduce_sum(self.base_rate_entries)
# term 2
# Sample posterior decay rate (delta)
self.delta_entries = 1.0 / (
utils_funcs.kernel_cross_tf(self.X_entries, self.X_entryEvents, self.tf_log_amp_delta,
self.tf_log_lengthscale_delta) + DELTA_JITTER)
self.T_sub_y = self.tf_T1 - self.batch_entryEvent_y # [N_e, 1]
self.lag_effect = (1.0 - tf.exp(
-self.delta_entries * tf.transpose(self.T_sub_y))) / self.delta_entries # [ N_i, N_e]
self.K_trig = utils_funcs.kernel_cross_tf(self.X_entries, self.X_entryEvents, self.tf_log_amp_trig,
self.tf_log_lengthscale_trig) # [S,Q]
self.Trig_Mat = self.K_trig * self.lag_effect
# integral term 2
self.int_part2 = self.num_entries / self.batch_size_entry * self.num_events / self.batch_size_entryEvent * tf.reduce_sum(
self.Trig_Mat)
# sample event base rate
self.Knm_events = utils_funcs.kernel_cross_tf(self.X_events, self.tf_B, self.tf_log_amp_alpha,
self.tf_log_lengthscale_alpha)
self.gp_base_rate_events = utils_funcs.sample_pst_f_tf(self.tf_mu_alpha, self.tf_Ltril_alpha, self.Kmm_alpha,
self.Knm_events, self.tf_log_amp_alpha, MATRIX_JITTER)
self.base_rate_events = tf.exp(self.gp_base_rate_events)
self.event_delay = self.batch_event_y - tf.transpose(self.batch_inner_event_y)
self.valid_event_delay = tf.cast(self.event_delay > 0, FLOAT_TYPE)
self.event_delay = self.event_delay * self.valid_event_delay
self.delta_eventSum = 1.0 / (
utils_funcs.kernel_cross_tf(self.X_events, self.X_inner_events, self.tf_log_amp_delta,
self.tf_log_lengthscale_delta) + DELTA_JITTER)
self.event_delay_effect = tf.exp(- self.delta_eventSum * self.event_delay)
self.K_trig_event = utils_funcs.kernel_cross_tf(self.X_events, self.X_inner_events, self.tf_log_amp_trig,
self.tf_log_lengthscale_trig)
self.trig_mat_event = self.K_trig_event * self.event_delay_effect * self.valid_event_delay
self.trig_effects_eventSum = tf.reduce_sum( self.trig_mat_event, axis=1, keepdims=True)
# Bias est of event rates
self.event_rates = self.base_rate_events + self.num_events / self.batch_size_inner_event * self.trig_effects_eventSum
# sum term
self.event_sum_term = self.num_events / self.batch_size_event * tf.reduce_sum(tf.log(self.event_rates))
sqr_U = [tf.reduce_sum(U_i * U_i) for U_i in self.tf_U]
self.U_kernel_mag = tf.reduce_sum(sqr_U)
###
self.train_ELBO = 1.0 * self.event_sum_term - self.int_part1 - self.int_part2 - self.KL_alpha # - self.KL_delta
###
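        # The pieces above form a doubly-stochastic estimate of the ELBO: the
        # event-sum term and the two integral terms are rescaled in the graph by
        # num_events/batch_size and num_entries/batch_size so that each mini-batch
        # gives an (approximately) unbiased estimate of the full sums, and
        # KL_alpha is the KL divergence for the sparse-GP inducing outputs.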
self.train_ELBO_hist = []
# setting
self.min_opt = tf.train.AdamOptimizer(self.learning_rate)
self.min_step = self.min_opt.minimize(- self.train_ELBO)
# GPU settings
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
self.run_options = tf.RunOptions(report_tensor_allocations_upon_oom=True)
self.sess.run(tf.global_variables_initializer())
self.entries_ind_gnrt = utils_funcs.DataGenerator(self.uniq_ind)
self.event_ind_y_gnrt = utils_funcs.DataGenerator(self.train_ind, self.train_y)
self.entryEvent_ind_y_gnrt = utils_funcs.DataGenerator(self.train_ind, self.train_y)
self.inner_event_ind_y_gnrt = utils_funcs.DataGenerator(self.train_ind, self.train_y)
self.isTestGraphInitialized = False
def train(self, steps=1, print_every=100, test_every=False,
val_error=False, val_all_ind=None, val_all_y=None,
val_test_ind=None, val_test_y=None, verbose=False):
print('start')
for step in range(1, steps + 1):
if step % print_every == 0:
print("step = %d " % step)
batch_entries_ind = self.entries_ind_gnrt.draw_next(self.batch_size_entry)
batch_event_ind, batch_event_y = self.event_ind_y_gnrt.draw_next(self.batch_size_event)
batch_entryEvent_ind, batch_entryEvent_y = self.entryEvent_ind_y_gnrt.draw_next(self.batch_size_entryEvent)
batch_inner_event_ind, batch_inner_event_y = self.inner_event_ind_y_gnrt.draw_next(
self.batch_size_inner_event)
train_feed_dict = {self.batch_entry_ind: batch_entries_ind,
self.batch_event_ind: batch_event_ind, self.batch_event_y: batch_event_y,
self.batch_entryEvent_ind: batch_entryEvent_ind,
self.batch_entryEvent_y: batch_entryEvent_y,
self.batch_inner_event_ind: batch_inner_event_ind,
self.batch_inner_event_y: batch_inner_event_y}
_, ELBO_ret_min_step, KL_alpha, int_part1, int_part2, event_sum_term, entry_base_rate, delta_entries, = self.sess.run(
[self.min_step, self.train_ELBO, self.KL_alpha, self.int_part1, self.int_part2, self.event_sum_term,
self.base_rate_entries, self.delta_entries],
feed_dict=train_feed_dict, options=self.run_options
)
self.train_ELBO_hist.append(ELBO_ret_min_step)
if step % print_every == 0:
int_term = int_part1 + int_part2
int_event_sum_ratio = np.abs( int_term / event_sum_term)
print(
"ELBO = %g, KL_alpha = %g, int_part1 = %g, int_part2 = %g, event sum = %g, ratio = %g, base rate max = %g, min = %g, delta max = %g, min = %g"
% (ELBO_ret_min_step, KL_alpha, int_part1, int_part2, event_sum_term,int_event_sum_ratio, np.max(entry_base_rate),
np.min(entry_base_rate), np.max(delta_entries), np.min(delta_entries)))
# End min step<<=======================
if step % print_every == 0:
amp_alpha, amp_trig, amp_delta, U_reg, = self.check_vars(
[self.tf_log_amp_alpha, self.tf_log_amp_trig, self.tf_log_amp_delta, self.U_kernel_mag])
amp_alpha, amp_trig, amp_delta = np.exp([amp_alpha, amp_trig, amp_delta])
print('amp_alpha = %g, amp_trig = %g, amp_delta = %g, U_mag = %g' % (
amp_alpha, amp_trig, amp_delta, U_reg))
return self
def create_standAlone_test_graph(self, test_ind, test_y):
print("Create testing graph")
self.test_ind = test_ind
self.test_y = test_y
self.ind_uniq_test = np.unique(test_ind, axis=0)
self.num_uniq_ind_test = len(self.ind_uniq_test)
self.num_test_events = len(test_ind)
self.tf_test_event_ind_test = tf.constant(self.test_ind, dtype=tf.int32) # ALL test events
self.tf_test_event_y_test = tf.constant(self.test_y, dtype=FLOAT_TYPE) # ALL test events
self.tf_batch_entries_ind_test = tf.placeholder(dtype=tf.int32, shape=[None, self.nmod])
# integral term
# Use full testing event term when calculating batch integral terms
self.T_test = tf.constant(self.test_y[-1][0] - self.test_y[0][0], dtype=FLOAT_TYPE)
self.T0_test = tf.constant(self.test_y[0][0], dtype=FLOAT_TYPE)
self.T1_test = tf.constant(self.test_y[-1][0], dtype=FLOAT_TYPE)
self.X_batch_entries_test = utils_funcs.concat_embeddings(self.tf_U, self.tf_batch_entries_ind_test)
self.X_all_test_events_test = utils_funcs.concat_embeddings(self.tf_U, self.tf_test_event_ind_test)
self.Knm_entries_test = utils_funcs.kernel_cross_tf(self.X_batch_entries_test, self.tf_B,
self.tf_log_amp_alpha, self.tf_log_lengthscale_alpha)
self.gp_base_rate_entries_test = utils_funcs.sample_pst_f_tf_MLE(self.tf_mu_alpha, self.Kmm_alpha,
self.Knm_entries_test)
self.base_rate_entries_test = tf.exp(self.gp_base_rate_entries_test)
# term1
self.int_term1_test = tf.reduce_sum(self.base_rate_entries_test) * self.T_test
delta_entries_test = 1.0 / (utils_funcs.kernel_cross_tf(self.X_batch_entries_test, self.X_all_test_events_test,
self.tf_log_amp_delta,
self.tf_log_lengthscale_delta) + DELTA_JITTER)
T_sub_y_test = self.T1_test - self.tf_test_event_y_test
lag_effect_test = (1.0 - tf.exp(- delta_entries_test * tf.transpose(T_sub_y_test))) / delta_entries_test
self.K_trig_term3_test = utils_funcs.kernel_cross_tf(self.X_batch_entries_test, self.X_all_test_events_test,
self.tf_log_amp_trig, self.tf_log_lengthscale_trig)
self.term3_mat = self.K_trig_term3_test * lag_effect_test
# term3
self.int_term3_test = tf.reduce_sum(self.term3_mat)
self.int_term_test = self.int_term1_test + self.int_term3_test
# event sum term
self.tf_batch_event_ind_test = tf.placeholder(dtype=tf.int32, shape=[None, self.nmod])
self.tf_batch_event_y_test = tf.placeholder(dtype=FLOAT_TYPE, shape=[None, 1])
self.X_batch_event_test = utils_funcs.concat_embeddings(self.tf_U, self.tf_batch_event_ind_test)
self.test_eventSum_Knm = utils_funcs.kernel_cross_tf(self.X_batch_event_test, self.tf_B, self.tf_log_amp_alpha,
self.tf_log_lengthscale_alpha)
self.gp_eventSum_base_rate = utils_funcs.sample_pst_f_tf_MLE(self.tf_mu_alpha, self.Kmm_alpha,
self.test_eventSum_Knm)
self.eventSum_base_rate = tf.exp(self.gp_eventSum_base_rate) # [ N_prime_batch, 1]
self.event_delay_test = self.tf_batch_event_y_test - tf.transpose(self.tf_test_event_y_test)
self.valid_event_delay_test = tf.cast(self.tf_batch_event_y_test > tf.transpose(self.tf_test_event_y_test),
dtype=FLOAT_TYPE)
self.event_delay_test = self.event_delay_test * self.valid_event_delay_test
self.delta_eventSum_test = 1.0 / (
utils_funcs.kernel_cross_tf(self.X_batch_event_test, self.X_all_test_events_test,
self.tf_log_amp_delta, self.tf_log_lengthscale_delta) + DELTA_JITTER)
self.event_delay_effect_test = tf.exp(- self.delta_eventSum_test * self.event_delay_test)
self.K_trig_event_sum_test = utils_funcs.kernel_cross_tf(self.X_batch_event_test, self.X_all_test_events_test,
self.tf_log_amp_trig, self.tf_log_lengthscale_trig)
self.trig_mat_event_test = self.K_trig_event_sum_test * self.event_delay_effect_test * self.valid_event_delay_test
self.event_rates_test = self.eventSum_base_rate + tf.reduce_sum(self.trig_mat_event_test, axis=1, keepdims=True)
# eventsum term
self.eventSum_term = tf.reduce_sum(tf.log(self.event_rates_test))
self.isTestGraphInitialized = True
return self
def test(self, entries_batch_size, event_batch_size, verbose=False):
if not self.isTestGraphInitialized:
raise NameError("Test Graph hasn't been initialized")
# Calculate entries term
# Using full events
if entries_batch_size > self.num_uniq_ind_test:
entries_batch_size = self.num_uniq_ind_test
lst_int_terms = []
lst_term1s = []
lst_term3s = []
cur_idx = 0
end_idx = cur_idx + entries_batch_size
while cur_idx < self.num_uniq_ind_test:
batch_entries_test = self.ind_uniq_test[cur_idx:end_idx]
feed_dict = {self.tf_batch_entries_ind_test: batch_entries_test}
batch_int_term, batch_int_term1, batch_int_term3 = self.sess.run(
[self.int_term_test, self.int_term1_test, self.int_term3_test],
feed_dict=feed_dict)
lst_int_terms.append(batch_int_term)
lst_term1s.append(batch_int_term1)
lst_term3s.append(batch_int_term3)
if verbose:
print("int terms %d ~ %d, int_term = %g, int term1 = %g, int term3 = %g" % (
cur_idx, end_idx, lst_int_terms[-1], lst_term1s[-1], lst_term3s[-1]))
cur_idx += entries_batch_size
end_idx += entries_batch_size
if end_idx >= self.num_uniq_ind_test:
end_idx = self.num_uniq_ind_test
int_term = np.sum(lst_int_terms)
int_term1 = np.sum(lst_term1s)
int_term3 = np.sum(lst_term3s)
if event_batch_size > self.num_test_events:
event_batch_size = self.num_test_events
lst_eventSum_terms = []
cur_idx = 0
end_idx = cur_idx + event_batch_size
while cur_idx < self.num_test_events:
batch_test_event_ind = self.test_ind[cur_idx:end_idx]
batch_test_event_y = self.test_y[cur_idx:end_idx]
feed_dict = {self.tf_batch_event_ind_test: batch_test_event_ind,
self.tf_batch_event_y_test: batch_test_event_y}
event_sum = self.sess.run(self.eventSum_term, feed_dict=feed_dict)
lst_eventSum_terms.append(event_sum)
if verbose:
print("eventSum terms %d ~ %d, event_sum = %g " % (cur_idx, end_idx, lst_eventSum_terms[-1]))
cur_idx += event_batch_size
end_idx += event_batch_size
if end_idx >= self.num_test_events:
end_idx = self.num_test_events
eventSum_term = np.sum(lst_eventSum_terms)
test_log_P = - int_term + eventSum_term
return test_log_P, int_term, int_term1, int_term3, eventSum_term,
def check_vars(self, var_list):
batch_entries_ind = self.entries_ind_gnrt.draw_last()
batch_event_ind, batch_event_y = self.event_ind_y_gnrt.draw_last()
batch_entryEvent_ind, batch_entryEvent_y = self.entryEvent_ind_y_gnrt.draw_last()
batch_inner_event_ind, batch_inner_event_y = self.inner_event_ind_y_gnrt.draw_last()
train_feed_dict = {self.batch_entry_ind: batch_entries_ind,
self.batch_event_ind: batch_event_ind, self.batch_event_y: batch_event_y,
self.batch_entryEvent_ind: batch_entryEvent_ind, self.batch_entryEvent_y: batch_entryEvent_y,
self.batch_inner_event_ind: batch_inner_event_ind,
self.batch_inner_event_y: batch_inner_event_y}
ret = self.sess.run(var_list, feed_dict=train_feed_dict)
return ret
def test_data_set():
(ind, y), (train_ind, train_y), (test_ind, test_y) = utils_funcs.load_dataSet(arg_data_name, '../Data')
nmod = ind.shape[1]
nvec = np.max(ind, axis=0) + 1
R = arg_rank
U = [np.random.rand(nvec[k], R) * 1.0 for k in range(nmod)]
init_config = {}
init_config['U'] = U
init_config['batch_size_event'] = 64
init_config['batch_size_entry'] = 64
init_config['batch_size_inner_event'] = 4096
init_config['batch_size_entryEvent'] = 4096
init_config['learning_rate'] = arg_lr
init_config['log_amp_alpha'] = 0.0
init_config['log_amp_delta'] = 0.0
init_config['log_amp_trig'] = -3
len_B = 128 # Base Rate
model_config = {
'log_amp_alpha' : init_config['log_amp_alpha'],
'log_amp_delta' : init_config['log_amp_delta'],
'log_amp_trig' : init_config['log_amp_trig'],
'rank' : arg_rank,
'MATRIX_JITTER' : MATRIX_JITTER,
'DELTA_JITTER': DELTA_JITTER,
'lr' : arg_lr,
'batch_size_event' : init_config['batch_size_event'],
'batch_size_entry' : init_config['batch_size_entry'],
'batch_size_inner_event' : init_config['batch_size_inner_event'],
'batch_size_entryEvent' : init_config['batch_size_entryEvent'],
'num_psd_points' : len_B
}
print('launching Kmeans')
B = utils_funcs.init_base_gp_pseudo_inputs(U, train_ind, len_B)
print(B.shape)
print('Kmeans end')
# VI Sparse GP
init_config['inducing_B'] = B # init with k-means, [len_B, rank]
model = NTF_HP(train_ind, train_y, init_config)
model.create_standAlone_test_graph(test_ind, test_y)
steps_per_epoch = int(len(train_ind) / init_config['batch_size_event'])
num_epoch = 50
log_file = utils_funcs.init_log_file( './BNF.txt', arg_data_name, model_config )
for epoch in range(1, num_epoch + 1):
print('epoch %d\n' % epoch)
model.train(steps_per_epoch, int(steps_per_epoch / 5))
test_log_p, int_term, int_term1, int_term3, eventSum_term = model.test(128, 16, verbose=False)
log_file.write( '%g\n' % test_log_p)
log_file.flush()
os.fsync(log_file.fileno())
log_file.close()
model.sess.close()
if __name__ == '__main__':
test_data_set()
| 48.550936 | 164 | 0.653621 |
79455d585eb82cbd72e5098eef6afcd346f9c154 | 39 | py | Python | testdata/py.py | Jalitha/mimetype | 99f0225530f10a1730df54f740212a0a6f1d67e5 | [
"MIT"
] | 702 | 2018-07-03T09:47:05.000Z | 2022-03-31T05:55:10.000Z | testdata/py.py | Jalitha/mimetype | 99f0225530f10a1730df54f740212a0a6f1d67e5 | [
"MIT"
] | 248 | 2018-08-09T20:55:05.000Z | 2022-03-31T10:52:23.000Z | testdata/py.py | Jalitha/mimetype | 99f0225530f10a1730df54f740212a0a6f1d67e5 | [
"MIT"
] | 129 | 2018-08-11T13:03:19.000Z | 2022-03-31T10:38:08.000Z | #!/usr/bin/python
print("Olá, Mundo!")
| 13 | 20 | 0.641026 |
79455e134f3d1ca1c435852d2d4f4baca5f68e2c | 13,314 | py | Python | strax/storage/mongo.py | ahiguera-mx/strax | c2bcd5e34abe702666cd8bcb2bd6bba542ffb852 | [
"BSD-3-Clause"
] | 17 | 2018-05-06T17:46:42.000Z | 2021-11-16T18:20:27.000Z | strax/storage/mongo.py | AxFoundation/strax | 621e3027cd25d705dd473ec378164f4b4c1a5c50 | [
"BSD-3-Clause"
] | 420 | 2018-05-04T13:56:34.000Z | 2022-03-09T16:50:19.000Z | strax/storage/mongo.py | ahiguera-mx/strax | c2bcd5e34abe702666cd8bcb2bd6bba542ffb852 | [
"BSD-3-Clause"
] | 38 | 2018-05-04T13:55:22.000Z | 2022-01-13T17:42:13.000Z | """I/O format for MongoDB
This plugin is designed with data monitoring in mind, to put smaller
amounts of extracted data into a database for quick access. However
it should work with any plugin.
Note that there is no check to make sure the 16MB document size
limit is respected!
"""
import strax
import numpy as np
from pymongo import MongoClient, DESCENDING
from strax import StorageFrontend, StorageBackend, Saver
from datetime import datetime
from pytz import utc as py_utc
from warnings import warn
from sys import getsizeof
export, __all__ = strax.exporter()
# Some data is stored in the buffer. Delete when either of these values
# are exceeded
DEFAULT_MONGO_BACKEND_BUFFER_MB = 200
DEFAULT_MONGO_BACKEND_BUFFER_NRUNS = 5
@export
class MongoBackend(StorageBackend):
"""Mongo storage backend"""
def __init__(self, uri, database, col_name=None):
"""
Backend for reading/writing data from Mongo
:param uri: Mongo url (with pw and username)
:param database: name of database (str)
:param col_name: collection name (str) to look for data
"""
self.client = MongoClient(uri)
self.db = self.client[database]
self.col_name = col_name
# Attributes for the chunks-buffer
self.chunks_registry = {}
self._buffered_backend_keys = []
self._buff_mb = DEFAULT_MONGO_BACKEND_BUFFER_MB
self._buff_nruns = DEFAULT_MONGO_BACKEND_BUFFER_NRUNS
def _read_chunk(self, backend_key, chunk_info, dtype, compressor):
"""See strax.Backend"""
chunk_i = chunk_info["chunk_i"]
registry_key = backend_key + str(chunk_i)
# Build the chunk-registry if not done already, also rebuild if
# the key is not in the registry (will fail below if also not
# there on rebuild).
if registry_key not in self.chunks_registry.keys():
self._build_chunk_registry(backend_key)
# Unpack info about this chunk from the query. Return empty if
# not available. Use a *string* in the registry to lookup the
# chunk-data (like we do in _build_chunk_registry).
doc = self.chunks_registry.get(registry_key, None)
if doc is None:
# Did not find the data. NB: can be that the query is off in
# the _build_chunk_registry. In case you end up here but did
# not expect that, double check that self.chunks_registry is
# not an empty dict!
raise ValueError(
                f'Metadata claims chunk_{chunk_i} exists but it is unknown to '
f'the chunks_registry')
chunk_doc = doc.get('data', None)
if chunk_doc is None:
raise ValueError(f'Doc for chunk_{chunk_i} in wrong format:\n{doc}')
# Convert JSON to numpy
chunk_len = len(chunk_doc)
result = np.zeros(chunk_len, dtype=dtype)
for i in range(chunk_len):
for key in np.dtype(dtype).names:
result[i][key] = chunk_doc[i][key]
return result
def _saver(self, key, metadata, **kwargs):
"""See strax.Backend"""
# Use the key to make a collection otherwise, use the backend-key
col = self.db[self.col_name if self.col_name is not None else str(key)]
return MongoSaver(key, metadata, col, **kwargs)
def _get_metadata(self, key):
"""See strax.Backend"""
query = backend_key_to_query(key)
# Make sure to get the last of the meta-data docs. Otherwise we
# might be getting a previously failed document. Sort argument
# should be obsolete (due to the self.col.delete_many in the
# MongoSaver) but rather safe than sorry.
doc = self.db[self.col_name].find_one({
**query, 'provides_meta': True},
sort=[('write_time', DESCENDING)])
if doc and 'metadata' in doc:
return doc['metadata']
raise strax.DataNotAvailable
def _build_chunk_registry(self, backend_key):
"""
Build chunk info in a single registry using only one query to
the database. This is much faster as one does not have to do
n-chunk queries to the database. Just one will do. As the
documents-size is limited to 16 MB, it's unlikely that we will
run into memory issues (that we otherwise would not run into).
:param backend_key: strax.DataKey to query the collection for
"""
query = backend_key_to_query(backend_key)
chunks_registry = self.db[self.col_name].find(
{**query, 'provides_meta': False},
{"chunk_i": 1, "data": 1})
# We are going to convert this to a dictionary as that is
# easier to lookup
for doc in chunks_registry:
chunk_key = doc.get('chunk_i', None)
if chunk_key is None:
# Should not happen because of the projection in find
# but let's double check:
raise ValueError(
f'Projection failed, got doc with no "chunk_i":\n{doc}')
# Update our registry with this chunks info. Use chunk_i as
# chunk_key. Make it a *string* to avoid potential key-error
# issues or json-encoding headaches.
self.chunks_registry[backend_key + str(chunk_key)] = doc.copy()
# Some bookkeeping to make sure we don't buffer too much in this
# backend. We still need to return at least one hence the 'and'.
# See: https://github.com/AxFoundation/strax/issues/346
if backend_key not in self._buffered_backend_keys:
self._buffered_backend_keys.append(backend_key)
while ((getsizeof(self.chunks_registry) / 1e6 > self._buff_mb
and len(self._buffered_backend_keys) > 1)
or len(self._buffered_backend_keys) > self._buff_nruns):
self._clean_first_key_from_registry()
def _clean_first_key_from_registry(self):
"""
Remove the first item in the self.buffered_keys and all the
associated keys in the self.chunks_registry to limit RAM-usage
"""
# only clean the first entry from the list
to_clean = self._buffered_backend_keys[0]
for registry_key in list(self.chunks_registry.keys()):
if to_clean in registry_key:
del self.chunks_registry[registry_key]
del self._buffered_backend_keys[0]
@export
class MongoFrontend(StorageFrontend):
"""MongoDB storage frontend"""
def __init__(self, uri, database, col_name=None, *args, **kwargs):
"""
MongoFrontend for reading/writing data from Mongo
:param uri: Mongo url (with pw and username)
:param database: name of database (str)
:param col_name: collection name (str) to look for data
:param args: init for StorageFrontend
:param kwargs: init for StorageFrontend
"""
super().__init__(*args, **kwargs)
self.client = MongoClient(uri)
self.db = self.client[database]
self.backends = [MongoBackend(uri, database, col_name=col_name)]
self.col_name = col_name
def _find(self, key, write, allow_incomplete, fuzzy_for,
fuzzy_for_options):
"""See strax.Frontend"""
if write:
return self.backends[0].__class__.__name__, str(key)
query = backend_key_to_query(str(key))
if self.db[self.col_name].count_documents(query):
self.log.debug(f"{key} is in cache.")
return self.backends[0].__class__.__name__, str(key)
self.log.debug(f"{key} is NOT in cache.")
raise strax.DataNotAvailable
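# A minimal usage sketch (illustrative; the URI, database and collection names
# are placeholders, and it assumes a strax Context is created with a list of
# storage frontends):
#   mongo_sf = MongoFrontend(uri='mongodb://user:pw@host:27017/',
#                            database='monitor', col_name='strax_data')
#   st = strax.Context(storage=[mongo_sf])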
@export
class MongoSaver(Saver):
allow_rechunk = False
def __init__(self, key, metadata, col, **kwargs):
"""
Mongo saver
:param key: strax.Datakey
:param metadata: metadata to save belonging to data
:param col: collection (NB! pymongo collection object) of mongo
instance to write to
"""
super().__init__(metadata, **kwargs)
self.col = col
# All meta_documents should have the key to query against
basic_meta = backend_key_to_query(key).copy()
# Start with a clean sheet, we are just going to overwrite
self.col.delete_many(basic_meta)
# Add datetime objects as candidates for TTL collections. Either
# can be used according to the preference of the user to index.
# Two entries can be used:
# 1. The time of writing.
# 2. The time of data taking.
basic_meta['write_time'] = datetime.now(py_utc)
# The run_start_time below is a placeholder and will be updated
# in the _save_chunk_metadata for the first chunk. Nevertheless
# we need an object in case there e.g. is no chunk.
basic_meta['run_start_time'] = datetime.now(py_utc)
# Add flag to doc that we are providing the metadata
basic_meta['provides_meta'] = True
# If available later update with this value:
self.run_start = None
# This info should be added to all of the associated documents
self.basic_md = basic_meta
# For the metadata copy this too:
meta_data = basic_meta.copy()
meta_data['metadata'] = self.md
# Save object_ids for fast querying and updates
self.id_md = self.col.insert_one(meta_data).inserted_id
# Also save all the chunks
self.ids_chunk = {}
def _save_chunk(self, data, chunk_info, executor=None):
"""see strax.Saver"""
chunk_i = chunk_info['chunk_i']
if getattr(data, 'nbytes') > 10_000_000:
            warn('Inserting documents of size > 10 MB, this is getting '
                 'close to the 16 MB document size limit in MongoDB',
                 UserWarning)
aggregate_data = []
# Remove the numpy structures and parse the data. The dtype
# information is saved with the metadata so don't worry
for row in data:
ins = {}
for key in list(data.dtype.names):
ins[key] = row[key]
ins = remove_np(ins)
aggregate_data.append(ins)
# Get the document to update, if none available start a new one
# for this chunk
chunk_id = self.ids_chunk.get(chunk_i, None)
# We can fail here if the document is too large to be written
# out to mongo. One could do a try: except
# pymongo.errors.WriteError: pass, but that potentially leads to
# abuse of a Mongo instance going unnoticed.
if chunk_id is not None:
# In principle this should not end up here as each chunk
# should be it's own document unless you re-chunk
self.col.update_one({'_id': chunk_id},
{'$push': {f'data': aggregate_data}})
else:
# Start a new document, update it with the proper information
doc = self.basic_md.copy()
doc['write_time'] = datetime.now(py_utc)
doc['chunk_i'] = chunk_i
doc["data"] = aggregate_data
doc['provides_meta'] = False
chunk_id = self.col.insert_one(doc).inserted_id
self.ids_chunk[chunk_i] = chunk_id
return dict(), None
def _save_chunk_metadata(self, chunk_info):
"""see strax.Saver"""
# For the first chunk we get the run_start_time and update the
# run-metadata file
if int(chunk_info['chunk_i']) == 0:
self.run_start = datetime.fromtimestamp(
chunk_info['start']/1e9).replace(tzinfo=py_utc)
self.col.update_one({'_id': self.id_md},
{'$addToSet': {'metadata.chunks': chunk_info}})
def _close(self):
"""see strax.Saver"""
# First update the run-starts of all of the chunk-documents as
# this is a TTL index-candidate
if self.run_start is not None:
update = {'run_start_time': self.run_start}
query = {k: v for k, v in self.basic_md.items()
if k in ('number', 'data_type', 'lineage_hash')}
self.col.update_many(query, {'$set': update})
# Update the metadata
update = {f'metadata.{k}': v
for k, v in self.md.items()
if k in ('writing_ended', 'exception')}
# Also update all of the chunk-documents with the run_start_time
self.col.update_one({'_id': self.id_md}, {'$set': update})
def backend_key_to_query(backend_key):
"""Convert backend key to queryable dictionary"""
n, d, l = backend_key.split('-')
return {'number': int(n), 'data_type': d, 'lineage_hash': l}
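# For example (illustrative values), backend_key_to_query('012345-records-abcdef')
# returns {'number': 12345, 'data_type': 'records', 'lineage_hash': 'abcdef'}.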
def remove_np(dictin):
"""Remove numpy types from a dict so it can be inserted into
mongo."""
if isinstance(dictin, dict):
result = {}
for k in dictin.keys():
result[k] = remove_np(dictin[k])
elif isinstance(dictin, (np.ndarray, list)):
result = []
for k in dictin:
result.append(remove_np(k))
elif isinstance(dictin, np.integer):
return int(dictin)
elif isinstance(dictin, np.floating):
return float(dictin)
else:
return dictin
return result
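# For example (illustrative), remove_np({'a': np.int64(1), 'b': [np.float32(0.5)]})
# returns {'a': 1, 'b': [0.5]} with plain Python types that pymongo can encode.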
| 39.862275 | 80 | 0.626333 |
79455f3431b74d5f29d3f57cadc4338ec8577dc4 | 32,769 | py | Python | scvi/models/modules.py | lgyzngc/scvi | b4472e7d02a3889c405078cdd7ab4d4378309c2c | [
"MIT"
] | null | null | null | scvi/models/modules.py | lgyzngc/scvi | b4472e7d02a3889c405078cdd7ab4d4378309c2c | [
"MIT"
] | null | null | null | scvi/models/modules.py | lgyzngc/scvi | b4472e7d02a3889c405078cdd7ab4d4378309c2c | [
"MIT"
] | null | null | null | import collections
from typing import Iterable, List
import torch
from torch import nn as nn
from torch.distributions import Normal
from torch.nn import ModuleList
from scvi.models.utils import one_hot
def reparameterize_gaussian(mu, var):
return Normal(mu, var.sqrt()).rsample()
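# The helper above draws z = mu + sqrt(var) * eps with eps ~ N(0, I); using
# rsample() keeps the sample differentiable w.r.t. mu and var
# (the reparameterization trick).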
class FCLayers(nn.Module):
r"""A helper class to build fully-connected layers for a neural network.
:param n_in: The dimensionality of the input
:param n_out: The dimensionality of the output
:param n_cat_list: A list containing, for each category of interest,
the number of categories. Each category will be
included using a one-hot encoding.
:param n_layers: The number of fully-connected hidden layers
:param n_hidden: The number of nodes per hidden layer
:param dropout_rate: Dropout rate to apply to each of the hidden layers
:param use_batch_norm: Whether to have `BatchNorm` layers or not
:param use_relu: Whether to have `ReLU` layers or not
:param bias: Whether to learn bias in linear layers or not
"""
def __init__(
self,
n_in: int,
n_out: int,
n_cat_list: Iterable[int] = None,
n_layers: int = 1,
n_hidden: int = 128,
dropout_rate: float = 0.1,
use_batch_norm: bool = True,
use_relu: bool = True,
bias: bool = True,
):
super().__init__()
layers_dim = [n_in] + (n_layers - 1) * [n_hidden] + [n_out]
if n_cat_list is not None:
# n_cat = 1 will be ignored
self.n_cat_list = [n_cat if n_cat > 1 else 0 for n_cat in n_cat_list]
else:
self.n_cat_list = []
self.fc_layers = nn.Sequential(
collections.OrderedDict(
[
(
"Layer {}".format(i),
nn.Sequential(
nn.Linear(n_in + sum(self.n_cat_list), n_out, bias=bias),
# Below, 0.01 and 0.001 are the default values for `momentum` and `eps` from
# the tensorflow implementation of batch norm; we're using those settings
# here too so that the results match our old tensorflow code. The default
# setting from pytorch would probably be fine too but we haven't tested that.
nn.BatchNorm1d(n_out, momentum=0.01, eps=0.001)
if use_batch_norm
else None,
nn.ReLU() if use_relu else None,
nn.Dropout(p=dropout_rate) if dropout_rate > 0 else None,
),
)
for i, (n_in, n_out) in enumerate(
zip(layers_dim[:-1], layers_dim[1:])
)
]
)
)
def forward(self, x: torch.Tensor, *cat_list: int, instance_id: int = 0):
r"""Forward computation on ``x``.
:param x: tensor of values with shape ``(n_in,)``
:param cat_list: list of category membership(s) for this sample
:param instance_id: Use a specific conditional instance normalization (batchnorm)
:return: tensor of shape ``(n_out,)``
:rtype: :py:class:`torch.Tensor`
"""
        one_hot_cat_list = []  # for generality; many entries in this list may be unused
assert len(self.n_cat_list) <= len(
cat_list
), "nb. categorical args provided doesn't match init. params."
for n_cat, cat in zip(self.n_cat_list, cat_list):
assert not (
n_cat and cat is None
), "cat not provided while n_cat != 0 in init. params."
if n_cat > 1: # n_cat = 1 will be ignored - no additional information
if cat.size(1) != n_cat:
one_hot_cat = one_hot(cat, n_cat)
else:
one_hot_cat = cat # cat has already been one_hot encoded
one_hot_cat_list += [one_hot_cat]
for layers in self.fc_layers:
for layer in layers:
if layer is not None:
if isinstance(layer, nn.BatchNorm1d):
if x.dim() == 3:
x = torch.cat(
[(layer(slice_x)).unsqueeze(0) for slice_x in x], dim=0
)
else:
x = layer(x)
else:
if isinstance(layer, nn.Linear):
if x.dim() == 3:
one_hot_cat_list = [
o.unsqueeze(0).expand(
(x.size(0), o.size(0), o.size(1))
)
for o in one_hot_cat_list
]
x = torch.cat((x, *one_hot_cat_list), dim=-1)
x = layer(x)
return x
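# A minimal usage sketch for FCLayers (illustrative shapes/values only):
#   layer = FCLayers(n_in=100, n_out=128, n_cat_list=[2], n_layers=2)
#   h = layer(x, batch_index)   # x: (batch, 100), batch_index: (batch, 1) int tensor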
# Encoder
class Encoder(nn.Module):
r"""Encodes data of ``n_input`` dimensions into a latent space of ``n_output``
dimensions using a fully-connected neural network of ``n_hidden`` layers.
:param n_input: The dimensionality of the input (data space)
:param n_output: The dimensionality of the output (latent space)
:param n_cat_list: A list containing the number of categories
for each category of interest. Each category will be
included using a one-hot encoding
:param n_layers: The number of fully-connected hidden layers
:param n_hidden: The number of nodes per hidden layer
    :param dropout_rate: Dropout rate to apply to each of the hidden layers
"""
def __init__(
self,
n_input: int,
n_output: int,
n_cat_list: Iterable[int] = None,
n_layers: int = 1,
n_hidden: int = 128,
dropout_rate: float = 0.1,
):
super().__init__()
self.encoder = FCLayers(
n_in=n_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
)
self.mean_encoder = nn.Linear(n_hidden, n_output)
self.var_encoder = nn.Linear(n_hidden, n_output)
def forward(self, x: torch.Tensor, *cat_list: int):
r"""The forward computation for a single sample.
#. Encodes the data into latent space using the encoder network
#. Generates a mean \\( q_m \\) and variance \\( q_v \\) (clamped to \\( [-5, 5] \\))
#. Samples a new value from an i.i.d. multivariate normal \\( \\sim N(q_m, \\mathbf{I}q_v) \\)
:param x: tensor with shape (n_input,)
:param cat_list: list of category membership(s) for this sample
:return: tensors of shape ``(n_latent,)`` for mean and var, and sample
:rtype: 3-tuple of :py:class:`torch.Tensor`
"""
# Parameters for latent distribution
q = self.encoder(x, *cat_list)
q_m = self.mean_encoder(q)
q_v = torch.exp(self.var_encoder(q)) + 1e-4
latent = reparameterize_gaussian(q_m, q_v)
return q_m, q_v, latent
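# A minimal usage sketch for Encoder (illustrative shapes only):
#   enc = Encoder(n_input=1000, n_output=10)
#   q_m, q_v, z = enc(x)   # x: (batch, 1000) -> three (batch, 10) tensors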
# Multi-Encoder
class Multi_Encoder(nn.Module):
def __init__(
self,
RNA_input: int,
ATAC_input,
n_output: int,
n_cat_list: Iterable[int] = None,
n_layers: int = 1,
n_hidden: int = 128,
dropout_rate: float = 0.1,
):
super().__init__()
self.scRNA_encoder = FCLayers(
n_in=RNA_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
)
self.scATAC_encoder = FCLayers(
n_in=ATAC_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
)
self.concat1 = nn.Linear(2 * n_hidden, n_hidden)
self.concat2 = nn.Linear(n_hidden, n_hidden)
self.mean_encoder = nn.Linear(n_hidden, n_output)
self.var_encoder = nn.Linear(n_hidden, n_output)
def forward(self, x: list, *cat_list: int):
# Parameters for latent distribution
        if len(x) != 2:
            raise ValueError("Input training data should be 2 data types (RNA and ATAC), "
                             "but input was only {}".format(len(x)))
        if not torch.is_tensor(x[0]):
            raise ValueError("Training data should be a tensor!")
q1 = self.scRNA_encoder(x[0], *cat_list)
q2 = self.scATAC_encoder(x[1], *cat_list)
q = self.concat2(self.concat1(torch.cat((q1, q2), 1)))
q_m = self.mean_encoder(q)
q_v = torch.exp(self.var_encoder(q)) + 1e-4
latent = reparameterize_gaussian(q_m, q_v)
return q_m, q_v, latent
# Multi-Encoder
class Multi_Decoder(nn.Module):
def __init__(
self,
n_input: int,
RNA_output: int,
ATAC_output: int,
n_cat_list: Iterable[int] = None,
n_layers: int = 1,
n_hidden: int = 256,
dropout_rate: float = 0,
):
super().__init__()
# RNA-seq decoder
self.scRNA_decoder = FCLayers(
n_in=n_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
)
# mean gamma
self.rna_scale_decoder = nn.Sequential(
nn.Linear(n_hidden, RNA_output), nn.Softmax(dim=-1)
)
# dispersion: here we only deal with gene-cell dispersion case
self.rna_r_decoder = nn.Linear(n_hidden, RNA_output)
# dropout
self.rna_dropout_decoder = nn.Linear(n_hidden, RNA_output)
# ATAC decoder
self.scATAC_decoder = FCLayers(
n_in=n_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
)
# mean possion
self.atac_scale_decoder = nn.Sequential(
nn.Linear(n_hidden, ATAC_output), nn.Softmax(dim=-1)
)
# dispersion: here we only deal with gene-cell dispersion case
self.atac_r_decoder = nn.Linear(n_hidden, ATAC_output)
# dropout
self.atac_dropout_decoder = nn.Linear(n_hidden, ATAC_output)
# libaray scale for each cell
self.libaray_decoder = FCLayers(
n_in=n_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
)
self.libaray_rna_scale_decoder = nn.Linear(n_hidden, 1)
self.libaray_atac_scale_decoder = nn.Linear(n_hidden, 1)
def forward(self, z: torch.Tensor, z_c: torch.Tensor, *cat_list: int):
# The decoder returns values for the parameters of the ZINB distribution of scRNA-seq
p_rna = self.scRNA_decoder(z, *cat_list)
p_rna_scale = self.rna_scale_decoder(p_rna)
p_rna_dropout = self.rna_dropout_decoder(p_rna)
libaray_temp = self.libaray_decoder(z_c, *cat_list)
libaray_gene = self.libaray_rna_scale_decoder(libaray_temp)
p_rna_rate = torch.exp(libaray_gene.clamp(max=12)) * p_rna_scale # torch.clamp( , max=12)
#p_rna_rate.clamp(max=12) # maybe it is unnecessary
p_rna_r = self.rna_r_decoder(p_rna)
# The decoder returns values for the parameters of the ZIP distribution of scATAC-seq
p_atac = self.scATAC_decoder(z, *cat_list)
p_atac_scale = self.atac_scale_decoder(p_atac)
p_atac_r = self.atac_r_decoder(p_atac)
p_atac_dropout = self.atac_dropout_decoder(p_atac)
libaray_atac = self.libaray_atac_scale_decoder(libaray_temp)
        p_atac_mean = torch.exp(libaray_atac.clamp(max=13)) * p_atac_scale  # for ZINB and ZIP losses; clamp(max=...) mirrors the RNA branch above
#p_atac_mean = libaray_atac * p_atac_scale # for binary loss
return p_rna_scale, p_rna_r, p_rna_rate, p_rna_dropout, p_atac_scale, p_atac_r, p_atac_mean, p_atac_dropout
# Decoder
class DecoderSCVI(nn.Module):
r"""Decodes data from latent space of ``n_input`` dimensions ``n_output``
dimensions using a fully-connected neural network of ``n_hidden`` layers.
:param n_input: The dimensionality of the input (latent space)
:param n_output: The dimensionality of the output (data space)
:param n_cat_list: A list containing the number of categories
for each category of interest. Each category will be
included using a one-hot encoding
:param n_layers: The number of fully-connected hidden layers
:param n_hidden: The number of nodes per hidden layer
:param dropout_rate: Dropout rate to apply to each of the hidden layers
"""
def __init__(
self,
n_input: int,
n_output: int,
n_cat_list: Iterable[int] = None,
n_layers: int = 1,
n_hidden: int = 128,
):
super().__init__()
self.px_decoder = FCLayers(
n_in=n_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=0,
)
# mean gamma
self.px_scale_decoder = nn.Sequential(
nn.Linear(n_hidden, n_output), nn.Softmax(dim=-1)
)
# dispersion: here we only deal with gene-cell dispersion case
self.px_r_decoder = nn.Linear(n_hidden, n_output)
# dropout
self.px_dropout_decoder = nn.Linear(n_hidden, n_output)
def forward(
self, dispersion: str, z: torch.Tensor, library: torch.Tensor, *cat_list: int
):
r"""The forward computation for a single sample.
#. Decodes the data from the latent space using the decoder network
#. Returns parameters for the ZINB distribution of expression
#. If ``dispersion != 'gene-cell'`` then value for that param will be ``None``
:param dispersion: One of the following
* ``'gene'`` - dispersion parameter of NB is constant per gene across cells
* ``'gene-batch'`` - dispersion can differ between different batches
* ``'gene-label'`` - dispersion can differ between different labels
* ``'gene-cell'`` - dispersion can differ for every gene in every cell
:param z: tensor with shape ``(n_input,)``
:param library: library size
:param cat_list: list of category membership(s) for this sample
:return: parameters for the ZINB distribution of expression
:rtype: 4-tuple of :py:class:`torch.Tensor`
"""
# The decoder returns values for the parameters of the ZINB distribution
px = self.px_decoder(z, *cat_list)
px_scale = self.px_scale_decoder(px)
px_dropout = self.px_dropout_decoder(px)
# Clamp to high value: exp(12) ~ 160000 to avoid nans (computational stability)
px_rate = torch.exp(library) * px_scale # torch.clamp( , max=12)
px_r = self.px_r_decoder(px) if dispersion == "gene-cell" else None
return px_scale, px_r, px_rate, px_dropout
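# A minimal usage sketch for DecoderSCVI (illustrative shapes only):
#   dec = DecoderSCVI(n_input=10, n_output=1000)
#   px_scale, px_r, px_rate, px_dropout = dec("gene", z, library)
#   # with dispersion="gene" the returned px_r is None and the dispersion
#   # parameter is handled by the caller (e.g. the VAE model)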
class LinearDecoderSCVI(nn.Module):
def __init__(
self,
n_input: int,
n_output: int,
n_cat_list: Iterable[int] = None,
n_layers: int = 1,
n_hidden: int = 128,
):
super(LinearDecoderSCVI, self).__init__()
# mean gamma
self.n_batches = n_cat_list[0] # Just try a simple case for now
if self.n_batches > 1:
self.batch_regressor = nn.Linear(self.n_batches - 1, n_output, bias=False)
else:
self.batch_regressor = None
self.factor_regressor = nn.Linear(n_input, n_output)
# dropout
self.px_dropout_decoder = nn.Linear(n_input, n_output)
def forward(
self, dispersion: str, z: torch.Tensor, library: torch.Tensor, *cat_list: int
):
# The decoder returns values for the parameters of the ZINB distribution
p1_ = self.factor_regressor(z)
if self.n_batches > 1:
one_hot_cat = one_hot(cat_list[0], self.n_batches)[:, :-1]
p2_ = self.batch_regressor(one_hot_cat)
raw_px_scale = p1_ + p2_
else:
raw_px_scale = p1_
px_scale = torch.softmax(raw_px_scale, dim=-1)
px_dropout = self.px_dropout_decoder(z)
px_rate = torch.exp(library) * px_scale
px_r = None
return px_scale, px_r, px_rate, px_dropout
# Decoder
class Decoder(nn.Module):
r"""Decodes data from latent space of ``n_input`` dimensions to ``n_output``
dimensions using a fully-connected neural network of ``n_hidden`` layers.
Output is the mean and variance of a multivariate Gaussian
:param n_input: The dimensionality of the input (latent space)
:param n_output: The dimensionality of the output (data space)
:param n_cat_list: A list containing the number of categories
for each category of interest. Each category will be
included using a one-hot encoding
:param n_layers: The number of fully-connected hidden layers
:param n_hidden: The number of nodes per hidden layer
:param dropout_rate: Dropout rate to apply to each of the hidden layers
"""
def __init__(
self,
n_input: int,
n_output: int,
n_cat_list: Iterable[int] = None,
n_layers: int = 1,
n_hidden: int = 128,
):
super().__init__()
self.decoder = FCLayers(
n_in=n_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=0,
)
self.mean_decoder = nn.Linear(n_hidden, n_output)
self.var_decoder = nn.Linear(n_hidden, n_output)
def forward(self, x: torch.Tensor, *cat_list: int):
r"""The forward computation for a single sample.
#. Decodes the data from the latent space using the decoder network
#. Returns tensors for the mean and variance of a multivariate distribution
:param x: tensor with shape ``(n_input,)``
:param cat_list: list of category membership(s) for this sample
:return: Mean and variance tensors of shape ``(n_output,)``
:rtype: 2-tuple of :py:class:`torch.Tensor`
"""
# Parameters for latent distribution
p = self.decoder(x, *cat_list)
p_m = self.mean_decoder(p)
p_v = torch.exp(self.var_decoder(p))
return p_m, p_v
class MultiEncoder(nn.Module):
def __init__(
self,
n_heads: int,
n_input_list: List[int],
n_output: int,
n_hidden: int = 128,
n_layers_individual: int = 1,
n_layers_shared: int = 2,
n_cat_list: Iterable[int] = None,
dropout_rate: float = 0.1,
):
super().__init__()
self.encoders = ModuleList(
[
FCLayers(
n_in=n_input_list[i],
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers_individual,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
use_batch_norm=True,
)
for i in range(n_heads)
]
)
self.encoder_shared = FCLayers(
n_in=n_hidden,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers_shared,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
)
self.mean_encoder = nn.Linear(n_hidden, n_output)
self.var_encoder = nn.Linear(n_hidden, n_output)
def forward(self, x: torch.Tensor, head_id: int, *cat_list: int):
q = self.encoders[head_id](x, *cat_list)
q = self.encoder_shared(q, *cat_list)
q_m = self.mean_encoder(q)
q_v = torch.exp(self.var_encoder(q))
latent = reparameterize_gaussian(q_m, q_v)
return q_m, q_v, latent
class MultiDecoder(nn.Module):
def __init__(
self,
n_input: int,
n_output: int,
n_hidden_conditioned: int = 32,
n_hidden_shared: int = 128,
n_layers_conditioned: int = 1,
n_layers_shared: int = 1,
n_cat_list: Iterable[int] = None,
dropout_rate: float = 0.2,
):
super().__init__()
n_out = n_hidden_conditioned if n_layers_shared else n_hidden_shared
if n_layers_conditioned:
self.px_decoder_conditioned = FCLayers(
n_in=n_input,
n_out=n_out,
n_cat_list=n_cat_list,
n_layers=n_layers_conditioned,
n_hidden=n_hidden_conditioned,
dropout_rate=dropout_rate,
use_batch_norm=True,
)
n_in = n_out
else:
self.px_decoder_conditioned = None
n_in = n_input
if n_layers_shared:
self.px_decoder_final = FCLayers(
n_in=n_in,
n_out=n_hidden_shared,
n_cat_list=[],
n_layers=n_layers_shared,
n_hidden=n_hidden_shared,
dropout_rate=dropout_rate,
use_batch_norm=True,
)
n_in = n_hidden_shared
else:
self.px_decoder_final = None
self.px_scale_decoder = nn.Sequential(
nn.Linear(n_in, n_output), nn.Softmax(dim=-1)
)
self.px_r_decoder = nn.Linear(n_in, n_output)
self.px_dropout_decoder = nn.Linear(n_in, n_output)
def forward(
self,
z: torch.Tensor,
dataset_id: int,
library: torch.Tensor,
dispersion: str,
*cat_list: int
):
px = z
if self.px_decoder_conditioned:
px = self.px_decoder_conditioned(px, *cat_list, instance_id=dataset_id)
if self.px_decoder_final:
px = self.px_decoder_final(px, *cat_list)
px_scale = self.px_scale_decoder(px)
px_dropout = self.px_dropout_decoder(px)
px_rate = torch.exp(library) * px_scale
px_r = self.px_r_decoder(px) if dispersion == "gene-cell" else None
return px_scale, px_r, px_rate, px_dropout
class DecoderTOTALVI(nn.Module):
r"""Decodes data from latent space of ``n_input`` dimensions ``n_output``
dimensions using a linear decoder
:param n_input: The dimensionality of the input (latent space)
:param n_output_genes: The dimensionality of the output (gene space)
:param n_output_proteins: The dimensionality of the output (protein space)
:param n_cat_list: A list containing the number of categories
for each category of interest. Each category will be
included using a one-hot encoding
"""
def __init__(
self,
n_input: int,
n_output_genes: int,
n_output_proteins: int,
n_cat_list: Iterable[int] = None,
n_layers: int = 1,
n_hidden: int = 256,
dropout_rate: float = 0,
):
super().__init__()
self.n_output_genes = n_output_genes
self.n_output_proteins = n_output_proteins
self.px_decoder = FCLayers(
n_in=n_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
)
# mean gamma
self.px_scale_decoder = nn.Sequential(
nn.Linear(n_hidden + n_input, n_output_genes), nn.Softmax(dim=-1)
)
# background mean first decoder
self.py_back_decoder = FCLayers(
n_in=n_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
)
# background mean parameters second decoder
self.py_back_mean_log_alpha = nn.Linear(n_hidden + n_input, n_output_proteins)
self.py_back_mean_log_beta = nn.Linear(n_hidden + n_input, n_output_proteins)
# foreground increment decoder step 1
self.py_fore_decoder = FCLayers(
n_in=n_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
)
# foreground increment decoder step 2
self.py_fore_scale_decoder = nn.Sequential(
nn.Linear(n_hidden + n_input, n_output_proteins), nn.ReLU()
)
# dropout (mixture component for proteins, ZI probability for genes)
self.sigmoid_decoder = FCLayers(
n_in=n_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
)
self.px_dropout_decoder_gene = nn.Linear(n_hidden + n_input, n_output_genes)
self.py_background_decoder = nn.Linear(n_hidden + n_input, n_output_proteins)
def forward(self, z: torch.Tensor, library_gene: torch.Tensor, *cat_list: int):
r"""The forward computation for a single sample.
#. Decodes the data from the latent space using the decoder network
#. Returns local parameters for the ZINB distribution for genes
#. Returns local parameters for the Mixture NB distribution for proteins
We use the dictionary `px_` to contain the parameters of the ZINB/NB for genes.
The rate refers to the mean of the NB, dropout refers to Bernoulli mixing parameters.
`scale` refers to the quantity upon which differential expression is performed. For genes,
this can be viewed as the mean of the underlying gamma distribution.
We use the dictionary `py_` to contain the parameters of the Mixture NB distribution for proteins.
`rate_fore` refers to foreground mean, while `rate_back` refers to background mean. `scale` refers to
foreground mean adjusted for background probability and scaled to reside in the simplex.
`back_alpha` and `back_beta` are the posterior parameters for `rate_back`. `fore_scale` is the scaling
factor that enforces `rate_fore` > `rate_back`.
:param z: tensor with shape ``(n_input,)``
:param library_gene: library size
:param cat_list: list of category membership(s) for this sample
:return: parameters for the ZINB distribution of expression
:rtype: 3-tuple (first 2-tuple :py:class:`dict`, last :py:class:`torch.Tensor`)
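Illustrative unpacking of the returned values (a sketch only; the names
`decoder` and `batch_index` are placeholders, not part of this module):
    px_, py_, log_pro_back_mean = decoder(z, library_gene, batch_index)
    rate_genes = px_["rate"]           # NB mean for genes
    rate_fore_prot = py_["rate_fore"]  # foreground NB mean for proteins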
"""
px_ = {}
py_ = {}
px = self.px_decoder(z, *cat_list)
px_cat_z = torch.cat([px, z], dim=-1)
px_["scale"] = self.px_scale_decoder(px_cat_z)
px_["rate"] = library_gene * px_["scale"]
py_back = self.py_back_decoder(z, *cat_list)
py_back_cat_z = torch.cat([py_back, z], dim=-1)
py_["back_alpha"] = self.py_back_mean_log_alpha(py_back_cat_z)
py_["back_beta"] = torch.exp(self.py_back_mean_log_beta(py_back_cat_z))
log_pro_back_mean = Normal(py_["back_alpha"], py_["back_beta"]).rsample()
py_["rate_back"] = torch.exp(log_pro_back_mean)
py_fore = self.py_fore_decoder(z, *cat_list)
py_fore_cat_z = torch.cat([py_fore, z], dim=-1)
py_["fore_scale"] = self.py_fore_scale_decoder(py_fore_cat_z) + 1
py_["rate_fore"] = py_["rate_back"] * py_["fore_scale"]
p_mixing = self.sigmoid_decoder(z, *cat_list)
p_mixing_cat_z = torch.cat([p_mixing, z], dim=-1)
px_["dropout"] = self.px_dropout_decoder_gene(p_mixing_cat_z)
py_["mixing"] = self.py_background_decoder(p_mixing_cat_z)
return (px_, py_, log_pro_back_mean)
# Encoder
class EncoderTOTALVI(nn.Module):
r"""Encodes data of ``n_input`` dimensions into a latent space of ``n_output``
dimensions using a fully-connected neural network of ``n_hidden`` layers.
:param n_input: The dimensionality of the input (data space)
:param n_output: The dimensionality of the output (latent space)
:param n_cat_list: A list containing the number of categories
for each category of interest. Each category will be
included using a one-hot encoding
:param n_layers: The number of fully-connected hidden layers
:param n_hidden: The number of nodes per hidden layer
:param dropout_rate: Dropout rate to apply to each of the hidden layers
:param distribution: Distribution of the latent space, one of
* ``'normal'`` - Normal distribution
* ``'ln'`` - Logistic normal
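Example (a minimal sketch; the sizes and the random input tensor are
arbitrary placeholders):
    encoder = EncoderTOTALVI(n_input=120, n_output=20)
    data = torch.rand(8, 120)  # batch of 8 cells, 120 input features
    qz_m, qz_v, ql_m, ql_v, latent, untran_latent = encoder(data)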
"""
def __init__(
self,
n_input: int,
n_output: int,
n_cat_list: Iterable[int] = None,
n_layers: int = 2,
n_hidden: int = 256,
dropout_rate: float = 0.1,
distribution: str = "ln",
):
super().__init__()
self.encoder = FCLayers(
n_in=n_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
)
self.z_encoder = nn.Sequential(
nn.Linear(n_hidden, n_hidden),
nn.BatchNorm1d(n_hidden),
nn.ReLU(),
nn.Dropout(p=dropout_rate),
)
self.z_mean_encoder = nn.Linear(n_hidden, n_output)
self.z_var_encoder = nn.Linear(n_hidden, n_output)
self.l_gene_encoder = nn.Sequential(
nn.Linear(n_hidden, n_hidden),
nn.BatchNorm1d(n_hidden),
nn.ReLU(),
nn.Dropout(p=dropout_rate),
)
self.l_gene_mean_encoder = nn.Linear(n_hidden, 1)
self.l_gene_var_encoder = nn.Linear(n_hidden, 1)
self.distribution = distribution
def identity(x):
return x
if distribution == "ln":
self.z_transformation = nn.Softmax(dim=-1)
else:
self.z_transformation = identity
self.l_transformation = torch.exp
def reparameterize_transformation(self, mu, var):
untran_z = Normal(mu, var.sqrt()).rsample()
z = self.z_transformation(untran_z)
return z, untran_z
def forward(self, data: torch.Tensor, *cat_list: int):
r"""The forward computation for a single sample.
#. Encodes the data into latent space using the encoder network
#. Generates a mean \\( q_m \\) and variance \\( q_v \\)
#. Samples a new value from an i.i.d. latent distribution
The dictionary `latent` contains the samples of the latent variables, while `untran_latent`
contains the untransformed versions of these latent variables. For example, the library size is log normally distributed,
so `untran_latent["l"]` gives the normal sample that was later exponentiated to become `latent["l"]`.
The logistic normal distribution is equivalent to applying softmax to a normal sample.
:param data: tensor with shape (n_input,)
:param cat_list: list of category membership(s) for this sample
:return: tensors of shape ``(n_latent,)`` for mean and var, and sample
:rtype: 6-tuple. First 4 of :py:class:`torch.Tensor`, next 2 are `dict` of :py:class:`torch.Tensor`
"""
# Parameters for latent distribution
q = self.encoder(data, *cat_list)
qz = self.z_encoder(q)
qz_m = self.z_mean_encoder(qz)
qz_v = torch.exp(self.z_var_encoder(qz)) + 1e-4
z, untran_z = self.reparameterize_transformation(qz_m, qz_v)
ql_gene = self.l_gene_encoder(q)
ql_m = self.l_gene_mean_encoder(ql_gene)
ql_v = torch.exp(self.l_gene_var_encoder(ql_gene)) + 1e-4
log_library_gene = torch.clamp(reparameterize_gaussian(ql_m, ql_v), max=15)
library_gene = self.l_transformation(log_library_gene)
latent = {}
untran_latent = {}
latent["z"] = z
latent["l"] = library_gene
untran_latent["z"] = untran_z
untran_latent["l"] = log_library_gene
return qz_m, qz_v, ql_m, ql_v, latent, untran_latent
| 37.708861 | 129 | 0.597516 |
794561206470cc19d601a547e1d46948926a5900 | 4,353 | py | Python | tests/test_global_linkspam.py | AntiCompositeNumber/linkspam | 12dd89e156abdb3811d863fa47e840086810897d | [
"Apache-2.0"
] | 1 | 2019-12-10T16:37:22.000Z | 2019-12-10T16:37:22.000Z | tests/test_global_linkspam.py | AntiCompositeNumber/linkspam | 12dd89e156abdb3811d863fa47e840086810897d | [
"Apache-2.0"
] | 18 | 2019-11-15T21:44:52.000Z | 2020-01-03T05:54:56.000Z | tests/test_global_linkspam.py | AntiCompositeNumber/linkspam | 12dd89e156abdb3811d863fa47e840086810897d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# coding: utf-8
# SPDX-License-Identifier: Apache-2.0
# Copyright 2019 AntiCompositeNumber
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
# import requests
# import mwparserfromhell as mwph
import json
import pywikibot
import unittest.mock as mock
import inspect
import sys
import os
__dir__ = os.path.realpath(os.path.dirname(__file__)+"/..")
conf = os.path.join(__dir__, 'src/config.json')
try:
open(conf, 'r')
except FileNotFoundError:
with open(conf, 'w') as f:
json.dump({}, f)
sys.path.append(__dir__)
import src.global_linkspam as global_linkspam # noqa: E402
def test_get_sitematrix():
matrix = global_linkspam.get_sitematrix
assert inspect.isgeneratorfunction(matrix)
l_matrix = list(matrix())
assert 'https://en.wikipedia.org' in l_matrix
assert len(l_matrix) > 700
def test_check_status_closed():
checksite = {'closed': '',
'code': 'wikimania2005',
'dbname': 'wikimania2005wiki',
'lang': 'wikimania2005',
'sitename': 'Wikipedia',
'url': 'https://wikimania2005.wikimedia.org'}
assert not global_linkspam.check_status(checksite)
def test_check_status_private():
checksite = {'code': 'wikimaniateam',
'dbname': 'wikimaniateamwiki',
'lang': 'en',
'private': '',
'sitename': 'WikimaniaTeam',
'url': 'https://wikimaniateam.wikimedia.org'}
assert not global_linkspam.check_status(checksite)
def test_check_status_fishbowl():
checksite = {'code': 'nostalgia',
'dbname': 'nostalgiawiki',
'fishbowl': '',
'lang': 'nostalgia',
'sitename': 'Wikipedia',
'url': 'https://nostalgia.wikipedia.org'}
assert not global_linkspam.check_status(checksite)
def test_check_status_open():
checksite = {'url': 'https://en.wikipedia.org'}
assert global_linkspam.check_status(checksite)
def test_list_pages():
m = mock.MagicMock()
m.return_value = ['Test']
with mock.patch('pywikibot.pagegenerators.LinksearchPageGenerator', m):
output = list(global_linkspam.list_pages('site', 'example.com'))
assert len(output) == 4
calls = [mock.call('example.com', site='site', protocol='http'),
mock.call('example.com', site='site', protocol='https'),
mock.call('*.example.com', site='site', protocol='http'),
mock.call('*.example.com', site='site', protocol='https')]
assert m.mock_calls == calls
def test_run_check_true():
m = mock.MagicMock()
n = mock.MagicMock()
n.text = 'True'
m.return_value = n
with mock.patch('pywikibot.Page', m):
assert global_linkspam.run_check('', False) is None
def test_run_check_false():
m = mock.MagicMock()
n = mock.MagicMock()
n.text = 'False'
m.return_value = n
with mock.patch('pywikibot.Page', m):
with pytest.raises(pywikibot.UserBlocked):
global_linkspam.run_check('', False)
def test_run_check_nonsense():
m = mock.MagicMock()
n = mock.MagicMock()
n.text = 'Bananas'
m.return_value = n
with mock.patch('pywikibot.Page', m):
with pytest.raises(pywikibot.UserBlocked):
global_linkspam.run_check('', False)
def test_run_check_blank():
m = mock.MagicMock()
n = mock.MagicMock()
n.text = ''
m.return_value = n
with mock.patch('pywikibot.Page', m):
with pytest.raises(pywikibot.UserBlocked):
global_linkspam.run_check('', False)
def test_run_check_override():
m = mock.MagicMock()
n = mock.MagicMock()
n.text = 'False'
m.return_value = n
with mock.patch('pywikibot.Page', m):
global_linkspam.run_check('', True)
| 29.02 | 75 | 0.643694 |
7945632d2ba7e7a49ada01d7671f239ba0ff6710 | 809 | py | Python | register/widgets.py | Xowap/Maiznet | bd564d4c93eb28dc87135e9d31dad9a921ea8cf6 | [
"WTFPL"
] | 1 | 2015-05-04T09:28:14.000Z | 2015-05-04T09:28:14.000Z | register/widgets.py | Xowap/Maiznet | bd564d4c93eb28dc87135e9d31dad9a921ea8cf6 | [
"WTFPL"
] | null | null | null | register/widgets.py | Xowap/Maiznet | bd564d4c93eb28dc87135e9d31dad9a921ea8cf6 | [
"WTFPL"
] | null | null | null | ########################################################################
# vim: fileencoding=utf-8 ts=8 noexpandtab :
#
# ~~~~ Maiznet.fr ~~~~
#
# -> register/widgets.py
#
#
# Copyright 2011 Rémy Sanchez <[email protected]>
#
# This file is distributed under the terms of the WTFPL. For more
# informations, see http://sam.zoy.org/wtfpl/COPYING
########################################################################
from django.forms.widgets import TextInput
class MacInput(TextInput):
class Media:
js = ('static/js/widget-mac.js',)
def render(self, name, value, attrs=None):
cls = ['maiz-input-widget']
try:
cls.append(attrs['class'])
except TypeError:
attrs = {}
except:
pass
attrs['class'] = ' '.join(cls)
return super(MacInput, self).render(name, value, attrs)
| 23.794118 | 72 | 0.555006 |
794563607fcee9f54634e77b06ab4f3c6f5e0e76 | 1,213 | py | Python | python/convert.py | jhidalgocarrio/e2calib | 50bb1edc656a8af2c369094805a8eb5f77369834 | [
"MIT"
] | null | null | null | python/convert.py | jhidalgocarrio/e2calib | 50bb1edc656a8af2c369094805a8eb5f77369834 | [
"MIT"
] | null | null | null | python/convert.py | jhidalgocarrio/e2calib | 50bb1edc656a8af2c369094805a8eb5f77369834 | [
"MIT"
] | null | null | null | import argparse
from pathlib import Path
import conversion.format
import conversion.h5writer
if __name__ == '__main__':
parser = argparse.ArgumentParser('Convert events to h5 format to prepare for calibration.')
parser.add_argument('input_file', help='Path to file which will be converted to hdf5 format.')
parser.add_argument('--output_file', '-o', default="", help='Output path for h5 file. Default: Input path but with h5 suffix.')
parser.add_argument('--topic', '-t', default='/dvs/events', help='Topic name for events if input file is a rosbag(ROS) or pocolog(ROCK).')
args = parser.parse_args()
input_file = Path(args.input_file)
assert input_file.exists()
if args.output_file:
output_file = Path(args.output_file)
assert output_file.suffix == '.h5'
else:
output_file = Path(input_file).parent / (input_file.stem + '.h5')
assert not output_file.exists(), f"{output_file} already exists."
topic = args.topic
event_generator = conversion.format.get_generator(input_file, delta_t_ms=1000, topic=topic)
h5writer = conversion.h5writer.H5Writer(output_file)
for events in event_generator():
h5writer.add_data(events)
| 37.90625 | 142 | 0.713108 |
7945648688ff4525cef3da59e18a3b80a4232227 | 1,261 | py | Python | jacdac/sound_level/client.py | microsoft/jacdac-python | 712ad5559e29065f5eccb5dbfe029c039132df5a | [
"MIT"
] | 1 | 2022-02-15T21:30:36.000Z | 2022-02-15T21:30:36.000Z | jacdac/sound_level/client.py | microsoft/jacdac-python | 712ad5559e29065f5eccb5dbfe029c039132df5a | [
"MIT"
] | null | null | null | jacdac/sound_level/client.py | microsoft/jacdac-python | 712ad5559e29065f5eccb5dbfe029c039132df5a | [
"MIT"
] | 1 | 2022-02-08T19:32:45.000Z | 2022-02-08T19:32:45.000Z | # Autogenerated file. Do not edit.
from jacdac.bus import Bus, SensorClient
from .constants import *
from typing import Optional
class SoundLevelClient(SensorClient):
"""
A sound level detector sensor, gives a relative indication of the sound level.
Implements a client for the `Sound level <https://microsoft.github.io/jacdac-docs/services/soundlevel>`_ service.
"""
def __init__(self, bus: Bus, role: str, *, missing_sound_level_value: float = None) -> None:
super().__init__(bus, JD_SERVICE_CLASS_SOUND_LEVEL, JD_SOUND_LEVEL_PACK_FORMATS, role)
self.missing_sound_level_value = missing_sound_level_value
@property
def sound_level(self) -> Optional[float]:
"""
The sound level detected by the microphone, _: /
"""
self.refresh_reading()
return self.register(JD_SOUND_LEVEL_REG_SOUND_LEVEL).float_value(self.missing_sound_level_value, 100)
@property
def enabled(self) -> Optional[bool]:
"""
Turn on or off the microphone.,
"""
return self.register(JD_SOUND_LEVEL_REG_ENABLED).bool_value()
@enabled.setter
def enabled(self, value: bool) -> None:
self.register(JD_SOUND_LEVEL_REG_ENABLED).set_values(value)
| 32.333333 | 117 | 0.69548 |
79456489c9193c254158a63c4be18a6eed3b0c0a | 3,446 | py | Python | yggdrasil/metaschema/datatypes/tests/test_JSONArrayMetaschemaType.py | astro-friedel/yggdrasil | 5ecbfd083240965c20c502b4795b6dc93d94b020 | [
"BSD-3-Clause"
] | null | null | null | yggdrasil/metaschema/datatypes/tests/test_JSONArrayMetaschemaType.py | astro-friedel/yggdrasil | 5ecbfd083240965c20c502b4795b6dc93d94b020 | [
"BSD-3-Clause"
] | null | null | null | yggdrasil/metaschema/datatypes/tests/test_JSONArrayMetaschemaType.py | astro-friedel/yggdrasil | 5ecbfd083240965c20c502b4795b6dc93d94b020 | [
"BSD-3-Clause"
] | null | null | null | import copy
import numpy as np
from yggdrasil import serialize
from yggdrasil.tests import assert_equal
from yggdrasil.metaschema.datatypes.JSONArrayMetaschemaType import (
JSONArrayMetaschemaType)
from yggdrasil.metaschema.datatypes.tests import test_MetaschemaType as parent
from yggdrasil.metaschema.datatypes.tests import (
test_ContainerMetaschemaType as container_utils)
def test_coerce():
r"""Test serialization of coerced types."""
typedef = {'type': 'array', 'items': [{'type': '1darray',
'subtype': 'float',
'title': 'a',
'precision': 64}]}
x = JSONArrayMetaschemaType(**typedef)
key_order = ['a']
msg_recv = [np.zeros(3, 'float64')]
msg_send_list = [msg_recv[0],
serialize.list2numpy(msg_recv, names=key_order),
serialize.list2pandas(msg_recv, names=key_order),
serialize.list2dict(msg_recv, names=key_order)]
def do_send_recv(msg_send):
msg_seri = x.serialize(msg_send, typedef=typedef, key_order=key_order)
assert_equal(x.deserialize(msg_seri)[0], msg_recv)
for y in msg_send_list:
do_send_recv(y)
class TestJSONArrayMetaschemaType(parent.TestMetaschemaType):
r"""Test class for JSONArrayMetaschemaType class with float."""
_mod = 'JSONArrayMetaschemaType'
_cls = 'JSONArrayMetaschemaType'
def __init__(self, *args, **kwargs):
super(TestJSONArrayMetaschemaType, self).__init__(*args, **kwargs)
self._value = []
self._fulldef = {'type': self.import_cls.name,
'items': []}
self._typedef = {'items': []}
for i in range(container_utils._count):
self._value.append(container_utils._vallist[i])
self._fulldef['items'].append(container_utils._deflist[i])
self._typedef['items'].append(container_utils._typedef[i])
self._valid_encoded = [self._fulldef]
self._valid_decoded = [self._value]
self._invalid_encoded += [{'type': self._fulldef['type'],
'items': [self._fulldef['items'][0]]}]
self._invalid_encoded.append(copy.deepcopy(self._fulldef))
del self._invalid_encoded[-1]['items'][0]['type']
self._invalid_encoded.append(copy.deepcopy(self._fulldef))
self._invalid_encoded[-1]['items'][0]['type'] = 'invalid'
self._compatible_objects = [(self._value, self._value, None)]
self._valid_normalize += [('1, 1 ', ['1', '1'])]
def test_encode_data_readable(self):
r"""Test corner case of encode_data_readable."""
self.import_cls.encode_data_readable(['1', '1'], {})
def test_container_errors(self):
r"""Test errors on container operations."""
self.assert_raises(RuntimeError, self.import_cls._assign, [], 10, None)
def test_item_dictionary(self):
r"""Test dictionary as items value."""
x = [1, 2, 3]
typedef = {'type': 'array', 'items': {'type': 'int'}}
self.import_cls.validate_instance(x, typedef)
self.import_cls.encode_data(x, typedef)
def test_validate_errors(self):
r"""Test error on validation of non-structured array."""
self.assert_raises(ValueError, self.import_cls.validate,
np.zeros(5), raise_errors=True)
| 43.075 | 79 | 0.625943 |
794564a7c1a1eaa79f5ecfe81c295793c0a54847 | 17,725 | py | Python | Lib/test/test_syntax.py | whtsky/python | 715a6e5035bb21ac49382772076ec4c630d6e960 | [
"PSF-2.0"
] | 2 | 2018-12-22T08:20:13.000Z | 2020-06-24T02:48:52.000Z | Lib/test/test_syntax.py | whtsky/python | 715a6e5035bb21ac49382772076ec4c630d6e960 | [
"PSF-2.0"
] | null | null | null | Lib/test/test_syntax.py | whtsky/python | 715a6e5035bb21ac49382772076ec4c630d6e960 | [
"PSF-2.0"
] | 3 | 2018-03-06T05:12:17.000Z | 2021-04-22T10:01:01.000Z | """This module tests SyntaxErrors.
Here's an example of the sort of thing that is tested.
>>> def f(x):
... global x
Traceback (most recent call last):
SyntaxError: name 'x' is local and global (<doctest test.test_syntax[0]>, line 1)
The tests all raise SyntaxErrors. They were created by checking
each C call that raises SyntaxError. There are several modules that
raise these exceptions-- ast.c, compile.c, future.c, pythonrun.c, and
symtable.c.
The parser itself outlaws a lot of invalid syntax. None of these
errors are tested here at the moment. We should add some tests; since
there are infinitely many programs with invalid syntax, we would need
to be judicious in selecting some.
The compiler generates a synthetic module name for code executed by
doctest. Since all the code comes from the same module, a suffix like
[1] is appended to the module name. As a consequence, changing the
order of tests in this module means renumbering all the errors after
it. (Maybe we should enable the ellipsis option for these tests.)
In ast.c, syntax errors are raised by calling ast_error().
Errors from set_context():
>>> obj.None = 1
Traceback (most recent call last):
SyntaxError: cannot assign to None (<doctest test.test_syntax[1]>, line 1)
>>> None = 1
Traceback (most recent call last):
SyntaxError: cannot assign to None (<doctest test.test_syntax[2]>, line 1)
It's a syntax error to assign to the empty tuple. Why isn't it an
error to assign to the empty list? It will always raise some error at
runtime.
>>> () = 1
Traceback (most recent call last):
SyntaxError: can't assign to () (<doctest test.test_syntax[3]>, line 1)
>>> f() = 1
Traceback (most recent call last):
SyntaxError: can't assign to function call (<doctest test.test_syntax[4]>, line 1)
>>> del f()
Traceback (most recent call last):
SyntaxError: can't delete function call (<doctest test.test_syntax[5]>, line 1)
>>> a + 1 = 2
Traceback (most recent call last):
SyntaxError: can't assign to operator (<doctest test.test_syntax[6]>, line 1)
>>> (x for x in x) = 1
Traceback (most recent call last):
SyntaxError: can't assign to generator expression (<doctest test.test_syntax[7]>, line 1)
>>> 1 = 1
Traceback (most recent call last):
SyntaxError: can't assign to literal (<doctest test.test_syntax[8]>, line 1)
>>> "abc" = 1
Traceback (most recent call last):
SyntaxError: can't assign to literal (<doctest test.test_syntax[9]>, line 1)
>>> `1` = 1
Traceback (most recent call last):
SyntaxError: can't assign to repr (<doctest test.test_syntax[10]>, line 1)
If the left-hand side of an assignment is a list or tuple, an illegal
expression inside that container should still cause a syntax error.
This test just checks a couple of cases rather than enumerating all of
them.
>>> (a, "b", c) = (1, 2, 3)
Traceback (most recent call last):
SyntaxError: can't assign to literal (<doctest test.test_syntax[11]>, line 1)
>>> [a, b, c + 1] = [1, 2, 3]
Traceback (most recent call last):
SyntaxError: can't assign to operator (<doctest test.test_syntax[12]>, line 1)
>>> a if 1 else b = 1
Traceback (most recent call last):
SyntaxError: can't assign to conditional expression (<doctest test.test_syntax[13]>, line 1)
From compiler_complex_args():
>>> def f(None=1):
... pass
Traceback (most recent call last):
SyntaxError: cannot assign to None (<doctest test.test_syntax[14]>, line 1)
From ast_for_arguments():
>>> def f(x, y=1, z):
... pass
Traceback (most recent call last):
SyntaxError: non-default argument follows default argument (<doctest test.test_syntax[15]>, line 1)
>>> def f(x, None):
... pass
Traceback (most recent call last):
SyntaxError: cannot assign to None (<doctest test.test_syntax[16]>, line 1)
>>> def f(*None):
... pass
Traceback (most recent call last):
SyntaxError: cannot assign to None (<doctest test.test_syntax[17]>, line 1)
>>> def f(**None):
... pass
Traceback (most recent call last):
SyntaxError: cannot assign to None (<doctest test.test_syntax[18]>, line 1)
From ast_for_funcdef():
>>> def None(x):
... pass
Traceback (most recent call last):
SyntaxError: cannot assign to None (<doctest test.test_syntax[19]>, line 1)
From ast_for_call():
>>> def f(it, *varargs):
... return list(it)
>>> L = range(10)
>>> f(x for x in L)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> f(x for x in L, 1)
Traceback (most recent call last):
SyntaxError: Generator expression must be parenthesized if not sole argument (<doctest test.test_syntax[23]>, line 1)
>>> f((x for x in L), 1)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> f(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11,
... i12, i13, i14, i15, i16, i17, i18, i19, i20, i21, i22,
... i23, i24, i25, i26, i27, i28, i29, i30, i31, i32, i33,
... i34, i35, i36, i37, i38, i39, i40, i41, i42, i43, i44,
... i45, i46, i47, i48, i49, i50, i51, i52, i53, i54, i55,
... i56, i57, i58, i59, i60, i61, i62, i63, i64, i65, i66,
... i67, i68, i69, i70, i71, i72, i73, i74, i75, i76, i77,
... i78, i79, i80, i81, i82, i83, i84, i85, i86, i87, i88,
... i89, i90, i91, i92, i93, i94, i95, i96, i97, i98, i99,
... i100, i101, i102, i103, i104, i105, i106, i107, i108,
... i109, i110, i111, i112, i113, i114, i115, i116, i117,
... i118, i119, i120, i121, i122, i123, i124, i125, i126,
... i127, i128, i129, i130, i131, i132, i133, i134, i135,
... i136, i137, i138, i139, i140, i141, i142, i143, i144,
... i145, i146, i147, i148, i149, i150, i151, i152, i153,
... i154, i155, i156, i157, i158, i159, i160, i161, i162,
... i163, i164, i165, i166, i167, i168, i169, i170, i171,
... i172, i173, i174, i175, i176, i177, i178, i179, i180,
... i181, i182, i183, i184, i185, i186, i187, i188, i189,
... i190, i191, i192, i193, i194, i195, i196, i197, i198,
... i199, i200, i201, i202, i203, i204, i205, i206, i207,
... i208, i209, i210, i211, i212, i213, i214, i215, i216,
... i217, i218, i219, i220, i221, i222, i223, i224, i225,
... i226, i227, i228, i229, i230, i231, i232, i233, i234,
... i235, i236, i237, i238, i239, i240, i241, i242, i243,
... i244, i245, i246, i247, i248, i249, i250, i251, i252,
... i253, i254, i255)
Traceback (most recent call last):
SyntaxError: more than 255 arguments (<doctest test.test_syntax[25]>, line 1)
The actual error case counts positional arguments, keyword arguments,
and generator expression arguments separately. This test combines the
three.
>>> f(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11,
... i12, i13, i14, i15, i16, i17, i18, i19, i20, i21, i22,
... i23, i24, i25, i26, i27, i28, i29, i30, i31, i32, i33,
... i34, i35, i36, i37, i38, i39, i40, i41, i42, i43, i44,
... i45, i46, i47, i48, i49, i50, i51, i52, i53, i54, i55,
... i56, i57, i58, i59, i60, i61, i62, i63, i64, i65, i66,
... i67, i68, i69, i70, i71, i72, i73, i74, i75, i76, i77,
... i78, i79, i80, i81, i82, i83, i84, i85, i86, i87, i88,
... i89, i90, i91, i92, i93, i94, i95, i96, i97, i98, i99,
... i100, i101, i102, i103, i104, i105, i106, i107, i108,
... i109, i110, i111, i112, i113, i114, i115, i116, i117,
... i118, i119, i120, i121, i122, i123, i124, i125, i126,
... i127, i128, i129, i130, i131, i132, i133, i134, i135,
... i136, i137, i138, i139, i140, i141, i142, i143, i144,
... i145, i146, i147, i148, i149, i150, i151, i152, i153,
... i154, i155, i156, i157, i158, i159, i160, i161, i162,
... i163, i164, i165, i166, i167, i168, i169, i170, i171,
... i172, i173, i174, i175, i176, i177, i178, i179, i180,
... i181, i182, i183, i184, i185, i186, i187, i188, i189,
... i190, i191, i192, i193, i194, i195, i196, i197, i198,
... i199, i200, i201, i202, i203, i204, i205, i206, i207,
... i208, i209, i210, i211, i212, i213, i214, i215, i216,
... i217, i218, i219, i220, i221, i222, i223, i224, i225,
... i226, i227, i228, i229, i230, i231, i232, i233, i234,
... i235, i236, i237, i238, i239, i240, i241, i242, i243,
... (x for x in i244), i245, i246, i247, i248, i249, i250, i251,
... i252=1, i253=1, i254=1, i255=1)
Traceback (most recent call last):
SyntaxError: more than 255 arguments (<doctest test.test_syntax[26]>, line 1)
>>> f(lambda x: x[0] = 3)
Traceback (most recent call last):
SyntaxError: lambda cannot contain assignment (<doctest test.test_syntax[27]>, line 1)
The grammar accepts any test (basically, any expression) in the
keyword slot of a call site. Test a few different options.
>>> f(x()=2)
Traceback (most recent call last):
SyntaxError: keyword can't be an expression (<doctest test.test_syntax[28]>, line 1)
>>> f(a or b=1)
Traceback (most recent call last):
SyntaxError: keyword can't be an expression (<doctest test.test_syntax[29]>, line 1)
>>> f(x.y=1)
Traceback (most recent call last):
SyntaxError: keyword can't be an expression (<doctest test.test_syntax[30]>, line 1)
From ast_for_expr_stmt():
>>> (x for x in x) += 1
Traceback (most recent call last):
SyntaxError: augmented assignment to generator expression not possible (<doctest test.test_syntax[31]>, line 1)
>>> None += 1
Traceback (most recent call last):
SyntaxError: cannot assign to None (<doctest test.test_syntax[32]>, line 1)
>>> f() += 1
Traceback (most recent call last):
SyntaxError: illegal expression for augmented assignment (<doctest test.test_syntax[33]>, line 1)
Test continue in finally in weird combinations.
A continue in a for loop under finally should be ok.
>>> def test():
... try:
... pass
... finally:
... for abc in range(10):
... continue
... print abc
>>> test()
9
Start simple: a continue in a finally should not be allowed.
>>> def test():
... for abc in range(10):
... try:
... pass
... finally:
... continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause (<doctest test.test_syntax[36]>, line 6)
This is essentially a continue in a finally which should not be allowed.
>>> def test():
... for abc in range(10):
... try:
... pass
... finally:
... try:
... continue
... except:
... pass
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause (<doctest test.test_syntax[37]>, line 7)
>>> def foo():
... try:
... pass
... finally:
... continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause (<doctest test.test_syntax[38]>, line 5)
>>> def foo():
... for a in ():
... try:
... pass
... finally:
... continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause (<doctest test.test_syntax[39]>, line 6)
>>> def foo():
... for a in ():
... try:
... pass
... finally:
... try:
... continue
... finally:
... pass
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause (<doctest test.test_syntax[40]>, line 7)
>>> def foo():
... for a in ():
... try: pass
... finally:
... try:
... pass
... except:
... continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause (<doctest test.test_syntax[41]>, line 8)
There is one test for a break that is not in a loop. The compiler
uses a single data structure to keep track of try-finally and loops,
so we need to be sure that a break is actually inside a loop. If it
isn't, there should be a syntax error.
>>> try:
... print 1
... break
... print 2
... finally:
... print 3
Traceback (most recent call last):
...
SyntaxError: 'break' outside loop (<doctest test.test_syntax[42]>, line 3)
This should probably raise a better error than a SystemError (or none at all).
In 2.5 there was a missing exception and an assert was triggered in a debug
build. The number of blocks must be greater than CO_MAXBLOCKS. SF #1565514
>>> while 1:
... while 2:
... while 3:
... while 4:
... while 5:
... while 6:
... while 8:
... while 9:
... while 10:
... while 11:
... while 12:
... while 13:
... while 14:
... while 15:
... while 16:
... while 17:
... while 18:
... while 19:
... while 20:
... while 21:
... while 22:
... break
Traceback (most recent call last):
...
SystemError: too many statically nested blocks
This tests assignment-context; there was a bug in Python 2.5 where compiling
a complex 'if' (one with 'elif') would fail to notice an invalid suite,
leading to spurious errors.
>>> if 1:
... x() = 1
... elif 1:
... pass
Traceback (most recent call last):
...
SyntaxError: can't assign to function call (<doctest test.test_syntax[44]>, line 2)
>>> if 1:
... pass
... elif 1:
... x() = 1
Traceback (most recent call last):
...
SyntaxError: can't assign to function call (<doctest test.test_syntax[45]>, line 4)
>>> if 1:
... x() = 1
... elif 1:
... pass
... else:
... pass
Traceback (most recent call last):
...
SyntaxError: can't assign to function call (<doctest test.test_syntax[46]>, line 2)
>>> if 1:
... pass
... elif 1:
... x() = 1
... else:
... pass
Traceback (most recent call last):
...
SyntaxError: can't assign to function call (<doctest test.test_syntax[47]>, line 4)
>>> if 1:
... pass
... elif 1:
... pass
... else:
... x() = 1
Traceback (most recent call last):
...
SyntaxError: can't assign to function call (<doctest test.test_syntax[48]>, line 6)
>>> f(a=23, a=234)
Traceback (most recent call last):
...
SyntaxError: keyword argument repeated (<doctest test.test_syntax[49]>, line 1)
"""
import re
import unittest
import warnings
from test import test_support
class SyntaxTestCase(unittest.TestCase):
def _check_error(self, code, errtext,
filename="<testcase>", mode="exec", subclass=None):
"""Check that compiling code raises SyntaxError with errtext.
errtext is a regular expression that must be present in the
text of the exception raised. If subclass is specified it
is the expected subclass of SyntaxError (e.g. IndentationError).
"""
try:
compile(code, filename, mode)
except SyntaxError, err:
if subclass and not isinstance(err, subclass):
self.fail("SyntaxError is not a %s" % subclass.__name__)
mo = re.search(errtext, str(err))
if mo is None:
self.fail("SyntaxError did not contain '%r'" % (errtext,))
else:
self.fail("compile() did not raise SyntaxError")
def test_assign_call(self):
self._check_error("f() = 1", "assign")
def test_assign_del(self):
self._check_error("del f()", "delete")
def test_global_err_then_warn(self):
# Bug tickler: The SyntaxError raised for one global statement
# shouldn't be clobbered by a SyntaxWarning issued for a later one.
source = re.sub('(?m)^ *:', '', """\
:def error(a):
: global a # SyntaxError
:def warning():
: b = 1
: global b # SyntaxWarning
:""")
warnings.filterwarnings(action='ignore', category=SyntaxWarning)
self._check_error(source, "global")
warnings.filters.pop(0)
def test_break_outside_loop(self):
self._check_error("break", "outside loop")
def test_delete_deref(self):
source = re.sub('(?m)^ *:', '', """\
:def foo(x):
: def bar():
: print x
: del x
:""")
self._check_error(source, "nested scope")
def test_unexpected_indent(self):
self._check_error("foo()\n bar()\n", "unexpected indent",
subclass=IndentationError)
def test_no_indent(self):
self._check_error("if 1:\nfoo()", "expected an indented block",
subclass=IndentationError)
def test_bad_outdent(self):
self._check_error("if 1:\n foo()\n bar()",
"unindent does not match .* level",
subclass=IndentationError)
def test_kwargs_last(self):
self._check_error("int(base=10, '2')", "non-keyword arg")
def test_main():
test_support.run_unittest(SyntaxTestCase)
from test import test_syntax
test_support.run_doctest(test_syntax, verbosity=True)
if __name__ == "__main__":
test_main()
| 34.960552 | 117 | 0.59165 |
7945652ac8933277a29bd83ef86443ee21101515 | 3,094 | py | Python | test/unit/test_embeddings.py | B-Czarnetzki/speechjoey | 97b0b98137bfaf0ffe15db9de6b38e37c7fb5572 | [
"MIT"
] | null | null | null | test/unit/test_embeddings.py | B-Czarnetzki/speechjoey | 97b0b98137bfaf0ffe15db9de6b38e37c7fb5572 | [
"MIT"
] | null | null | null | test/unit/test_embeddings.py | B-Czarnetzki/speechjoey | 97b0b98137bfaf0ffe15db9de6b38e37c7fb5572 | [
"MIT"
] | null | null | null | import torch
from speechjoey.embeddings import Embeddings
from .test_helpers import TensorTestCase
class TestEmbeddings(TensorTestCase):
def setUp(self):
self.emb_size = 10
self.vocab_size = 11
self.pad_idx = 1
seed = 42
torch.manual_seed(seed)
def test_size(self):
emb = Embeddings(embedding_dim=self.emb_size,
vocab_size=self.vocab_size,
padding_idx=self.pad_idx)
self.assertEqual(emb.lut.weight.shape,
torch.Size([self.vocab_size, self.emb_size]))
def test_pad_zeros(self):
emb = Embeddings(embedding_dim=self.emb_size,
vocab_size=self.vocab_size,
padding_idx=self.pad_idx)
# pad embedding should be zeros
self.assertTensorEqual(emb.lut.weight[self.pad_idx],
torch.zeros([self.emb_size]))
def test_freeze(self):
encoder = Embeddings(embedding_dim=self.emb_size,
vocab_size=self.vocab_size,
padding_idx=self.pad_idx,
freeze=True)
for n, p in encoder.named_parameters():
self.assertFalse(p.requires_grad)
def test_forward(self):
# fix the embedding weights
weights = self._get_random_embedding_weights()
emb = Embeddings(embedding_dim=self.emb_size,
vocab_size=self.vocab_size,
padding_idx=self.pad_idx)
self._fill_embeddings(emb, weights)
indices = torch.Tensor([0, 1, self.pad_idx, 9]).long()
embedded = emb.forward(x=indices)
# embedding operation is just slicing from weights matrix
self.assertTensorEqual(embedded, torch.index_select(input=weights,
index=indices, dim=0))
# after embedding, representations for PAD should still be zero
self.assertTensorEqual(embedded[2], torch.zeros([self.emb_size]))
def test_scale(self):
# fix the embedding weights
weights = self._get_random_embedding_weights()
emb = Embeddings(embedding_dim=self.emb_size,
vocab_size=self.vocab_size,
padding_idx=self.pad_idx,
scale=True)
emb.lut.weight.data = weights
indices = torch.Tensor([0, 1, self.pad_idx, 9]).long()
embedded = emb.forward(x=indices)
# now scaled
self.assertTensorNotEqual(
torch.index_select(input=weights, index=indices, dim=0), embedded)
self.assertTensorEqual(
torch.index_select(input=weights, index=indices, dim=0)*
(self.emb_size**0.5), embedded)
def _fill_embeddings(self, embeddings, weights):
embeddings.lut.weight.data = weights
def _get_random_embedding_weights(self):
weights = torch.rand([self.vocab_size, self.emb_size])
weights[self.pad_idx] = torch.zeros([self.emb_size])
return weights
| 39.164557 | 78 | 0.598901 |
7945654d1ec735ac9a46276a3be2606919e78003 | 3,296 | py | Python | wallet/server/Client.py | GauthierEmilien/4PJT-SupBank-API | a30add7f9187fe35ad539f4403021b5e8631a67a | [
"MIT"
] | null | null | null | wallet/server/Client.py | GauthierEmilien/4PJT-SupBank-API | a30add7f9187fe35ad539f4403021b5e8631a67a | [
"MIT"
] | null | null | null | wallet/server/Client.py | GauthierEmilien/4PJT-SupBank-API | a30add7f9187fe35ad539f4403021b5e8631a67a | [
"MIT"
] | null | null | null | from queue import Queue
from threading import RLock
from threading import Thread
from typing import List
import socketio
from blockchain.Blockchain import Blockchain
from gui import GUI
lock = RLock()
queue = Queue()
class Client(Thread):
nodes_info: List[dict] = []
connected_nodes: List[dict] = []
block_is_valid: List[bool] = []
block_is_valid_queue = Queue()
def __init__(self, server_ip: str, thread_name=None, blockchain: Blockchain = None, parent: GUI = None):
Thread.__init__(self, name=thread_name)
self.__sio = socketio.Client()
self.__server_ip = server_ip # IP of the server
self.__node_ip = '' # IP of the current node
self.__setup_callbacks()
self.__blockchain = blockchain
self.parent = parent
@classmethod
def send_to_every_nodes(cls, host: str, topic: str, data, disconnect=True, wait=False):
with lock:
for node in Client.nodes_info:
if node.get('host') != host:
try:
client = Client(node.get('host'))
client.start()
client.join()
client.send_message(topic, data, disconnect)
if wait:
client.wait()
except Exception as e:
print('error => {}'.format(e))
def __setup_callbacks(self):
self.__sio.on('connect', self.__on_connect)
self.__sio.on('disconnect', self.__on_disconnect)
self.__sio.on('nodes', self.__on_nodes)
self.__sio.on('ip', self.__on_ip)
self.__sio.on('blockchain', self.__on_blockchain)
self.__sio.on('block', self.__on_block)
def __on_connect(self):
print('\nCONNECT TO => {}'.format(self.__server_ip))
def __on_disconnect(self):
print('\nDISCONNECT FROM => {}'.format(self.__server_ip))
def __on_nodes(self, nodes):
with lock:
Client.nodes_info = nodes
print('\nNODES =>', Client.nodes_info)
def __on_ip(self, ip):
self.__node_ip = ip
def __on_blockchain(self, blocks: List[dict]):
if len(blocks) > len(self.__blockchain.get_blocks()):
self.__blockchain.update_all(blocks)
self.__blockchain.get_update()
self.__disconnect()
def __on_block(self, is_valid: str):
print('GET VALIDATION =>', is_valid)
Client.block_is_valid_queue.put(is_valid)
Client.block_is_valid_queue.task_done()
# Client.block_is_valid.append(is_valid)
self.__disconnect()
def __disconnect(self):
self.__sio.disconnect()
def run(self):
self.is_connected()
def is_connected(self):
try:
self.__sio.connect('http://{}:8000'.format(self.__server_ip))
return True
except Exception:
return False
def send_message(self, topic: str, data=None, disconnect: bool = True):
callback = self.__disconnect if disconnect else None
self.__sio.emit(topic, data, callback=callback)
def wait(self):
self.__sio.wait()
def get_node_ip(self):
return self.__node_ip
def set_server_ip(self, server_ip: str):
self.__server_ip = server_ip
| 31.390476 | 108 | 0.604369 |
794565994fadd21c00691f17243b621fbe5990e8 | 2,861 | py | Python | netboot/settings.py | tfmt/netboot | abd690d463dfd14488b0d295512d61ce5c5bc97d | [
"Apache-2.0"
] | null | null | null | netboot/settings.py | tfmt/netboot | abd690d463dfd14488b0d295512d61ce5c5bc97d | [
"Apache-2.0"
] | null | null | null | netboot/settings.py | tfmt/netboot | abd690d463dfd14488b0d295512d61ce5c5bc97d | [
"Apache-2.0"
] | null | null | null | """
Django settings for netboot project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '75-w)c0q^)w51ajtdwoeb%m1qbqdfvqla#6#uem7-=(igtjca@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'dist',
'my',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'netboot.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'netboot.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = False
USE_L10N = False
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/assets/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'assets'),
)
AUTH_USER_MODEL = 'my.User'
try:
from netboot.local_settings import *
except ImportError:
pass
| 24.663793 | 71 | 0.694512 |
794567015bbbf6a8fe8438be68b19430a479750f | 2,229 | py | Python | hashTable/containsDuplicateII.py | Jiganesh/High-On-DSA | 044a3941cec443a87e79d97962551d75a9639a57 | [
"MIT"
] | 76 | 2021-12-12T08:42:20.000Z | 2022-03-31T19:48:46.000Z | hashTable/containsDuplicateII.py | Jiganesh/High-On-DSA | 044a3941cec443a87e79d97962551d75a9639a57 | [
"MIT"
] | 4 | 2022-01-04T09:58:39.000Z | 2022-03-30T17:00:39.000Z | hashTable/containsDuplicateII.py | Jiganesh/High-On-DSA | 044a3941cec443a87e79d97962551d75a9639a57 | [
"MIT"
] | 13 | 2021-12-12T14:44:41.000Z | 2022-03-10T14:08:20.000Z | # https://leetcode.com/problems/contains-duplicate-ii/
class Solution:
# Runtime: 720 ms, faster than 65.63% of Python3 online submissions for Contains Duplicate II.
# Memory Usage: 32.1 MB, less than 9.33% of Python3 online submissions for Contains Duplicate II.
def containsNearbyDuplicate(self, nums,k) -> bool:
dictionary = {}
for i in range (len(nums)):
if nums[i] in dictionary :
dictionary[nums[i]].append(i)
else :
dictionary[nums[i]] =[i]
for i in dictionary :
if len(dictionary[i]) >1:
for j in range (len(dictionary[i])-1):
if abs(dictionary[i][j+1]-dictionary[i][j]) <=k:
return True
return False
# Above Solution Optimized
# Runtime: 632 ms, faster than 89.52% of Python3 online submissions for Contains Duplicate II.
# Memory Usage: 27.3 MB, less than 23.83% of Python3 online submissions for Contains Duplicate II.
def containsNearbyDuplicate(self, nums, k) -> bool:
dictionary = {}
for i in range (len(nums)):
if nums[i] in dictionary and abs(dictionary[nums[i]]-i) <=k:
dictionary[nums[i]]=i
return True
else :
dictionary[nums[i]] = i
return False
# Same as Above but Aesthetic Code
def containsNearbyDuplicate(self, nums, k) -> bool:
dictionary = {}
for i in range (len(nums)):
if nums[i] in dictionary and abs(dictionary[nums[i]]-i) <=k:
return True
dictionary[nums[i]] = i
return False
# Rolling Window Approach (IMP)
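# Keep a set holding only the last k elements seen: once the window grows
# past size k, evict the element that slid out (nums[idx-k-1]); finding the
# current value already in the set means two equal values at most k apart.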
def containsNearbyDuplicate(self, nums, k) -> bool:
rolling_window = set()
for idx, num in enumerate(nums):
if idx > k:
rolling_window.remove(nums[idx-k-1])
if num in rolling_window:
return True
rolling_window.add(num)
return False
print(Solution().containsNearbyDuplicate([1,2,3,1]))
| 33.268657 | 102 | 0.536563 |
7945677dbb3035e2cbfa4eec17962ac52896e77e | 31,205 | py | Python | jumeg/decompose/group_ica.py | fboers/jumeg | e04896989faf72f4dbe7adf136e4d158d212f24a | [
"BSD-3-Clause"
] | 6 | 2015-04-10T07:13:07.000Z | 2021-12-12T04:04:37.000Z | jumeg/decompose/group_ica.py | fboers/jumeg | e04896989faf72f4dbe7adf136e4d158d212f24a | [
"BSD-3-Clause"
] | 112 | 2015-01-07T10:19:24.000Z | 2022-02-01T15:48:16.000Z | jumeg/decompose/group_ica.py | fboers/jumeg | e04896989faf72f4dbe7adf136e4d158d212f24a | [
"BSD-3-Clause"
] | 22 | 2015-03-11T12:19:50.000Z | 2021-11-20T04:24:42.000Z | # Authors: Lukas Breuer <[email protected]>
"""
----------------------------------------------------------------------
--- jumeg.decompose.group_ica.py -------------------------------------
----------------------------------------------------------------------
author : Lukas Breuer
email : [email protected]
last update: 09.11.2016
version : 1.0
----------------------------------------------------------------------
Simple script to perform group ICA in source space
----------------------------------------------------------------------
"""
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# define some global file ending pattern
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
img_src_group_ica = ",src_group_ICA"
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# perform FourierICA on group data in source space
# Note: here the parameters are optimized for resting
# state data
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def group_fourierICA_src_space_resting_state(fname_raw,
ica_method='fourierica', # parameter for ICA method
nrep=50, # parameter for ICASSO
src_loc_method='dSPM', snr=1.0, # parameter for inverse estimation
inv_pattern='-src-meg-fspace-inv.fif',
stim_name=None, stim_id=None, # parameter for epoch generation
corr_event_picking=None,
stim_delay=0.0,
tmin=0.0, tmax=1.0,
average=False,
flow=4., fhigh=34., # parameter for Fourier transformation
remove_outliers=True,
hamming_data=True,
dim_reduction='MDL',
pca_dim=None, cost_function='g2', # parameter for complex ICA estimation
lrate=0.2, complex_mixing=True,
conv_eps=1e-9, max_iter=5000,
envelopeICA=False,
interpolate_bads=True,
decim_epochs=None,
fnout=None, # parameter for saving the results
verbose=True):
"""
Module to perform group FourierICA on resting-state data, optionally in
combination with ICASSO: if 'nrep'=1 only FourierICA is performed,
if 'nrep'>1 FourierICA is performed in combination with ICASSO.
For information about the parameters see
jumeg.decompose.group_ica.group_fourierICA_src_space()
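Example
-------
A minimal call sketch (the file names are hypothetical placeholders; a
matching '-src-meg-fspace-inv.fif' inverse solution is assumed to exist
for each raw file):
    fn_list = ['subj01-raw.fif', 'subj02-raw.fif']
    group_fourierICA_src_space_resting_state(fn_list)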
"""
# call routine for group FourierICA
group_fourierICA_src_space(fname_raw, ica_method=ica_method,
nrep=nrep, src_loc_method=src_loc_method,
snr=snr, inv_pattern=inv_pattern,
stim_name=stim_name, stim_id=stim_id,
corr_event_picking=corr_event_picking,
stim_delay=stim_delay, tmin=tmin, tmax=tmax,
average=average, flow=flow, fhigh=fhigh,
remove_outliers=remove_outliers,
hamming_data=hamming_data,
dim_reduction=dim_reduction, pca_dim=pca_dim,
cost_function=cost_function, lrate=lrate,
complex_mixing=complex_mixing, conv_eps=conv_eps,
max_iter=max_iter, envelopeICA=envelopeICA,
interpolate_bads=interpolate_bads,
decim_epochs=decim_epochs, fnout=fnout,
verbose=verbose)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# perform FourierICA on group data in source space
# Note: Here the parameters are optimized for evoked
# data
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def group_fourierICA_src_space(fname_raw,
ica_method='fourierica', # parameter for ICA method
nrep=50, # parameter for ICASSO
src_loc_method='dSPM', snr=1.0, # parameter for inverse estimation
inv_pattern='-src-meg-fspace-inv.fif',
stim_name='STI 014', stim_id=1, # parameter for epoch generation
corr_event_picking=None,
stim_delay=0.0,
tmin=-0.2, tmax=0.8,
average=False,
flow=4., fhigh=34., # parameter for Fourier transformation
remove_outliers=False,
hamming_data=False,
dim_reduction='MDL',
pca_dim=None, cost_function='g2', # parameter for complex ICA estimation
lrate=0.2, complex_mixing=True,
conv_eps=1e-9, max_iter=5000,
envelopeICA=False,
interpolate_bads=True,
decim_epochs=None,
fnout=None, # parameter for saving the results
verbose=True):
"""
Module to perform group FourierICA, optionally in combination with
ICASSO: if 'nrep'=1 only FourierICA is performed, if 'nrep'>1
FourierICA is performed in combination with ICASSO.
Parameters
----------
fname_raw: list of strings
filename(s) of the pre-processed raw file(s)
ica_method: string
which ICA method should be used for the group ICA?
You can chose between 'extended-infomax', 'fastica',
'fourierica' and 'infomax'
default: ica_method='fourierica'
nrep: integer
number of repetitions ICA, i.e. ICASSO, should be performed
default: nrep=50
src_loc_method: string
method used for source localization.
default: src_loc_method='dSPM'
snr: float
signal-to-noise ratio for performing source
localization --> for single epochs an snr of 1.0 is recommended,
if keyword 'average' is set one should use snr=3.0
default: snr=1.0
inv_pattern: string
String containing the ending pattern of the inverse
solution. Note, here fspace is used if the inverse
solution is estimated in the Fourier space for later
applying Fourier (i.e., complex) ICA
default: inv_pattern='-src-meg-fspace-inv.fif'
stim_name: string
name of the stimulus channel. Note, for
applying FourierCIA data are chopped around stimulus
onset. If not set data are chopped in overlapping
windows
default: stim_names='STI 014'
stim_id: integer or list of integers
list containing the event IDs
default: stim_id=1
corr_event_picking: string
if set should contain the complete python path and
name of the function used to identify only the correct events
default: corr_event_picking=None
stim_delay: float
Stimulus delay in seconds
default: stim_delay=0.0
tmin: float
time of interest prior to stimulus onset for epoch
generation (in seconds)
default: tmin=-0.2
tmax: float
time of interest after the stimulus onset for epoch
generation (in seconds)
default: tmax=0.8
average: bool
should data be averaged across subjects before
FourierICA application? Note, averaged data require
less memory!
default: average=False
flow: float
lower frequency border for estimating the optimal
de-mixing matrix using FourierICA
default: flow=4.0
fhigh: float
upper frequency border for estimating the optimal
de-mixing matrix using FourierICA
default: fhigh=34.0
Note: here the default flow and fhigh are chosen to
contain:
- theta (4-7Hz)
- low (7.5-9.5Hz) and high alpha (10-12Hz),
- low (13-23Hz) and high beta (24-34Hz)
remove_outliers: If set outliers are removed from the Fourier
transformed data.
Outliers are defined as windows with large log-average power (LAP)
LAP_{c,t} = log \sum_{f}{|X_{c,tf}|^2}
where c, t and f are channels, window time-onsets and frequencies,
respectively. The threshold is defined as |mean(LAP)+3 std(LAP)|.
This process can be bypassed or replaced by specifying a function
handle as an optional parameter.
remove_outliers=False
hamming_data: boolean
if set a hamming window is applied to each
epoch prior to Fourier transformation
default: hamming_data=False
dim_reduction: string {'', 'AIC', 'BIC', 'GAP', 'MDL', 'MIBS', 'explVar'}
Method for dimension selection. For further information about
the methods please check the script 'dimension_selection.py'.
default: dim_reduction='MDL'
pca_dim: Integer
The number of components used for PCA decomposition.
default: pca_dim=None
cost_function: string
which cost-function should be used in the complex
ICA algorithm
'g1': g_1(y) = 1 / (2 * np.sqrt(lrate + y))
'g2': g_2(y) = 1 / (lrate + y)
'g3': g_3(y) = y
default: cost_function='g2'
lrate: float
learning rate which should be used in the applied
ICA algorithm
default: lrate=0.2
complex_mixing: bool
if mixing matrix should be real or complex
default: complex_mixing=True
conv_eps: float
iteration stop when weight changes are smaller
then this number
default: conv_eps = 1e-9
max_iter: integer
maximum number of iterations used in FourierICA
default: max_iter=5000
envelopeICA: if set ICA is estimated on the envelope
of the Fourier transformed input data, i.e., the
mixing model is |x|=As
default: envelopeICA=False
interpolate_bads: bool
if set bad channels are interpolated (using the
mne routine raw.interpolate_bads()).
default: interpolate_bads=True
decim_epochs: integer
if set the number of epochs will be reduced (per
subject) to that number for the estimation of the demixing matrix.
Note: the epochs were chosen randomly from the complete set of
epochs.
default: decim_epochs=None
fnout: string
output filename of the result structure. If not set the filename
is generated automatically.
default: fnout=None
verbose: bool, str, int, or None
If not None, override default verbose level
(see mne.verbose).
default: verbose=True
Return
------
groupICA_obj: dictionary
Group ICA information stored in a dictionary. The dictionary
has following keys:
'fn_list': List of filenames which where used to estimate the
group ICA
'W_orig': estimated de-mixing matrix
'A_orig': estimated mixing matrix
'quality': quality index of the clustering between
components belonging to one cluster
(between 0 and 1; 1 refers to small clusters,
i.e., components in one cluster are highly similar)
'icasso_obj': ICASSO object. For further information
please have a look into the ICASSO routine
'fourier_ica_obj': FourierICA object. For further information
please have a look into the FourierICA routine
fnout: string
filename where the 'groupICA_obj' is stored
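Example
-------
A minimal call sketch (the file names are hypothetical placeholders; a
matching '-src-meg-fspace-inv.fif' inverse solution is assumed to exist
for each raw file, all other keyword arguments keep their defaults):
    fn_list = ['subj01-audi-raw.fif', 'subj02-audi-raw.fif']
    groupICA_obj, fnout = group_fourierICA_src_space(fn_list, stim_id=1)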
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
from jumeg.decompose.icasso import JuMEG_icasso
from mne import set_log_level
import numpy as np
from os.path import dirname, join
from pickle import dump
# set log level to 'WARNING'
set_log_level('WARNING')
# ------------------------------------------
# check input parameter
# ------------------------------------------
# filenames
if isinstance(fname_raw, list):
fn_list = fname_raw
else:
fn_list = [fname_raw]
# -------------------------------------------
# set some path parameter
# -------------------------------------------
fn_inv = []
for fn_raw in fn_list:
fn_inv.append(fn_raw[:fn_raw.rfind('-raw.fif')] + inv_pattern)
# ------------------------------------------
# apply FourierICA combined with ICASSO
# ------------------------------------------
icasso_obj = JuMEG_icasso(nrep=nrep, fn_inv=fn_inv,
src_loc_method=src_loc_method,
morph2fsaverage=True,
ica_method=ica_method,
cost_function=cost_function,
dim_reduction=dim_reduction,
decim_epochs=decim_epochs,
tICA=False, snr=snr, lrate=lrate)
W_orig, A_orig, quality, fourier_ica_obj \
= icasso_obj.fit(fn_list, average=average,
stim_name=stim_name,
event_id=stim_id, pca_dim=pca_dim,
stim_delay=stim_delay,
tmin_win=tmin, tmax_win=tmax,
flow=flow, fhigh=fhigh,
max_iter=max_iter, conv_eps=conv_eps,
complex_mixing=complex_mixing,
envelopeICA=envelopeICA,
hamming_data=hamming_data,
remove_outliers=remove_outliers,
cost_function=cost_function,
interpolate_bads=interpolate_bads,
corr_event_picking=corr_event_picking,
verbose=verbose)
# ------------------------------------------
# save results to disk
# ------------------------------------------
# generate dictionary to save results
groupICA_obj = {'fn_list': fn_list,
'W_orig': W_orig,
'A_orig': A_orig,
'quality': quality,
'icasso_obj': icasso_obj,
'fourier_ica_obj': fourier_ica_obj}
# check if the output filename is already set
if not fnout:
# generate filename for output structure
if isinstance(stim_id, (list, tuple)):
fn_base = "group_FourierICA_combined"
for id in np.sort(stim_id)[::-1]:
fn_base += "_%ddB" % id
elif isinstance(stim_id, int):
fn_base = "group_FourierICA_%ddB" % stim_id
else:
            fn_base = "group_ICA_resting_state"
# write file to disk
fnout = join(dirname(dirname(fname_raw[0])), fn_base + ".obj")
with open(fnout, "wb") as filehandler:
dump(groupICA_obj, filehandler)
# return dictionary
return groupICA_obj, fnout
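# ----------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the original
# module): the result dictionary written above is a plain pickle file,
# so it can be read back as shown below. 'fn_result' is a placeholder
# for the filename returned as 'fnout' by the routine above.
# ----------------------------------------------------------------------
def _example_load_group_ica_result(fn_result):
    from pickle import load
    with open(fn_result, "rb") as filehandler:
        groupICA_obj = load(filehandler)
    # de-mixing and mixing matrices estimated by the group FourierICA
    return groupICA_obj['W_orig'], groupICA_obj['A_orig']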
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# get time courses of FourierICA components
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def get_group_fourierICA_time_courses(groupICA_obj, event_id=None,
resp_id=None, stim_delay=0,
unfiltered=True,
corr_event_picking=None,
baseline=(None, None)):
"""
Module to get time courses from the FourierICA components.
    Note: to save memory, the time courses are not stored during
    group ICA estimation; estimating them here will therefore
    take a while.
Parameters
----------
    groupICA_obj: either the filename of the group ICA object or an
        already loaded groupICA object
event_id: Id of the event of interest to be considered in
the stimulus channel.
default: event_id=None
resp_id: Response IDs for correct event estimation. Note:
Must be in the order corresponding to the 'event_id'
default: resp_id=None
stim_delay: stimulus delay in milliseconds
default: stim_delay=0
unfiltered: if set data are not filtered prior to time-course
generation
default: unfiltered=True
    corr_event_picking: if set, it should contain the complete Python
        path and name of the function used to identify only the
        correct events
default: corr_event_picking=None
baseline: If set baseline correction is applied to epochs
prior to Fourier transformation
default: baseline=(None, None)
Return
------
temporal_envelope_all: list of arrays containing
the temporal envelopes.
src_loc: array
3D array containing the source localization
data used for FourierICA estimation
(nfreq x nepochs x nvoxel)
vert: list
list containing two arrays with the order
of the vertices.
sfreq: float
sampling frequency of the data
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
import numpy as np
from mne.externals.six import string_types
from scipy import fftpack
# ------------------------------------------
    # test if 'groupICA_obj' is a string (filename) or
    # already the loaded object
# ------------------------------------------
if isinstance(groupICA_obj, string_types):
from pickle import load
with open(groupICA_obj, "rb") as filehandler:
groupICA_obj = load(filehandler)
icasso_obj = groupICA_obj['icasso_obj']
fourier_ica_obj = groupICA_obj['fourier_ica_obj']
fn_list = groupICA_obj['fn_list']
if not isinstance(fn_list, list):
fn_list = [fn_list]
nfn_list = len(fn_list)
# ------------------------------------------
# check if FourierICA or temporal ICA was
# performed
# ------------------------------------------
if fourier_ica_obj:
average_epochs = False
hamming_data = fourier_ica_obj.hamming_data
remove_outliers = fourier_ica_obj.remove_outliers
else:
average_epochs = True
hamming_data = False
remove_outliers = False
# check if we have more than one stimulus ID
if not event_id:
event_id = icasso_obj.event_id
if not isinstance(event_id, (list, tuple)):
event_id = [event_id]
nstim = len(event_id)
temporal_envelope_all = np.empty((nstim, 0)).tolist()
# ------------------------------------------
# loop over all stimulus IDs to get time
# courses
# ------------------------------------------
for istim in range(nstim):
# get current stimulus and response ID
stim_id = event_id[istim]
id_response = resp_id[istim]
# ------------------------------------------
# loop over all files
# ------------------------------------------
for idx in range(nfn_list):
# ------------------------------------------
# transform data to source space
# ------------------------------------------
# get some parameter
fn_raw = fn_list[idx]
tmin, tmax = icasso_obj.tmin_win, icasso_obj.tmax_win
win_length_sec = (tmax - tmin)
flow, fhigh = icasso_obj.flow, icasso_obj.fhigh
_, src_loc, vert, _, _, sfreq, _ = \
icasso_obj.prepare_data_for_fit(fn_raw, stim_name=icasso_obj.stim_name,
tmin_stim=tmin, tmax_stim=tmax,
flow=flow, fhigh=fhigh,
event_id=[stim_id],
resp_id=[id_response],
stim_delay=stim_delay,
hamming_data=hamming_data,
corr_event_picking=corr_event_picking,
fn_inv=icasso_obj.fn_inv[idx],
averaged_epochs=average_epochs,
baseline=baseline,
remove_outliers=remove_outliers,
unfiltered=unfiltered)
# normalize source data
fftsize, nwindows, nvoxel = src_loc.shape
nrows_Xmat_c = fftsize*nwindows
Xmat_c = src_loc.reshape((nrows_Xmat_c, nvoxel), order='F')
dmean = np.mean(Xmat_c, axis=0).reshape((1, nvoxel))
dstd = np.std(Xmat_c, axis=0).reshape((1, nvoxel))
# -------------------------------------------
# get some parameter
# -------------------------------------------
ncomp, nvoxel = groupICA_obj['W_orig'].shape
if fourier_ica_obj:
win_ntsl = int(np.floor(sfreq * win_length_sec))
else:
win_ntsl = fftsize
startfftind = int(np.floor(flow * win_length_sec))
fft_act = np.zeros((ncomp, win_ntsl), dtype=np.complex)
# -------------------------------------------
# define result arrays
# -------------------------------------------
if idx == 0:
# we have to double the number of components as we separate the
# results for left and right hemisphere
# act = []
temporal_envelope = []
# act_cur = np.zeros((ncomp, nwindows, fftsize), dtype=np.complex)
temporal_envelope_cur = np.zeros((nwindows, ncomp, win_ntsl))
times = np.arange(win_ntsl)/sfreq + tmin
# -------------------------------------------
# loop over all epochs
# -------------------------------------------
for iepoch in range(nwindows):
# get independent components
src_loc_zero_mean = (src_loc[:, iepoch, :] - np.dot(np.ones((fftsize, 1)), dmean)) / \
np.dot(np.ones((fftsize, 1)), dstd)
# activations in both hemispheres
act = np.dot(groupICA_obj['W_orig'], src_loc_zero_mean.transpose())
# generate temporal profiles:
# apply inverse STFT to get temporal envelope
if fourier_ica_obj:
fft_act[:, startfftind:(startfftind+fftsize)] = act
temporal_envelope_cur[iepoch, :, :] = fftpack.ifft(fft_act, n=win_ntsl, axis=1).real
else:
temporal_envelope_cur[iepoch, :, :] = act.transpose([1, 0, 2])
# store data in list
temporal_envelope.append(temporal_envelope_cur)
# act.append(act_cur)
# concatenate result data
temporal_envelope = np.asarray(np.concatenate(temporal_envelope))
# act = np.concatenate(act, axis=1)
# -------------------------------------------
# collecting time courses of interest
# -------------------------------------------
temporal_envelope_all[istim].append(temporal_envelope.real)
return temporal_envelope_all, src_loc, vert, sfreq
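# ----------------------------------------------------------------------
# Illustrative sketch (editorial addition): a minimal call of the
# routine above on a stored group ICA result. The filename is a
# placeholder assumption, and the event/response IDs must match those
# used when the group ICA was estimated.
# ----------------------------------------------------------------------
def _example_get_time_courses(fn_groupICA_obj="group_FourierICA_60dB.obj"):
    temporal_envelope_all, src_loc, vert, sfreq = \
        get_group_fourierICA_time_courses(fn_groupICA_obj, event_id=1,
                                          resp_id=[1], stim_delay=0,
                                          unfiltered=True)
    return temporal_envelope_all, src_loc, vert, sfreq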
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# plot FourierICA results
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def plot_group_fourierICA(fn_groupICA_obj,
stim_id=1, stim_delay=0,
resp_id=None,
corr_event_picking=None,
global_scaling=True,
subjects_dir=None,
bar_plot=False):
"""
Interface to plot the results from group FourierICA
Parameters
----------
fn_groupICA_obj: filename of the group ICA object
stim_id: Id of the event of interest to be considered in
the stimulus channel. Only of interest if 'stim_name'
is set
default: event_id=1
stim_delay: stimulus delay in milliseconds
default: stim_delay=0
resp_id: Response IDs for correct event estimation. Note:
Must be in the order corresponding to the 'event_id'
default: resp_id=None
corr_event_picking: string
        if set, it should contain the complete Python path and
        name of the function used to identify only the correct events
default: corr_event_picking=None
subjects_dir: string
        If the subjects directory does not conform with
        the system variable 'SUBJECTS_DIR', this parameter should be set
default: subjects_dir=None
bar_plot: boolean
        If set, the results of the time-frequency analysis
        are shown as a bar plot. This option is recommended
when FourierICA was applied to resting-state data
default: bar_plot=False
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
from jumeg.decompose.fourier_ica_plot import plot_results_src_space
from mne import set_log_level
from os.path import exists
from pickle import dump, load
# set log level to 'WARNING'
set_log_level('WARNING')
# ------------------------------------------
# read in group FourierICA object
# ------------------------------------------
with open(fn_groupICA_obj, "rb") as filehandler:
groupICA_obj = load(filehandler)
icasso_obj = groupICA_obj['icasso_obj']
win_length_sec = icasso_obj.tmax_win - icasso_obj.tmin_win
temp_profile_names = ["Event-ID %i" % i for i in groupICA_obj['icasso_obj'].event_id]
# ------------------------------------------
# check if time-courses already exist
# ------------------------------------------
fn_temporal_envelope = fn_groupICA_obj[:-4] + '_temporal_envelope.obj'
# generate time courses if they do not exist
if not exists(fn_temporal_envelope):
# import necessary modules
from jumeg.decompose.group_ica import get_group_fourierICA_time_courses
# generate time courses
temporal_envelope, src_loc, vert, sfreq = \
get_group_fourierICA_time_courses(groupICA_obj, event_id=stim_id,
stim_delay=stim_delay, resp_id=resp_id,
corr_event_picking=corr_event_picking,
unfiltered=False, baseline=(None, 0))
# save data
temp_env_obj = {'temporal_envelope': temporal_envelope,
'src_loc': src_loc, 'vert': vert, 'sfreq': sfreq}
with open(fn_temporal_envelope, "wb") as filehandler:
dump(temp_env_obj, filehandler)
# when data are stored read them in
else:
# read data in
with open(fn_temporal_envelope, "rb") as filehandler:
temp_env_obj = load(filehandler)
# get data
temporal_envelope = temp_env_obj['temporal_envelope']
src_loc = temp_env_obj['src_loc']
vert = temp_env_obj['vert']
# ------------------------------------------
# check if classification already exists
# ------------------------------------------
if 'classification' in groupICA_obj and\
'mni_coords' in groupICA_obj and\
'labels' in groupICA_obj:
classification = groupICA_obj['classification']
mni_coords = groupICA_obj['mni_coords']
labels = groupICA_obj['labels']
else:
classification = {}
mni_coords = []
labels = None
# ------------------------------------------
# plot "group" results
# ------------------------------------------
fnout_src_fourier_ica = fn_groupICA_obj[:fn_groupICA_obj.rfind('.obj')] + \
img_src_group_ica
mni_coords, classification, labels =\
plot_results_src_space(groupICA_obj['fourier_ica_obj'],
groupICA_obj['W_orig'], groupICA_obj['A_orig'],
src_loc_data=src_loc, vertno=vert,
subjects_dir=subjects_dir,
tpre=icasso_obj.tmin_win,
win_length_sec=win_length_sec,
flow=icasso_obj.flow, fhigh=icasso_obj.fhigh,
fnout=fnout_src_fourier_ica,
tICA=icasso_obj.tICA,
global_scaling=global_scaling,
temporal_envelope=temporal_envelope,
temp_profile_names=temp_profile_names,
classification=classification,
mni_coords=mni_coords, labels=labels,
bar_plot=bar_plot)
# ------------------------------------------
# adjust groupICA_obj with the new
# parameters if they didn't exist before
# ------------------------------------------
if 'classification' not in groupICA_obj and\
'mni_coords' not in groupICA_obj and\
'labels' not in groupICA_obj:
groupICA_obj['classification'] = classification
groupICA_obj['mni_coords'] = mni_coords
groupICA_obj['labels'] = labels
# write new groupICA_obj back to disk
with open(fn_groupICA_obj, "wb") as filehandler:
dump(groupICA_obj, filehandler)
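# ----------------------------------------------------------------------
# Illustrative sketch (editorial addition): a minimal call of the plot
# routine above. The filename and stimulus ID are placeholders for
# demonstration; 'subjects_dir' is only needed if SUBJECTS_DIR is unset.
# ----------------------------------------------------------------------
def _example_plot_group_fourierICA(fn_groupICA_obj="group_FourierICA_60dB.obj"):
    plot_group_fourierICA(fn_groupICA_obj, stim_id=1, stim_delay=0,
                          global_scaling=True, bar_plot=False)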
| 41.662216 | 107 | 0.505913 |
794568dda21b0258b434302c486a07f25d2ccd3b | 4,123 | py | Python | viper/core/ui/cmd/open.py | acd62081/viper | 35d5a0ed3879340aa8d4ba3fc7c8927174e850e4 | [
"BSD-3-Clause"
] | 1,131 | 2015-06-25T00:52:07.000Z | 2022-03-25T11:31:08.000Z | viper/core/ui/cmd/open.py | acd62081/viper | 35d5a0ed3879340aa8d4ba3fc7c8927174e850e4 | [
"BSD-3-Clause"
] | 444 | 2015-06-26T12:01:03.000Z | 2022-02-09T14:23:26.000Z | viper/core/ui/cmd/open.py | acd62081/viper | 35d5a0ed3879340aa8d4ba3fc7c8927174e850e4 | [
"BSD-3-Clause"
] | 354 | 2015-06-25T09:33:52.000Z | 2022-03-19T07:30:09.000Z | # -*- coding: utf-8 -*-
# This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
import os
import tempfile
import argparse
from viper.common.abstracts import Command
from viper.common.network import download
from viper.core.database import Database
from viper.core.storage import get_sample_path
from viper.core.session import __sessions__
class Open(Command):
"""
This command is used to open a session on a given file.
It either can be an external file path, or a SHA256 hash of a file which
has been previously imported and stored.
While the session is active, every operation and module executed will be
run against the file specified.
"""
cmd = "open"
description = "Open a file"
fs_path_completion = True
def __init__(self):
super(Open, self).__init__()
self.parser = argparse.ArgumentParser(prog=self.cmd, description=self.description,
epilog="You can also specify a MD5 or SHA256 hash to a previously stored file in order to open a session on it.")
group = self.parser.add_mutually_exclusive_group()
group.add_argument('-f', '--file', action='store_true', help="Target is a file")
group.add_argument('-u', '--url', action='store_true', help="Target is a URL")
group.add_argument('-l', '--last', action='store_true', help="Target is the entry number from the last find command's results")
self.parser.add_argument('-t', '--tor', action='store_true', help="Download the file through Tor")
self.parser.add_argument("value", metavar='PATH, URL, HASH or ID', nargs='*', help="Target to open. Hash can be md5 or sha256. ID has to be from the last search.")
def run(self, *args):
try:
args = self.parser.parse_args(args)
except SystemExit:
return
target = " ".join(args.value)
        if not args.last and not target:
self.parser.print_usage()
return
# If it's a file path, open a session on it.
if args.file:
target = os.path.expanduser(target)
if not os.path.exists(target) or not os.path.isfile(target):
self.log('error', "File not found: {0}".format(target))
return
__sessions__.new(target)
# If it's a URL, download it and open a session on the temporary file.
elif args.url:
data = download(url=target, tor=args.tor)
if data:
tmp = tempfile.NamedTemporaryFile(delete=False)
tmp.write(data)
tmp.close()
__sessions__.new(tmp.name)
# Try to open the specified file from the list of results from
# the last find command.
elif args.last:
if __sessions__.find:
try:
target = int(target)
except ValueError:
self.log('warning', "Please pass the entry number from the last find to -l/--last (e.g. open -l 5)")
return
for idx, item in enumerate(__sessions__.find, start=1):
if idx == target:
__sessions__.new(get_sample_path(item.sha256))
break
else:
self.log('warning', "You haven't performed a find yet")
# Otherwise we assume it's an hash of an previously stored sample.
else:
target = target.strip().lower()
if len(target) == 32:
key = 'md5'
elif len(target) == 64:
key = 'sha256'
else:
self.parser.print_usage()
return
db = Database()
rows = db.find(key=key, value=target)
if not rows:
self.log('warning', "No file found with the given hash {0}".format(target))
return
path = get_sample_path(rows[0].sha256)
if path:
__sessions__.new(path)
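# ----------------------------------------------------------------------
# Illustrative usage sketch (editorial addition, comments only). Inside
# the viper shell the command defined above could be invoked as, e.g.:
#     open -f /tmp/sample.bin          # open a session on a local file
#     open -u -t http://example.com/x  # download (via Tor) and open it
#     open -l 5                        # entry 5 of the last `find` results
#     open <md5-or-sha256-hash>        # open a previously stored sample
# The path, URL and index above are placeholders for demonstration.
# ----------------------------------------------------------------------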
| 37.825688 | 171 | 0.580645 |
794569ddef6f97d7dd392b7b0b6057db9d36c5c5 | 5,233 | py | Python | overlay/window.py | Neulus/overlay | 14f2bc0af9175d1bee7603bc3afea04cfa725727 | [
"MIT"
] | null | null | null | overlay/window.py | Neulus/overlay | 14f2bc0af9175d1bee7603bc3afea04cfa725727 | [
"MIT"
] | null | null | null | overlay/window.py | Neulus/overlay | 14f2bc0af9175d1bee7603bc3afea04cfa725727 | [
"MIT"
] | null | null | null | '''The main class for creating overlays.'''
__author__ = 'David Ma'
__all__ = [
'Window',
'overlays',
]
import tkinter as tk
overlays = []
master = tk.Tk()
master.withdraw()
class Window:
def __init__(self, **kwargs):
'''Initiate an overlay window.
        All parameters of this method are configuration parameters and
therefore optional.
size: tuple, the dimension (width, height) of the overlay window.
position: tuple, the position of the overlay (on screen).
transparent: bool, whether to set the overlay background transparent.
alpha: float [0, 1], the alpha (transparency) of the overlay.
draggable: bool, whether the window can be dragged.
resizable: bool, whether the window can be resized with <MouseWheel>.
'''
        self._root = tk.Toplevel()
'''Hide the title bar.'''
self._root.overrideredirect(1)
self._root.update_idletasks()
self._root.lift()
'''Basic configurations.'''
self.size = kwargs.get('size', (500, 250))
self.position = kwargs.get('position', (0, 0))
'''Make the background transparent.'''
self.transparent = kwargs.get('transparent', False)
'''Change the transparency of the overlay.'''
self.alpha = kwargs.get('alpha', 1)
'''Make the window draggable.'''
self.draggable = kwargs.get('draggable', True)
self._root.bind('<ButtonPress-1>', self._drag_start)
self._root.bind('<ButtonRelease-1>', self._drag_stop)
self._root.bind('<B1-Motion>', self._move)
self._drag_stop(None)
'''Make the window resizable.'''
self.resizable = kwargs.get('resizable', False)
'''Make the overlay float on top of everything.'''
self._root.wm_attributes('-topmost', True)
'''Remove the overlay's shadow.'''
self._root.wm_attributes('-transparent', "white")
'''Add self to overlay collections.'''
overlays.append(self)
def focus(self):
'''Set focus to this overlay.'''
self._root.focus_force()
def center(self, offset: tuple = 'auto', pos: tuple = None):
'''Move this overlay to the center of the screen.
offset: tuple, extra offset for the destined position.
pos: tuple, the location to center the overlay to.
By default the overlay is moved a little above the real center to a
more eye-catching location.
'''
if offset == 'auto':
offset = 0, -self._root.winfo_screenheight() // 7.5
if not pos:
center_x = self._root.winfo_screenwidth() / 2
center_y = self._root.winfo_screenheight() / 2
else:
center_x, center_y = pos
offset_x, offset_y = tuple(map(lambda x: x / 2, self.size))
new_x = center_x - offset_x + offset[0]
new_y = center_y - offset_y + offset[1]
self.position = new_x, new_y
def hide(self):
'''Hide this overlay.'''
self._root.withdraw()
def show(self):
'''Show this overlay.'''
self._root.wm_deiconify()
self._root.lift()
self._root.wm_attributes('-topmost', True)
def destroy(self):
'''Destroy this overlay.'''
self._root.destroy()
def _drag_start(self, event):
'''The start of moving this overlay.'''
self.x = event.x
self.y = event.y
def _drag_stop(self, event):
        '''The end of moving this overlay.'''
self.x = None
self.y = None
def _move(self, event):
'''The handler for moving the overlay.'''
if self.draggable:
mouse_x = self._root.winfo_pointerx() - self._root.winfo_rootx()
mouse_y = self._root.winfo_pointery() - self._root.winfo_rooty()
new_x = self._root.winfo_x() + mouse_x - self.x
new_y = self._root.winfo_y() + mouse_y - self.y
self.position = new_x, new_y
@property
def root(self):
return self._root
@property
def size(self):
return self._size
@property
def position(self):
return self._position
@position.setter
def position(self, newPos):
self._position = tuple(map(lambda x: int(x), newPos))
self._root.geometry('+%s+%s'%self._position)
@size.setter
def size(self, newSize):
self._size = tuple(map(lambda x: int(x), newSize))
self._root.geometry('%sx%s'%self._size)
@property
def transparent(self):
return self._root['bg'] == 'systemTransparent'
@transparent.setter
def transparent(self, newTransparent):
bg = 'systemTransparent' if newTransparent else 'white'
self._root.config(bg=bg)
@property
def alpha(self):
return self._root.wm_attributes('-alpha')
@alpha.setter
def alpha(self, newAlpha):
self._root.wm_attributes('-alpha', newAlpha)
@property
def resizable(self):
return self._resizable
@resizable.setter
def resizable(self, newResize):
self._resizable = newResize
if self._resizable:
self._root.bind('<MouseWheel>', lambda event: print(event))
else:
self._root.unbind('<MouseWheel>')
@staticmethod
def after(milliseconds, func, *args):
'''Runs the given function with the given args after launch.'''
master.after(milliseconds, func, *args)
@staticmethod
def launch():
'''Enter the mainloop for the collection of all overlays.'''
master.mainloop()
@staticmethod
def hide_all():
'''Hide all overlays.'''
for overlay in overlays:
overlay.hide()
@staticmethod
def show_all():
'''Show all overlays.'''
for overlay in overlays:
overlay.show()
@staticmethod
def destroy_all():
'''Destroy all overlays and end the mainloop.'''
for overlay in overlays:
overlay.destroy()
master.destroy()
| 25.280193 | 71 | 0.692146 |
79456a17c12baa8bb90b64eaf7283b5c8e5469cf | 4,430 | py | Python | models/senet.py | phil-hawkins/multiplayer-alphazero | 87732e18ec0f23469f6246de54388ef9cb324575 | [
"MIT"
] | 23 | 2019-11-29T03:46:10.000Z | 2022-03-09T19:59:01.000Z | models/senet.py | phil-hawkins/multiplayer-alphazero | 87732e18ec0f23469f6246de54388ef9cb324575 | [
"MIT"
] | 3 | 2021-03-21T19:11:17.000Z | 2021-04-20T17:40:46.000Z | models/senet.py | phil-hawkins/multiplayer-alphazero | 87732e18ec0f23469f6246de54388ef9cb324575 | [
"MIT"
] | 9 | 2019-11-26T05:05:34.000Z | 2022-03-22T22:08:53.000Z | import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
sys.path.append("..")
from model import Model
class BasicBlock(nn.Module):
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes)
)
# SE layers
self.fc1 = nn.Conv2d(planes, planes//16, kernel_size=1) # Use nn.Conv2d instead of nn.Linear
self.fc2 = nn.Conv2d(planes//16, planes, kernel_size=1)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
# Squeeze
w = F.avg_pool2d(out, out.size(2))
w = F.relu(self.fc1(w))
w = torch.sigmoid(self.fc2(w))
# Excitation
out = out * w # New broadcasting feature from v0.2!
out += self.shortcut(x)
out = F.relu(out)
return out
class PreActBlock(nn.Module):
def __init__(self, in_planes, planes, stride=1):
super(PreActBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
if stride != 1 or in_planes != planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False)
)
# SE layers
self.fc1 = nn.Conv2d(planes, planes//16, kernel_size=1)
self.fc2 = nn.Conv2d(planes//16, planes, kernel_size=1)
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
# Squeeze
w = F.avg_pool2d(out, out.size(2))
w = F.relu(self.fc1(w))
w = torch.sigmoid(self.fc2(w))
# Excitation
out = out * w
out += shortcut
return out
class SENet(Model):
def __init__(self, input_shape, p_shape, v_shape, block=PreActBlock, num_blocks=[2,2,2,2]):
super(SENet, self).__init__(input_shape, p_shape, v_shape)
self.in_planes = 64
self.conv1 = nn.Conv2d(input_shape[-1], 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.p_head = torch.nn.Linear(512, np.prod(p_shape))
self.v_head = torch.nn.Linear(512, np.prod(v_shape))
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes
return nn.Sequential(*layers)
def forward(self, x):
batch_size = len(x)
this_p_shape = tuple([batch_size] + list(self.p_shape))
this_v_shape = tuple([batch_size] + list(self.v_shape))
x = x.permute(0,3,1,2) # NHWC -> NCHW
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.avgpool(out)
flat = out.view(out.size(0), -1)
p_logits = self.p_head(flat).view(this_p_shape)
v = torch.tanh(self.v_head(flat).view(this_v_shape))
return p_logits, v
def SENet18(input_shape, p_shape, v_shape):
    # Forward the shape arguments required by the Model base class and
    # select the 18-layer (2,2,2,2) configuration.
    return SENet(input_shape, p_shape, v_shape, PreActBlock, [2, 2, 2, 2])
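# ----------------------------------------------------------------------
# Illustrative sketch (editorial addition): the shapes below are pure
# assumptions for demonstration (an 8x8 board with 3 NHWC feature
# planes, a flat 65-way policy head and a 2-player value head), and the
# Model base class is assumed to store p_shape/v_shape as used in
# forward(); adapt them to the game actually being modelled.
# ----------------------------------------------------------------------
def _example_senet_forward():
    net = SENet(input_shape=(8, 8, 3), p_shape=(65,), v_shape=(2,))
    x = torch.rand(4, 8, 8, 3)  # batch of 4 NHWC game states
    p_logits, v = net(x)
    return p_logits.shape, v.shape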
79456b7de43894340dd09a1a5a09547601d61926 | 1,894 | py | Python | python/paddle/fluid/tests/unittests/test_selu_op.py | Sand3r-/Paddle | 1217a521554d63caa1381b8716910d0268dfc22d | [
"Apache-2.0"
] | 2 | 2017-05-15T06:52:18.000Z | 2017-06-13T11:55:11.000Z | python/paddle/fluid/tests/unittests/test_selu_op.py | Sand3r-/Paddle | 1217a521554d63caa1381b8716910d0268dfc22d | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/test_selu_op.py | Sand3r-/Paddle | 1217a521554d63caa1381b8716910d0268dfc22d | [
"Apache-2.0"
] | 1 | 2020-09-12T21:35:19.000Z | 2020-09-12T21:35:19.000Z | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import six
from op_test import OpTest
class SeluTest(OpTest):
def setUp(self):
self.op_type = "selu"
self.x_shape = [3, 5, 5, 10]
self.dtype = np.float64
self.init_x_shape()
self.init_dtype()
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
x = np.random.normal(size=self.x_shape).astype(self.dtype)
        # Since selu is not differentiable at zero, avoid randomly
        # generated values close to zero.
x[np.abs(x) < 0.005] = 0.02
x_flat = x.flatten()
for i in range(x_flat.size):
if x_flat[i] < 0:
x_flat[i] = alpha * np.exp(x_flat[i]) - alpha
x_flat[i] = scale * x_flat[i]
out_np = x_flat.reshape(self.x_shape)
self.inputs = {'X': x}
self.outputs = {'Out': out_np}
self.attrs = {
'alpha': alpha,
'scale': scale,
}
def init_x_shape(self):
pass
def init_dtype(self):
pass
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
if __name__ == "__main__":
unittest.main()
| 26.305556 | 74 | 0.633052 |
79456bec3e2e3d6d89a04f83153816fbe8a434ac | 3,519 | py | Python | ironic/conf/opts.py | markbeierl/ironic | bcf5b37c736bc36abe94489c366fe26f198a7e7a | [
"Apache-2.0"
] | null | null | null | ironic/conf/opts.py | markbeierl/ironic | bcf5b37c736bc36abe94489c366fe26f198a7e7a | [
"Apache-2.0"
] | null | null | null | ironic/conf/opts.py | markbeierl/ironic | bcf5b37c736bc36abe94489c366fe26f198a7e7a | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from oslo_log import log
import ironic.conf
_default_opt_lists = [
ironic.conf.default.api_opts,
ironic.conf.default.driver_opts,
ironic.conf.default.exc_log_opts,
ironic.conf.default.hash_opts,
ironic.conf.default.image_opts,
ironic.conf.default.img_cache_opts,
ironic.conf.default.netconf_opts,
ironic.conf.default.notification_opts,
ironic.conf.default.path_opts,
ironic.conf.default.portgroup_opts,
ironic.conf.default.service_opts,
ironic.conf.default.utils_opts,
]
_opts = [
('DEFAULT', itertools.chain(*_default_opt_lists)),
('agent', ironic.conf.agent.opts),
('ansible', ironic.conf.ansible.opts),
('api', ironic.conf.api.opts),
('audit', ironic.conf.audit.opts),
('cimc', ironic.conf.cisco.cimc_opts),
('cinder', ironic.conf.cinder.list_opts()),
('cisco_ucs', ironic.conf.cisco.ucsm_opts),
('conductor', ironic.conf.conductor.opts),
('console', ironic.conf.console.opts),
('database', ironic.conf.database.opts),
('deploy', ironic.conf.deploy.opts),
('dhcp', ironic.conf.dhcp.opts),
('drac', ironic.conf.drac.opts),
('glance', ironic.conf.glance.list_opts()),
('healthcheck', ironic.conf.healthcheck.opts),
('ilo', ironic.conf.ilo.opts),
('inspector', ironic.conf.inspector.list_opts()),
('ipmi', ironic.conf.ipmi.opts),
('irmc', ironic.conf.irmc.opts),
('iscsi', ironic.conf.iscsi.opts),
('metrics', ironic.conf.metrics.opts),
('metrics_statsd', ironic.conf.metrics_statsd.opts),
('neutron', ironic.conf.neutron.list_opts()),
('pxe', ironic.conf.pxe.opts),
('service_catalog', ironic.conf.service_catalog.list_opts()),
('snmp', ironic.conf.snmp.opts),
('swift', ironic.conf.swift.list_opts()),
('xclarity', ironic.conf.xclarity.opts),
]
def list_opts():
"""Return a list of oslo.config options available in Ironic code.
The returned list includes all oslo.config options. Each element of
the list is a tuple. The first element is the name of the group, the
second element is the options.
The function is discoverable via the 'ironic' entry point under the
'oslo.config.opts' namespace.
The function is used by Oslo sample config file generator to discover the
options.
:returns: a list of (group, options) tuples
"""
return _opts
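# Illustrative sketch (editorial addition, not part of the upstream
# module): how a consumer such as the oslo sample-config generator might
# walk the (group, options) tuples returned by list_opts(). Note that
# the DEFAULT entry is an itertools.chain and is exhausted once iterated.
def _example_print_opts_summary():
    for group, opts in list_opts():
        print(group, sorted(opt.name for opt in opts))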
def update_opt_defaults():
log.set_defaults(
default_log_levels=[
'amqp=WARNING',
'amqplib=WARNING',
'qpid.messaging=INFO',
'oslo.messaging=INFO',
'sqlalchemy=WARNING',
'stevedore=INFO',
'eventlet.wsgi.server=INFO',
'iso8601=WARNING',
'requests=WARNING',
'neutronclient=WARNING',
'glanceclient=WARNING',
'urllib3.connectionpool=WARNING',
'keystonemiddleware.auth_token=INFO',
'keystoneauth.session=INFO',
]
)
| 33.836538 | 77 | 0.672634 |
79456c07cd8900ea15f1787b3d3de3116b6f6e16 | 3,398 | py | Python | xautodl/trade_models/naive_v2_model.py | Joey61Liuyi/AutoDL-Projects | 2092e144920e82d74753a7ac31e1890a150d41cf | [
"MIT"
] | 817 | 2020-01-15T00:23:41.000Z | 2022-03-31T14:52:03.000Z | xautodl/trade_models/naive_v2_model.py | Joey61Liuyi/AutoDL-Projects | 2092e144920e82d74753a7ac31e1890a150d41cf | [
"MIT"
] | 77 | 2020-01-14T14:02:45.000Z | 2022-03-25T07:06:02.000Z | xautodl/trade_models/naive_v2_model.py | Joey61Liuyi/AutoDL-Projects | 2092e144920e82d74753a7ac31e1890a150d41cf | [
"MIT"
] | 176 | 2020-01-15T10:39:41.000Z | 2022-03-31T04:24:53.000Z | ##################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021 #
##################################################
# A Simple Model that reused the prices of last day
##################################################
from __future__ import division
from __future__ import print_function
import random
import numpy as np
import pandas as pd
from qlib.log import get_module_logger
from qlib.model.base import Model
from qlib.data.dataset import DatasetH
from qlib.data.dataset.handler import DataHandlerLP
class NAIVE_V2(Model):
"""NAIVE Version 2 Quant Model"""
def __init__(self, d_feat=6, seed=None, **kwargs):
# Set logger.
self.logger = get_module_logger("NAIVE")
self.logger.info("NAIVE version...")
# set hyper-parameters.
self.d_feat = d_feat
self.seed = seed
self.logger.info(
"NAIVE parameters setting: d_feat={:}, seed={:}".format(
self.d_feat, self.seed
)
)
if self.seed is not None:
random.seed(self.seed)
np.random.seed(self.seed)
self.fitted = False
def process_data(self, features):
features = features.reshape(len(features), self.d_feat, -1)
features = features.transpose((0, 2, 1))
return features[:, :59, 0]
def mse(self, preds, labels):
masks = ~np.isnan(labels)
masked_preds = preds[masks]
masked_labels = labels[masks]
return np.square(masked_preds - masked_labels).mean()
def model(self, x):
x = 1 / x - 1
masks = ~np.isnan(x)
results = []
for rowd, rowm in zip(x, masks):
temp = rowd[rowm]
if rowm.any():
results.append(float(rowd[rowm][-1]))
else:
results.append(0)
return np.array(results, dtype=x.dtype)
def fit(self, dataset: DatasetH):
def _prepare_dataset(df_data):
features = df_data["feature"].values
features = self.process_data(features)
labels = df_data["label"].values.squeeze()
return dict(features=features, labels=labels)
df_train, df_valid, df_test = dataset.prepare(
["train", "valid", "test"],
col_set=["feature", "label"],
data_key=DataHandlerLP.DK_L,
)
train_dataset, valid_dataset, test_dataset = (
_prepare_dataset(df_train),
_prepare_dataset(df_valid),
_prepare_dataset(df_test),
)
# df_train['feature']['CLOSE1'].values
# train_dataset['features'][:, -1]
train_mse_loss = self.mse(
self.model(train_dataset["features"]), train_dataset["labels"]
)
valid_mse_loss = self.mse(
self.model(valid_dataset["features"]), valid_dataset["labels"]
)
self.logger.info("Training MSE loss: {:}".format(train_mse_loss))
self.logger.info("Validation MSE loss: {:}".format(valid_mse_loss))
self.fitted = True
def predict(self, dataset):
if not self.fitted:
raise ValueError("The model is not fitted yet!")
x_test = dataset.prepare("test", col_set="feature")
index = x_test.index
preds = self.model(self.process_data(x_test.values))
return pd.Series(preds, index=index)
| 32.673077 | 75 | 0.569747 |
79456c41f8fc227a814f823026c9dd88ead89f84 | 3,489 | py | Python | apps/users/tests/tests.py | michal-siedlecki/my_business | ceba1c03af6378563671dad0bd5933f53f442d24 | [
"MIT"
] | null | null | null | apps/users/tests/tests.py | michal-siedlecki/my_business | ceba1c03af6378563671dad0bd5933f53f442d24 | [
"MIT"
] | 7 | 2021-03-05T23:08:02.000Z | 2022-03-12T00:47:19.000Z | apps/users/tests/tests.py | michal-siedlecki/my_business | ceba1c03af6378563671dad0bd5933f53f442d24 | [
"MIT"
] | null | null | null | from django.http import QueryDict
from django.test import TestCase, Client
from django.urls import reverse
from django.db.models import fields
from apps.users.models import Profile, Address
from mybusiness.factories import model_factory, data_factory
def get_to_list_fields(model):
return list(
field
for field in model._meta.get_fields()
if (
not isinstance(field, fields.AutoField)
and not isinstance(field, fields.reverse_related.OneToOneRel)
and not isinstance(field, fields.related.OneToOneField)
)
)
class AddressModelTests(TestCase):
def setUp(self) -> None:
self.client = Client()
self.user = model_factory.create_user()
self.client.force_login(user=self.user)
self.address = self.user.profile.address
def test_default_address_exists(self):
self.assertIsNotNone(self.address)
def test_address_to_list(self):
address_list_fields = get_to_list_fields(Address)
address_list = self.address.to_list()
self.assertIs(isinstance(address_list, list), True)
self.assertEqual(len(address_list), len(address_list_fields))
class ProfileModelTests(TestCase):
def setUp(self):
self.client = Client()
self.user = model_factory.create_user()
self.client.force_login(user=self.user)
model_factory.update_fake_user_profile(user=self.user)
def test_user_profile_is_auto_created(self):
user_profile = Profile.objects.get(user=self.user)
self.assertEqual(user_profile.user, self.user)
class UnlogedUserViewTests(TestCase):
def setUp(self) -> None:
self.client = Client()
def test_not_logged_user_can_see_about_view(self):
response = self.client.get("/")
self.assertEqual(response.status_code, 200)
self.assertContains(response, "about")
def test_not_logged_user_can_see_login_view(self):
response = self.client.get("/login/")
self.assertEqual(response.status_code, 200)
def test_not_logged_user_cant_see_invoices(self):
response = self.client.get("/invoices/")
        self.assertEqual(response.status_code, 302)  # user is redirected to login
self.assertEqual(response.url, "/login/?next=/invoices/")
class UserViewsTests(TestCase):
def setUp(self):
self.client = Client()
self.user = model_factory.create_user()
self.client.force_login(user=self.user)
model_factory.update_fake_user_profile(user=self.user)
def test_logged_user_can_see_profile_view(self):
url = reverse("profile")
response = self.client.get(url)
profile_data = Profile.objects.get(user=self.user)
self.assertEqual(response.status_code, 200)
for x in profile_data.to_list():
self.assertContains(response, x)
def test_user_can_update_profile(self):
url = reverse("profile")
profile_data = data_factory.create_profile_data()
address_data = data_factory.create_address_data()
query_dict = QueryDict("", mutable=True)
query_dict.update(profile_data)
query_dict.update(address_data)
response = self.client.post(url, query_dict)
self.assertEqual(response.url, "/profile/")
self.assertEqual(
Profile.objects.get(user=self.user).company_name,
profile_data.get("company_name"),
)
class UserFormTests(TestCase):
pass
| 33.873786 | 88 | 0.690456 |
79456d4b360ab52cd62ba5cdc046a66bb1db73a9 | 2,130 | py | Python | hw8_release/.env/bin/player.py | YuanSun-au/CS131 | 2f0e4896beb80926636bdf2fb57a119ae77ff45a | [
"MIT"
] | null | null | null | hw8_release/.env/bin/player.py | YuanSun-au/CS131 | 2f0e4896beb80926636bdf2fb57a119ae77ff45a | [
"MIT"
] | null | null | null | hw8_release/.env/bin/player.py | YuanSun-au/CS131 | 2f0e4896beb80926636bdf2fb57a119ae77ff45a | [
"MIT"
] | 1 | 2020-08-05T00:08:23.000Z | 2020-08-05T00:08:23.000Z | #!/home/shawn/Documents/CS131/CS131/hw8_release/.env/bin/python3
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
# --------------------------------------------------------------------
# an image animation player
class UI(tkinter.Label):
def __init__(self, master, im):
if isinstance(im, list):
# list of images
self.im = im[1:]
im = self.im[0]
else:
# sequence
self.im = im
if im.mode == "1":
self.image = ImageTk.BitmapImage(im, foreground="white")
else:
self.image = ImageTk.PhotoImage(im)
tkinter.Label.__init__(self, master, image=self.image, bg="black", bd=0)
self.update()
duration = im.info.get("duration", 100)
self.after(duration, self.next)
def next(self):
if isinstance(self.im, list):
try:
im = self.im[0]
del self.im[0]
self.image.paste(im)
except IndexError:
return # end of list
else:
try:
im = self.im
im.seek(im.tell() + 1)
self.image.paste(im)
except EOFError:
return # end of file
duration = im.info.get("duration", 100)
self.after(duration, self.next)
self.update_idletasks()
# --------------------------------------------------------------------
# script interface
if __name__ == "__main__":
if not sys.argv[1:]:
print("Syntax: python player.py imagefile(s)")
sys.exit(1)
filename = sys.argv[1]
root = tkinter.Tk()
root.title(filename)
if len(sys.argv) > 2:
# list of images
print("loading...")
im = []
for filename in sys.argv[1:]:
im.append(Image.open(filename))
else:
# sequence
im = Image.open(filename)
UI(root, im).pack()
root.mainloop()
| 21.734694 | 80 | 0.497653 |
79456ddcb882760ba2c3e53a5731300caf3ceb38 | 974 | py | Python | hring/src/Script/DeC/sim_MBNoC_8x8.py | anderson1008/Noculator | 411964ce333c3bd587840554efef6e61c0b9b4d5 | [
"MIT"
] | null | null | null | hring/src/Script/DeC/sim_MBNoC_8x8.py | anderson1008/Noculator | 411964ce333c3bd587840554efef6e61c0b9b4d5 | [
"MIT"
] | null | null | null | hring/src/Script/DeC/sim_MBNoC_8x8.py | anderson1008/Noculator | 411964ce333c3bd587840554efef6e61c0b9b4d5 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import sys
import os
workload_dir = "../bin/workload_list/"
SIM_NUM = 100
# 64-node MBNoC
out_dir = "../results/MBNoC/8x8/"
workload = "hetero_workload_8x8"
network_nrX = "8"
network_nrY = "8"
router_addrPacketSize = "2"
router_dataPacketSize = "8"
router_maxPacketSize = "8"
topology = "Mesh_Multi"
router_algorithm = "BLESS_BYPASS"
for sim_index in range(1, SIM_NUM+1, 1):
print ("New Simulation!")
out_file = "sim_" + str(sim_index) + ".out"
command_line = "mono ../bin/sim.exe -config ../bin/config.txt -output " + out_dir + out_file + " -workload " + workload_dir + workload + ' ' + str (sim_index) + " -router.algorithm " + router_algorithm + " -router.addrPacketSize " + router_addrPacketSize + " -router.dataPacketSize " + router_dataPacketSize + " -router.maxPacketSize " + router_maxPacketSize + " -network_nrX " + network_nrX + " -network_nrY " + network_nrY + " -topology " + topology
os.system (command_line)
| 37.461538 | 453 | 0.689938 |
79456e6003a66ce2ecd0be24972a3e7d94d40bae | 11,991 | py | Python | test/mitmproxy/tools/web/test_app.py | stvvan/mitmproxy | 27883e7b05032961cbee2b0f6a6867e4cc5d11d6 | [
"MIT"
] | 9 | 2021-12-19T13:47:10.000Z | 2022-03-26T06:34:02.000Z | test/mitmproxy/tools/web/test_app.py | ycrao/mitmproxy | ab6f1ebb4415620ce02d61d0ff1c72e6b84b5198 | [
"MIT"
] | 13 | 2021-06-25T20:56:39.000Z | 2022-02-13T22:59:39.000Z | test/mitmproxy/tools/web/test_app.py | ycrao/mitmproxy | ab6f1ebb4415620ce02d61d0ff1c72e6b84b5198 | [
"MIT"
] | 3 | 2021-12-20T08:21:47.000Z | 2022-03-29T17:55:12.000Z | import asyncio
import json as _json
import logging
import os
import sys
from unittest import mock
import pytest
if sys.platform == 'win32':
# workaround for
# https://github.com/tornadoweb/tornado/issues/2751
# https://www.tornadoweb.org/en/stable/index.html#installation
# (copied multiple times in the codebase, please remove all occurrences)
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
import tornado.testing # noqa
from tornado import httpclient # noqa
from tornado import websocket # noqa
from mitmproxy import options # noqa
from mitmproxy.test import tflow # noqa
from mitmproxy.tools.web import app # noqa
from mitmproxy.tools.web import master as webmaster # noqa
@pytest.fixture(scope="module")
def no_tornado_logging():
logging.getLogger('tornado.access').disabled = True
logging.getLogger('tornado.application').disabled = True
logging.getLogger('tornado.general').disabled = True
yield
logging.getLogger('tornado.access').disabled = False
logging.getLogger('tornado.application').disabled = False
logging.getLogger('tornado.general').disabled = False
def json(resp: httpclient.HTTPResponse):
return _json.loads(resp.body.decode())
@pytest.mark.usefixtures("no_tornado_logging")
class TestApp(tornado.testing.AsyncHTTPTestCase):
def get_new_ioloop(self):
io_loop = tornado.platform.asyncio.AsyncIOLoop()
asyncio.set_event_loop(io_loop.asyncio_loop)
return io_loop
def get_app(self):
o = options.Options(http2=False)
m = webmaster.WebMaster(o, with_termlog=False)
f = tflow.tflow(resp=True)
f.id = "42"
m.view.add([f])
m.view.add([tflow.tflow(err=True)])
m.log.info("test log")
self.master = m
self.view = m.view
self.events = m.events
webapp = app.Application(m, None)
webapp.settings["xsrf_cookies"] = False
return webapp
def fetch(self, *args, **kwargs) -> httpclient.HTTPResponse:
# tornado disallows POST without content by default.
return super().fetch(*args, **kwargs, allow_nonstandard_methods=True)
def put_json(self, url, data: dict) -> httpclient.HTTPResponse:
return self.fetch(
url,
method="PUT",
body=_json.dumps(data),
headers={"Content-Type": "application/json"},
)
def test_index(self):
assert self.fetch("/").code == 200
def test_filter_help(self):
assert self.fetch("/filter-help").code == 200
def test_flows(self):
resp = self.fetch("/flows")
assert resp.code == 200
assert json(resp)[0]["request"]["contentHash"]
assert json(resp)[1]["error"]
def test_flows_dump(self):
resp = self.fetch("/flows/dump")
assert b"address" in resp.body
def test_clear(self):
events = self.events.data.copy()
flows = list(self.view)
assert self.fetch("/clear", method="POST").code == 200
assert not len(self.view)
assert not len(self.events.data)
# restore
for f in flows:
self.view.add([f])
self.events.data = events
def test_resume(self):
for f in self.view:
f.intercept()
assert self.fetch(
"/flows/42/resume", method="POST").code == 200
assert sum(f.intercepted for f in self.view) == 1
assert self.fetch("/flows/resume", method="POST").code == 200
assert all(not f.intercepted for f in self.view)
def test_kill(self):
for f in self.view:
f.backup()
f.intercept()
assert self.fetch("/flows/42/kill", method="POST").code == 200
assert sum(f.killable for f in self.view) == 1
assert self.fetch("/flows/kill", method="POST").code == 200
assert all(not f.killable for f in self.view)
for f in self.view:
f.revert()
def test_flow_delete(self):
f = self.view.get_by_id("42")
assert f
assert self.fetch("/flows/42", method="DELETE").code == 200
assert not self.view.get_by_id("42")
self.view.add([f])
assert self.fetch("/flows/1234", method="DELETE").code == 404
def test_flow_update(self):
f = self.view.get_by_id("42")
assert f.request.method == "GET"
f.backup()
upd = {
"request": {
"method": "PATCH",
"port": 123,
"headers": [("foo", "bar")],
"content": "req",
},
"response": {
"msg": "Non-Authorisé",
"code": 404,
"headers": [("bar", "baz")],
"content": "resp",
}
}
assert self.put_json("/flows/42", upd).code == 200
assert f.request.method == "PATCH"
assert f.request.port == 123
assert f.request.headers["foo"] == "bar"
assert f.request.text == "req"
assert f.response.msg == "Non-Authorisé"
assert f.response.status_code == 404
assert f.response.headers["bar"] == "baz"
assert f.response.text == "resp"
f.revert()
assert self.put_json("/flows/42", {"foo": 42}).code == 400
assert self.put_json("/flows/42", {"request": {"foo": 42}}).code == 400
assert self.put_json("/flows/42", {"response": {"foo": 42}}).code == 400
assert self.fetch("/flows/42", method="PUT", body="{}").code == 400
assert self.fetch(
"/flows/42",
method="PUT",
headers={"Content-Type": "application/json"},
body="!!"
).code == 400
def test_flow_duplicate(self):
resp = self.fetch("/flows/42/duplicate", method="POST")
assert resp.code == 200
f = self.view.get_by_id(resp.body.decode())
assert f
assert f.id != "42"
self.view.remove([f])
def test_flow_revert(self):
f = self.view.get_by_id("42")
f.backup()
f.request.method = "PATCH"
self.fetch("/flows/42/revert", method="POST")
assert not f._backup
def test_flow_replay(self):
with mock.patch("mitmproxy.command.CommandManager.call") as replay_call:
assert self.fetch("/flows/42/replay", method="POST").code == 200
assert replay_call.called
def test_flow_content(self):
f = self.view.get_by_id("42")
f.backup()
f.response.headers["Content-Encoding"] = "ran\x00dom"
f.response.headers["Content-Disposition"] = 'inline; filename="filename.jpg"'
r = self.fetch("/flows/42/response/content.data")
assert r.body == b"message"
assert r.headers["Content-Encoding"] == "random"
assert r.headers["Content-Disposition"] == 'attachment; filename="filename.jpg"'
del f.response.headers["Content-Disposition"]
f.request.path = "/foo/bar.jpg"
assert self.fetch(
"/flows/42/response/content.data"
).headers["Content-Disposition"] == 'attachment; filename=bar.jpg'
f.response.content = b""
assert self.fetch("/flows/42/response/content.data").code == 400
f.revert()
def test_update_flow_content(self):
assert self.fetch(
"/flows/42/request/content.data",
method="POST",
body="new"
).code == 200
f = self.view.get_by_id("42")
assert f.request.content == b"new"
assert f.modified()
f.revert()
def test_update_flow_content_multipart(self):
body = (
b'--somefancyboundary\r\n'
b'Content-Disposition: form-data; name="a"; filename="a.txt"\r\n'
b'\r\n'
b'such multipart. very wow.\r\n'
b'--somefancyboundary--\r\n'
)
assert self.fetch(
"/flows/42/request/content.data",
method="POST",
headers={"Content-Type": 'multipart/form-data; boundary="somefancyboundary"'},
body=body
).code == 200
f = self.view.get_by_id("42")
assert f.request.content == b"such multipart. very wow."
assert f.modified()
f.revert()
def test_flow_content_view(self):
assert json(self.fetch("/flows/42/request/content/raw")) == {
"lines": [
[["text", "content"]]
],
"description": "Raw"
}
def test_events(self):
resp = self.fetch("/events")
assert resp.code == 200
assert json(resp)[0]["level"] == "info"
def test_settings(self):
assert json(self.fetch("/settings"))["mode"] == "regular"
def test_settings_update(self):
assert self.put_json("/settings", {"anticache": True}).code == 200
assert self.put_json("/settings", {"wtf": True}).code == 400
def test_options(self):
j = json(self.fetch("/options"))
assert type(j) == dict
assert type(j['anticache']) == dict
def test_option_update(self):
assert self.put_json("/options", {"anticache": True}).code == 200
assert self.put_json("/options", {"wtf": True}).code == 400
assert self.put_json("/options", {"anticache": "foo"}).code == 400
def test_option_save(self):
assert self.fetch("/options/save", method="POST").code == 200
def test_err(self):
with mock.patch("mitmproxy.tools.web.app.IndexHandler.get") as f:
f.side_effect = RuntimeError
assert self.fetch("/").code == 500
@tornado.testing.gen_test
def test_websocket(self):
ws_url = f"ws://localhost:{self.get_http_port()}/updates"
ws_client = yield websocket.websocket_connect(ws_url)
self.master.options.anticomp = True
r1 = yield ws_client.read_message()
r2 = yield ws_client.read_message()
j1 = _json.loads(r1)
j2 = _json.loads(r2)
response = dict()
response[j1['resource']] = j1
response[j2['resource']] = j2
assert response['settings'] == {
"resource": "settings",
"cmd": "update",
"data": {"anticomp": True},
}
assert response['options'] == {
"resource": "options",
"cmd": "update",
"data": {
"anticomp": {
"value": True,
"choices": None,
"default": False,
"help": "Try to convince servers to send us un-compressed data.",
"type": "bool",
}
}
}
ws_client.close()
# trigger on_close by opening a second connection.
ws_client2 = yield websocket.websocket_connect(ws_url)
ws_client2.close()
def _test_generate_tflow_js(self):
_tflow = app.flow_to_json(tflow.tflow(resp=True, err=True))
# Set some value as constant, so that _tflow.js would not change every time.
_tflow['client_conn']['id'] = "4a18d1a0-50a1-48dd-9aa6-d45d74282939"
_tflow['id'] = "d91165be-ca1f-4612-88a9-c0f8696f3e29"
_tflow['server_conn']['id'] = "f087e7b2-6d0a-41a8-a8f0-e1a4761395f8"
_tflow["request"]["trailers"] = [["trailer", "qvalue"]]
_tflow["response"]["trailers"] = [["trailer", "qvalue"]]
tflow_json = _json.dumps(_tflow, indent=4, sort_keys=True)
here = os.path.abspath(os.path.dirname(__file__))
web_root = os.path.join(here, os.pardir, os.pardir, os.pardir, os.pardir, 'web')
tflow_path = os.path.join(web_root, 'src/js/__tests__/ducks/_tflow.js')
content = (
f"/** Auto-generated by test_app.py:TestApp._test_generate_tflow_js */\n"
f"export default function(){{\n"
f" return {tflow_json}\n"
f"}}"
)
with open(tflow_path, 'w', newline="\n") as f:
f.write(content)
| 34.358166 | 90 | 0.577516 |
79456f70830f685ef4fb6ea60bc1d0d525315dee | 16,823 | py | Python | resnet_.py | SeunghwanByun/LBMNet | 90d05d5147d3b118ed869ba5781632173a8b528b | [
"MIT"
] | null | null | null | resnet_.py | SeunghwanByun/LBMNet | 90d05d5147d3b118ed869ba5781632173a8b528b | [
"MIT"
] | null | null | null | resnet_.py | SeunghwanByun/LBMNet | 90d05d5147d3b118ed869ba5781632173a8b528b | [
"MIT"
] | 2 | 2021-01-25T06:23:57.000Z | 2022-01-19T05:47:00.000Z | import torch
import torch.nn as nn
from torch.utils.model_zoo import load_url as load_state_dict_from_url
# from .utils import load_state_dict_from_url
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet50_888', 'resnet50_8816', 'resnet50_81616', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, input_channel=32, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(input_channel, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x):
return self._forward_impl(x)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def _resnet_888(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, replace_stride_with_dilation=[False, 1, 2], **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def _resnet_8816(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, replace_stride_with_dilation=[False, 1, False], **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def _resnet_81616(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, replace_stride_with_dilation=[False, False, 1], **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
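# --- Illustration only; not part of the original torchvision-derived file. ---
# A minimal usage sketch for the constructors above. It assumes the torch / torch.nn
# imports at the top of this module are in scope (they are used elsewhere in the
# file), and it passes input_channel explicitly because this fork exposes it on
# ResNet.__init__ (default 32) instead of hard-coding 3-channel input.
def _demo_resnet50_forward():
    model = resnet50(pretrained=False, input_channel=3, num_classes=10)
    dummy = torch.randn(2, 3, 224, 224)   # a batch of two RGB images
    logits = model(dummy)                 # shape: (2, 10)
    return logits.shape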
def resnet50_888(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet_888('resnet50_888', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50_8816(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet_8816('resnet50_8816', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50_81616(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet_81616('resnet50_81616', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
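# --- Illustration only; not part of the original file. ---
# A quick check of the width doubling described in the docstring above: the 3x3
# bottleneck conv of the last block is 1024 channels wide in wide_resnet50_2
# versus 512 in the plain resnet50, while the outer 1x1 convs stay at 2048.
def _demo_wide_bottleneck_width():
    wide = wide_resnet50_2(pretrained=False, input_channel=3)
    plain = resnet50(pretrained=False, input_channel=3)
    assert wide.layer4[-1].conv2.out_channels == 1024
    assert plain.layer4[-1].conv2.out_channels == 512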
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs) | 42.375315 | 120 | 0.640968 |
79456fa484d9d2cbcd40781d6f84a561303604d4 | 116 | py | Python | weather/weather_app/admin.py | nijatmursali/WeatherAppUsingDjango | adf179726d9cd78d80e206e19ddec4250d76cd3f | [
"Apache-2.0"
] | null | null | null | weather/weather_app/admin.py | nijatmursali/WeatherAppUsingDjango | adf179726d9cd78d80e206e19ddec4250d76cd3f | [
"Apache-2.0"
] | null | null | null | weather/weather_app/admin.py | nijatmursali/WeatherAppUsingDjango | adf179726d9cd78d80e206e19ddec4250d76cd3f | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from .models import City
# Register your models here.
admin.site.register(City)
| 14.5 | 32 | 0.784483 |
79456fc94c66883b61364521d90933d834742722 | 962 | py | Python | functions/v2/pubsub/main.py | InstantDomain/python-docs-samples | f8e293c722998b269da38b7fe11b98aae8932b8f | [
"Apache-2.0"
] | null | null | null | functions/v2/pubsub/main.py | InstantDomain/python-docs-samples | f8e293c722998b269da38b7fe11b98aae8932b8f | [
"Apache-2.0"
] | null | null | null | functions/v2/pubsub/main.py | InstantDomain/python-docs-samples | f8e293c722998b269da38b7fe11b98aae8932b8f | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START functions_cloudevent_pubsub]
import base64
import functions_framework
# Triggered from a message on a Cloud Pub/Sub topic.
@functions_framework.cloud_event
def subscribe(cloud_event):
# Print out the data from Pub/Sub, to prove that it worked
print("Hello, " + base64.b64decode(cloud_event.data["message"]["data"]).decode() + "!")
# [END functions_cloudevent_pubsub]
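# --- Illustration only; not part of the original Google sample. ---
# A hedged local sketch: in deployment `subscribe` receives a CloudEvent whose
# `.data` holds the Pub/Sub envelope {"message": {"data": <base64 payload>}}.
# The stand-in class below is hypothetical and only mimics that attribute so the
# handler can be exercised locally, assuming the functions_framework decorator
# simply forwards the call to the wrapped function.
class _FakePubSubEvent:
    def __init__(self, text):
        self.data = {"message": {"data": base64.b64encode(text.encode()).decode()}}
if __name__ == "__main__":
    subscribe(_FakePubSubEvent("World"))  # prints: Hello, World!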
| 35.62963 | 91 | 0.759875 |
7945707455ff1da4b04f06abc70029cbec5ae4ea | 719 | py | Python | tests/nfs_module_test.py | web-sys1/NFSyndication | 96d84444cf5d217713e162f9699d38b495a4e3ec | [
"BSD-2-Clause"
] | null | null | null | tests/nfs_module_test.py | web-sys1/NFSyndication | 96d84444cf5d217713e162f9699d38b495a4e3ec | [
"BSD-2-Clause"
] | null | null | null | tests/nfs_module_test.py | web-sys1/NFSyndication | 96d84444cf5d217713e162f9699d38b495a4e3ec | [
"BSD-2-Clause"
] | null | null | null | import os, glob
import pytest
import subprocess
from NFSyndication import init as NFS_init
from NFSyndication.core import args
# test@
# Change the action associated with your option to action='store'
def test_conf():
#We use these conditions to check the statement
args.outputJSON = "feed-output.json"
subscriptions = [
'http://feedpress.me/512pixels',
'http://www.leancrew.com/all-this/feed/',
'http://ihnatko.com/feed/',
'http://blog.ashleynh.me/feed']
    with open('feeds.txt', 'w', encoding='utf8') as f:
f.write(",".join(subscriptions).replace(',', '\n'))
return NFS_init()
def test_entrypoint():
#Then initialize code
return test_conf()
| 26.62963 | 65 | 0.681502 |
79457167db6809a3ffcec29e1fc85d4b9dafe5ca | 1,753 | py | Python | tests/mockserver.py | peter-bertuglia/python-sdk | c5930a9833005df2e8b669099c70e0a1644cdf0e | [
"ISC"
] | null | null | null | tests/mockserver.py | peter-bertuglia/python-sdk | c5930a9833005df2e8b669099c70e0a1644cdf0e | [
"ISC"
] | 2 | 2021-12-04T02:41:33.000Z | 2022-01-12T00:41:07.000Z | tests/mockserver.py | peter-bertuglia/python-sdk | c5930a9833005df2e8b669099c70e0a1644cdf0e | [
"ISC"
] | 2 | 2021-12-04T02:25:10.000Z | 2022-01-11T19:32:39.000Z | import requests
import uuid
from flask import Flask, jsonify
from threading import Thread
# From https://gist.github.com/eruvanos/f6f62edb368a20aaa880e12976620db8
class MockServer(Thread):
def __init__(self, port=5000):
super().__init__()
print("server running on port %s" % port)
self.port = port
self.app = Flask(__name__)
self.url = "http://localhost:%s" % self.port
self.app.add_url_rule("/shutdown", view_func=self._shutdown_server)
def _shutdown_server(self):
from flask import request
if not 'werkzeug.server.shutdown' in request.environ:
raise RuntimeError('Not running the development server')
request.environ['werkzeug.server.shutdown']()
return 'Server shutting down...'
def _return_request_body(self):
from flask import request
self.log_callback(request.json)
return request.json
def shutdown_server(self):
requests.get("http://localhost:%s/shutdown" % self.port)
self.join()
def add_callback_response(self, url, callback, methods=('POST',)):
callback.__name__ = str(uuid.uuid4()) # change name of method to mitigate flask exception
self.app.add_url_rule(url, view_func=callback, methods=methods)
def add_log_event_response(self, callback):
self.log_callback = callback
self.app.add_url_rule("/log_event", view_func=self._return_request_body, methods=('POST',))
def add_json_response(self, url, serializable, methods=('POST',)):
def callback():
return jsonify(serializable)
self.add_callback_response(url, callback, methods=methods)
def run(self):
self.app.run(port=self.port) | 35.06 | 99 | 0.669709 |
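A minimal usage sketch for the MockServer class above (an editorial illustration, not part of the original file; the port, the /ping route, and the `mockserver` import path are made-up assumptions):

import requests
from mockserver import MockServer

server = MockServer(port=5001)
server.add_json_response("/ping", {"ok": True}, methods=("GET",))
server.start()
print(requests.get(server.url + "/ping").json())  # {'ok': True}
server.shutdown_server()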
794571e1087d8bd361abc9104517ffcf29734148 | 11,903 | py | Python | muak/vector2d.py | ok65/muak | 885d2bca83b90a37c17df88c1aeb66b49958ae2d | [
"WTFPL"
] | null | null | null | muak/vector2d.py | ok65/muak | 885d2bca83b90a37c17df88c1aeb66b49958ae2d | [
"WTFPL"
] | null | null | null | muak/vector2d.py | ok65/muak | 885d2bca83b90a37c17df88c1aeb66b49958ae2d | [
"WTFPL"
] | null | null | null |
# Library import
import operator
import math
class Vector2D:
__slots__ = ['_x', '_y']
def __init__(self, x_or_pair=None, y = None, bearing = None, magnitude = None):
"""
2D Integer vector class, 'inspired' by pygame/cocos2d etc
:param x_or_pair:
:param y:
:param bearing:
:param magnitude:
"""
# Initialise internal x/y values to zero
self._x = 0
self._y = 0
# Set x and y by various different means
if bearing is not None and magnitude is not None:
self.x = magnitude * math.cos(math.radians(bearing))
self.y = magnitude * math.sin(math.radians(bearing))
elif y is not None:
self.x = x_or_pair
self.y = y
elif x_or_pair is not None:
self.x = x_or_pair[0]
self.y = x_or_pair[1]
else:
raise Exception("Not enough parameters passed")
@property
def x(self) -> int:
return self._x
@x.setter
def x(self, value: int):
self._x = int(value)
@property
def y(self) -> int:
return self._y
@y.setter
def y(self, value: int):
self._y = int(value)
def tuple(self) -> tuple:
return self.x, self.y
def copy(self):
return Vector2D(self.x, self.y)
def __len__(self):
return 2
def __getitem__(self, key):
if key == 0:
return self.x
elif key == 1:
return self.y
else:
raise IndexError("Invalid subscript "+str(key)+" to Vector2D")
def __setitem__(self, key, value):
if key == 0:
self.x = value
elif key == 1:
self.y = value
else:
raise IndexError("Invalid subscript "+str(key)+" to Vector2D")
    # String representation (for debugging)
def __repr__(self):
return 'Vector2D({}, {})'.format(self.x, self.y)
# Comparison
def __eq__(self, other):
if hasattr(other, "__getitem__") and len(other) == 2:
return self.x == other[0] and self.y == other[1]
else:
return False
def __ne__(self, other):
if hasattr(other, "__getitem__") and len(other) == 2:
return self.x != other[0] or self.y != other[1]
else:
return True
def __nonzero__(self):
return bool(self.x or self.y)
# Generic operator handlers
def _o2(self, other, f):
"Any two-operator operation where the left operand is a Vector2D"
if isinstance(other, Vector2D):
return Vector2D(f(self.x, other.x),
f(self.y, other.y))
elif (hasattr(other, "__getitem__")):
return Vector2D(f(self.x, other[0]),
f(self.y, other[1]))
else:
return Vector2D(f(self.x, other),
f(self.y, other))
def _r_o2(self, other, f):
"Any two-operator operation where the right operand is a Vector2D"
if (hasattr(other, "__getitem__")):
return Vector2D(f(other[0], self.x),
f(other[1], self.y))
else:
return Vector2D(f(other, self.x),
f(other, self.y))
def _io(self, other, f):
"inplace operator"
if (hasattr(other, "__getitem__")):
self.x = f(self.x, other[0])
self.y = f(self.y, other[1])
else:
self.x = f(self.x, other)
self.y = f(self.y, other)
return self
# Addition
def __add__(self, other):
if isinstance(other, Vector2D):
return Vector2D(self.x + other.x, self.y + other.y)
elif hasattr(other, "__getitem__"):
return Vector2D(self.x + other[0], self.y + other[1])
else:
return Vector2D(self.x + other, self.y + other)
__radd__ = __add__
def __iadd__(self, other):
if isinstance(other, Vector2D):
self.x += other.x
self.y += other.y
elif hasattr(other, "__getitem__"):
self.x += other[0]
self.y += other[1]
else:
self.x += other
self.y += other
return self
# Subtraction
def __sub__(self, other):
if isinstance(other, Vector2D):
return Vector2D(self.x - other.x, self.y - other.y)
elif (hasattr(other, "__getitem__")):
return Vector2D(self.x - other[0], self.y - other[1])
else:
return Vector2D(self.x - other, self.y - other)
def __rsub__(self, other):
if isinstance(other, Vector2D):
return Vector2D(other.x - self.x, other.y - self.y)
if (hasattr(other, "__getitem__")):
return Vector2D(other[0] - self.x, other[1] - self.y)
else:
return Vector2D(other - self.x, other - self.y)
def __isub__(self, other):
if isinstance(other, Vector2D):
self.x -= other.x
self.y -= other.y
elif (hasattr(other, "__getitem__")):
self.x -= other[0]
self.y -= other[1]
else:
self.x -= other
self.y -= other
return self
# Multiplication
def __mul__(self, other):
if isinstance(other, Vector2D):
return Vector2D(self.x*other.x, self.y*other.y)
if (hasattr(other, "__getitem__")):
return Vector2D(self.x*other[0], self.y*other[1])
else:
return Vector2D(self.x*other, self.y*other)
__rmul__ = __mul__
def __imul__(self, other):
if isinstance(other, Vector2D):
self.x *= other.x
self.y *= other.y
elif (hasattr(other, "__getitem__")):
self.x *= other[0]
self.y *= other[1]
else:
self.x *= other
self.y *= other
return self
# Division
def __div__(self, other):
return self._o2(other, operator.div)
def __rdiv__(self, other):
return self._r_o2(other, operator.div)
def __idiv__(self, other):
return self._io(other, operator.div)
def __floordiv__(self, other):
return self._o2(other, operator.floordiv)
def __rfloordiv__(self, other):
return self._r_o2(other, operator.floordiv)
def __ifloordiv__(self, other):
return self._io(other, operator.floordiv)
def __truediv__(self, other):
return self._o2(other, operator.truediv)
def __rtruediv__(self, other):
return self._r_o2(other, operator.truediv)
def __itruediv__(self, other):
        return self._io(other, operator.truediv)
# Modulo
def __mod__(self, other):
return self._o2(other, operator.mod)
def __rmod__(self, other):
return self._r_o2(other, operator.mod)
    def __divmod__(self, other):
        return self._o2(other, divmod)
    def __rdivmod__(self, other):
        return self._r_o2(other, divmod)
    # Exponentiation
def __pow__(self, other):
return self._o2(other, operator.pow)
def __rpow__(self, other):
return self._r_o2(other, operator.pow)
# Bitwise operators
def __lshift__(self, other):
return self._o2(other, operator.lshift)
def __rlshift__(self, other):
return self._r_o2(other, operator.lshift)
def __rshift__(self, other):
return self._o2(other, operator.rshift)
def __rrshift__(self, other):
return self._r_o2(other, operator.rshift)
def __and__(self, other):
return self._o2(other, operator.and_)
__rand__ = __and__
def __or__(self, other):
return self._o2(other, operator.or_)
__ror__ = __or__
def __xor__(self, other):
return self._o2(other, operator.xor)
__rxor__ = __xor__
# Unary operations
def __neg__(self):
return Vector2D(operator.neg(self.x), operator.neg(self.y))
def __pos__(self):
return Vector2D(operator.pos(self.x), operator.pos(self.y))
def __abs__(self):
return Vector2D(abs(self.x), abs(self.y))
def __invert__(self):
return Vector2D(-self.x, -self.y)
# vectory functions
def get_length_sqrd(self):
return self.x**2 + self.y**2
def get_length(self):
return math.sqrt(self.x**2 + self.y**2)
def __setlength(self, value):
length = self.get_length()
self.x *= value/length
self.y *= value/length
length = property(get_length, __setlength, None, "gets or sets the magnitude of the vector")
def rotate(self, angle_degrees):
radians = math.radians(angle_degrees)
cos = math.cos(radians)
sin = math.sin(radians)
x = self.x*cos - self.y*sin
y = self.x*sin + self.y*cos
self.x = x
self.y = y
def get_reflected(self):
return Vector2D(self.x * -1, self.y * -1)
def reflected(self):
return self.get_reflected()
def reflect(self):
self.x = self.x * -1
self.y = self.y * -1
def rotated(self, angle_degrees):
radians = math.radians(angle_degrees)
cos = math.cos(radians)
sin = math.sin(radians)
x = self.x*cos - self.y*sin
y = self.x*sin + self.y*cos
return Vector2D(x, y)
def get_angle(self):
if (self.get_length_sqrd() == 0):
return 0
return math.degrees(math.atan2(self.y, self.x))
def __setangle(self, angle_degrees):
self.x = self.length
self.y = 0
self.rotate(angle_degrees)
angle = property(get_angle, __setangle, None, "gets or sets the angle of a vector")
def get_angle_between(self, other):
cross = self.x*other[1] - self.y*other[0]
dot = self.x*other[0] + self.y*other[1]
return math.degrees(math.atan2(cross, dot))
def normalized(self):
length = self.length
if length != 0:
return self/length
return Vector2D(self)
def normalised(self):
return self.normalized()
def normalize_return_length(self):
length = self.length
if length != 0:
self.x /= length
self.y /= length
return length
def normalise_return_length(self):
return self.normalize_return_length()
def perpendicular(self):
return Vector2D(-self.y, self.x)
def perpendicular_normal(self):
length = self.length
if length != 0:
return Vector2D(-self.y/length, self.x/length)
return Vector2D(self)
def dot(self, other):
return float(self.x*other[0] + self.y*other[1])
def get_distance(self, other):
return math.sqrt((self.x - other[0])**2 + (self.y - other[1])**2)
def get_dist_sqrd(self, other):
return (self.x - other[0])**2 + (self.y - other[1])**2
def projection(self, other):
other_length_sqrd = other[0]*other[0] + other[1]*other[1]
projected_length_times_other_length = self.dot(other)
return other*(projected_length_times_other_length/other_length_sqrd)
def clamp(self, max, min):
if self.x > max:
self.x = max
elif self.x < min:
self.x = min
if self.y > max:
self.y = max
elif self.y < min:
self.y = min
return self
def cross(self, other):
return self.x*other[1] - self.y*other[0]
def interpolate_to(self, other, range):
return Vector2D(self.x + (other[0] - self.x)*range, self.y + (other[1] - self.y)*range)
def convert_to_basis(self, x_vector, y_vector):
return Vector2D(self.dot(x_vector)/x_vector.get_length_sqrd(), self.dot(y_vector)/y_vector.get_length_sqrd())
def __getstate__(self):
return [self.x, self.y]
def __setstate__(self, dict):
self.x, self.y = dict
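# --- Illustration only; not part of the original module. ---
# A short sketch of the constructor overloads and a few helpers; note the class
# stores integer components, so coordinates are truncated on assignment.
def _demo_vector2d():
    a = Vector2D(3, 4)                       # from x, y
    b = Vector2D((1, 0))                     # from a pair
    c = Vector2D(bearing=90, magnitude=10)   # from polar form -> (0, 10)
    print(a.get_length())                    # 5.0
    print(a.dot(b))                          # 3.0
    print(a.rotated(90))                     # Vector2D(-4, 3)
    return a + b, c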
| 29.907035 | 117 | 0.56784 |
7945737596cc8c402f6a0d0f1ee4f5a1160d8bc3 | 118 | py | Python | setup.py | BBsjj/BBpose | 4be635ead0f99b1788160ca0b1c7b3947ba05526 | [
"MIT"
] | 23 | 2021-07-08T22:38:13.000Z | 2022-03-30T12:45:01.000Z | setup.py | BBsjj/BBpose | 4be635ead0f99b1788160ca0b1c7b3947ba05526 | [
"MIT"
] | 3 | 2021-09-12T13:20:30.000Z | 2022-02-15T06:33:02.000Z | setup.py | BBsjj/BBpose | 4be635ead0f99b1788160ca0b1c7b3947ba05526 | [
"MIT"
] | 13 | 2021-07-27T02:55:54.000Z | 2022-02-28T05:55:42.000Z | import setuptools
setuptools.setup(
name='bbpose',
version='0.1.0',
packages=setuptools.find_packages()
) | 16.857143 | 39 | 0.694915 |
794573d554875c480207754ad5ec6fd670b73219 | 3,361 | py | Python | tools/profiling/microbenchmarks/bm_diff/bm_build.py | eaglesunshine/grpc_learn | ecf33b83c9d49892539a3ffed9dc1553d91f59ae | [
"BSD-3-Clause"
] | 1 | 2017-12-12T20:55:14.000Z | 2017-12-12T20:55:14.000Z | tools/profiling/microbenchmarks/bm_diff/bm_build.py | eaglesunshine/grpc_learn | ecf33b83c9d49892539a3ffed9dc1553d91f59ae | [
"BSD-3-Clause"
] | null | null | null | tools/profiling/microbenchmarks/bm_diff/bm_build.py | eaglesunshine/grpc_learn | ecf33b83c9d49892539a3ffed9dc1553d91f59ae | [
"BSD-3-Clause"
] | 1 | 2020-11-04T04:12:37.000Z | 2020-11-04T04:12:37.000Z | # Copyright 2017, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Python utility to build opt and counters benchmarks """
import bm_constants
import argparse
import subprocess
import multiprocessing
import os
import shutil
def _args():
argp = argparse.ArgumentParser(description='Builds microbenchmarks')
argp.add_argument(
'-b',
'--benchmarks',
nargs='+',
choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
help='Which benchmarks to build')
argp.add_argument(
'-j',
'--jobs',
type=int,
default=multiprocessing.cpu_count(),
help='How many CPUs to dedicate to this task')
argp.add_argument(
'-n',
'--name',
type=str,
help='Unique name of this build. To be used as a handle to pass to the other bm* scripts'
)
argp.add_argument('--counters', dest='counters', action='store_true')
argp.add_argument('--no-counters', dest='counters', action='store_false')
argp.set_defaults(counters=True)
args = argp.parse_args()
assert args.name
return args
def _make_cmd(cfg, benchmarks, jobs):
return ['make'] + benchmarks + ['CONFIG=%s' % cfg, '-j', '%d' % jobs]
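def _demo_make_cmd():
  # Illustration only; not part of the original gRPC script. It shows the exact
  # argv that _make_cmd assembles for an opt build of one (hypothetical) benchmark
  # target using 8 parallel jobs.
  assert _make_cmd('opt', ['bm_fullstack_trickle'], 8) == [
      'make', 'bm_fullstack_trickle', 'CONFIG=opt', '-j', '8']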
def build(name, benchmarks, jobs, counters):
shutil.rmtree('bm_diff_%s' % name, ignore_errors=True)
subprocess.check_call(['git', 'submodule', 'update'])
try:
subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
if counters:
subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
  except subprocess.CalledProcessError as e:
subprocess.check_call(['make', 'clean'])
subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
if counters:
subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
os.rename(
'bins',
'bm_diff_%s' % name,)
if __name__ == '__main__':
args = _args()
build(args.name, args.benchmarks, args.jobs, args.counters)
| 35.755319 | 93 | 0.734305 |
794575b5551d146bd9859f9ba8fb2cf963ba13ef | 17,705 | py | Python | pubsub/google/cloud/pubsub_v1/gapic/transports/subscriber_grpc_transport.py | TheNeuralBit/google-cloud-python | 226cdf12f5dd69afb0ef665bb9e897d32d56f4b6 | [
"Apache-2.0"
] | null | null | null | pubsub/google/cloud/pubsub_v1/gapic/transports/subscriber_grpc_transport.py | TheNeuralBit/google-cloud-python | 226cdf12f5dd69afb0ef665bb9e897d32d56f4b6 | [
"Apache-2.0"
] | null | null | null | pubsub/google/cloud/pubsub_v1/gapic/transports/subscriber_grpc_transport.py | TheNeuralBit/google-cloud-python | 226cdf12f5dd69afb0ef665bb9e897d32d56f4b6 | [
"Apache-2.0"
] | 1 | 2021-07-21T17:59:33.000Z | 2021-07-21T17:59:33.000Z | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
from google.cloud.pubsub_v1.proto import pubsub_pb2_grpc
from google.iam.v1 import iam_policy_pb2
class SubscriberGrpcTransport(object):
"""gRPC transport class providing stubs for
google.pubsub.v1 Subscriber API.
The transport provides access to the raw gRPC stubs,
which can be used to take advantage of advanced
features of gRPC.
"""
# The scopes needed to make gRPC calls to all of the methods defined
# in this service.
_OAUTH_SCOPES = (
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/pubsub",
)
def __init__(
self, channel=None, credentials=None, address="pubsub.googleapis.com:443"
):
"""Instantiate the transport class.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
address (str): The address where the service is hosted.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
"The `channel` and `credentials` arguments are mutually " "exclusive."
)
# Create the channel.
if channel is None:
channel = self.create_channel(address=address, credentials=credentials)
self._channel = channel
# gRPC uses objects called "stubs" that are bound to the
# channel and provide a basic method for each RPC.
self._stubs = {
"iam_policy_stub": iam_policy_pb2.IAMPolicyStub(channel),
"subscriber_stub": pubsub_pb2_grpc.SubscriberStub(channel),
}
@classmethod
def create_channel(
cls, address="pubsub.googleapis.com:443", credentials=None, **kwargs
):
"""Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
kwargs (dict): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return google.api_core.grpc_helpers.create_channel(
address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs
)
@property
def channel(self):
"""The gRPC channel used by the transport.
Returns:
grpc.Channel: A gRPC channel object.
"""
return self._channel
@property
def create_subscription(self):
"""Return the gRPC stub for :meth:`SubscriberClient.create_subscription`.
Creates a subscription to a given topic. See the resource name rules. If
the subscription already exists, returns ``ALREADY_EXISTS``. If the
corresponding topic doesn't exist, returns ``NOT_FOUND``.
If the name is not provided in the request, the server will assign a
random name for this subscription on the same project as the topic,
conforming to the `resource name
format <https://cloud.google.com/pubsub/docs/admin#resource_names>`__.
The generated name is populated in the returned Subscription object.
Note that for REST API requests, you must specify a name in the request.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["subscriber_stub"].CreateSubscription
@property
def get_subscription(self):
"""Return the gRPC stub for :meth:`SubscriberClient.get_subscription`.
Gets the configuration details of a subscription.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["subscriber_stub"].GetSubscription
@property
def update_subscription(self):
"""Return the gRPC stub for :meth:`SubscriberClient.update_subscription`.
Updates an existing subscription. Note that certain properties of a
subscription, such as its topic, are not modifiable.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["subscriber_stub"].UpdateSubscription
@property
def list_subscriptions(self):
"""Return the gRPC stub for :meth:`SubscriberClient.list_subscriptions`.
Lists matching subscriptions.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["subscriber_stub"].ListSubscriptions
@property
def delete_subscription(self):
"""Return the gRPC stub for :meth:`SubscriberClient.delete_subscription`.
Deletes an existing subscription. All messages retained in the
subscription are immediately dropped. Calls to ``Pull`` after deletion
will return ``NOT_FOUND``. After a subscription is deleted, a new one
may be created with the same name, but the new one has no association
with the old subscription or its topic unless the same topic is
specified.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["subscriber_stub"].DeleteSubscription
@property
def modify_ack_deadline(self):
"""Return the gRPC stub for :meth:`SubscriberClient.modify_ack_deadline`.
Modifies the ack deadline for a specific message. This method is useful
to indicate that more time is needed to process a message by the
subscriber, or to make the message available for redelivery if the
processing was interrupted. Note that this does not modify the
subscription-level ``ackDeadlineSeconds`` used for subsequent messages.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["subscriber_stub"].ModifyAckDeadline
@property
def acknowledge(self):
"""Return the gRPC stub for :meth:`SubscriberClient.acknowledge`.
Acknowledges the messages associated with the ``ack_ids`` in the
``AcknowledgeRequest``. The Pub/Sub system can remove the relevant
messages from the subscription.
Acknowledging a message whose ack deadline has expired may succeed, but
such a message may be redelivered later. Acknowledging a message more
than once will not result in an error.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["subscriber_stub"].Acknowledge
@property
def pull(self):
"""Return the gRPC stub for :meth:`SubscriberClient.pull`.
Pulls messages from the server. The server may return ``UNAVAILABLE`` if
there are too many concurrent pull requests pending for the given
subscription.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["subscriber_stub"].Pull
@property
def streaming_pull(self):
"""Return the gRPC stub for :meth:`SubscriberClient.streaming_pull`.
Establishes a stream with the server, which sends messages down to the
client. The client streams acknowledgements and ack deadline
modifications back to the server. The server will close the stream and
return the status on any error. The server may close the stream with
status ``UNAVAILABLE`` to reassign server-side resources, in which case,
the client should re-establish the stream. Flow control can be achieved
by configuring the underlying RPC channel.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["subscriber_stub"].StreamingPull
@property
def modify_push_config(self):
"""Return the gRPC stub for :meth:`SubscriberClient.modify_push_config`.
Modifies the ``PushConfig`` for a specified subscription.
This may be used to change a push subscription to a pull one (signified
by an empty ``PushConfig``) or vice versa, or change the endpoint URL
and other attributes of a push subscription. Messages will accumulate
for delivery continuously through the call regardless of changes to the
``PushConfig``.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["subscriber_stub"].ModifyPushConfig
@property
def list_snapshots(self):
"""Return the gRPC stub for :meth:`SubscriberClient.list_snapshots`.
Lists the existing snapshots. Snapshots are used in
<a href="https://cloud.google.com/pubsub/docs/replay-overview">Seek</a>
operations, which allow
you to manage message acknowledgments in bulk. That is, you can set the
acknowledgment state of messages in an existing subscription to the state
captured by a snapshot.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["subscriber_stub"].ListSnapshots
@property
def create_snapshot(self):
"""Return the gRPC stub for :meth:`SubscriberClient.create_snapshot`.
Creates a snapshot from the requested subscription. Snapshots are used
in Seek operations, which allow you to manage message acknowledgments in
bulk. That is, you can set the acknowledgment state of messages in an
existing subscription to the state captured by a snapshot. If the
snapshot already exists, returns ``ALREADY_EXISTS``. If the requested
subscription doesn't exist, returns ``NOT_FOUND``. If the backlog in the
subscription is too old -- and the resulting snapshot would expire in
less than 1 hour -- then ``FAILED_PRECONDITION`` is returned. See also
the ``Snapshot.expire_time`` field. If the name is not provided in the
request, the server will assign a random name for this snapshot on the
same project as the subscription, conforming to the `resource name
format <https://cloud.google.com/pubsub/docs/admin#resource_names>`__.
The generated name is populated in the returned Snapshot object. Note
that for REST API requests, you must specify a name in the request.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["subscriber_stub"].CreateSnapshot
@property
def update_snapshot(self):
"""Return the gRPC stub for :meth:`SubscriberClient.update_snapshot`.
Updates an existing snapshot. Snapshots are used in
<a href="https://cloud.google.com/pubsub/docs/replay-overview">Seek</a>
operations, which allow
you to manage message acknowledgments in bulk. That is, you can set the
acknowledgment state of messages in an existing subscription to the state
captured by a snapshot.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["subscriber_stub"].UpdateSnapshot
@property
def delete_snapshot(self):
"""Return the gRPC stub for :meth:`SubscriberClient.delete_snapshot`.
Removes an existing snapshot. Snapshots are used in
<a href="https://cloud.google.com/pubsub/docs/replay-overview">Seek</a>
operations, which allow
you to manage message acknowledgments in bulk. That is, you can set the
acknowledgment state of messages in an existing subscription to the state
captured by a snapshot.<br><br>
When the snapshot is deleted, all messages retained in the snapshot
are immediately dropped. After a snapshot is deleted, a new one may be
created with the same name, but the new one has no association with the old
snapshot or its subscription, unless the same subscription is specified.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["subscriber_stub"].DeleteSnapshot
@property
def seek(self):
"""Return the gRPC stub for :meth:`SubscriberClient.seek`.
Seeks an existing subscription to a point in time or to a given snapshot,
whichever is provided in the request. Snapshots are used in
<a href="https://cloud.google.com/pubsub/docs/replay-overview">Seek</a>
operations, which allow
you to manage message acknowledgments in bulk. That is, you can set the
acknowledgment state of messages in an existing subscription to the state
captured by a snapshot. Note that both the subscription and the snapshot
must be on the same topic.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["subscriber_stub"].Seek
@property
def set_iam_policy(self):
"""Return the gRPC stub for :meth:`SubscriberClient.set_iam_policy`.
Sets the access control policy on the specified resource. Replaces any
existing policy.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["iam_policy_stub"].SetIamPolicy
@property
def get_iam_policy(self):
"""Return the gRPC stub for :meth:`SubscriberClient.get_iam_policy`.
Gets the access control policy for a resource.
Returns an empty policy if the resource exists and does not have a policy
set.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["iam_policy_stub"].GetIamPolicy
@property
def test_iam_permissions(self):
"""Return the gRPC stub for :meth:`SubscriberClient.test_iam_permissions`.
Returns permissions that a caller has on the specified resource. If the
resource does not exist, this will return an empty set of permissions,
not a NOT\_FOUND error.
Note: This operation is designed to be used for building
permission-aware UIs and command-line tools, not for authorization
checking. This operation may "fail open" without warning.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["iam_policy_stub"].TestIamPermissions
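# --- Illustration only; not part of the generated client. ---
# A minimal sketch of constructing the transport directly. With no explicit
# credentials, create_channel falls back to application default credentials; the
# properties above return the raw bound gRPC callables rather than wrapped methods.
def _demo_transport():
    transport = SubscriberGrpcTransport(address="pubsub.googleapis.com:443")
    pull = transport.pull          # bound callable for the Pull RPC
    ack = transport.acknowledge    # bound callable for the Acknowledge RPC
    return pull, ack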
| 41.366822 | 86 | 0.666365 |
794575e0de65ff6fd4e3dacadc4519912e96c19f | 4,471 | py | Python | sdk/keyvault/azure-keyvault-keys/tests/test_examples_crypto_async.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 1 | 2021-12-07T13:43:54.000Z | 2021-12-07T13:43:54.000Z | sdk/keyvault/azure-keyvault-keys/tests/test_examples_crypto_async.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 4 | 2019-04-17T17:57:49.000Z | 2020-04-24T21:11:22.000Z | sdk/keyvault/azure-keyvault-keys/tests/test_examples_crypto_async.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 1 | 2019-04-05T18:17:43.000Z | 2019-04-05T18:17:43.000Z | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from azure.keyvault.keys.crypto.aio import CryptographyClient
from _shared.test_case_async import KeyVaultTestCase
from _test_case import client_setup, get_decorator, KeysTestCase
all_api_versions = get_decorator(is_async=True, vault_only=True)
class TestCryptoExamples(KeysTestCase, KeyVaultTestCase):
def __init__(self, *args, **kwargs):
kwargs["match_body"] = False
super(TestCryptoExamples, self).__init__(*args, **kwargs)
@all_api_versions()
@client_setup
async def test_encrypt_decrypt_async(self, key_client, **kwargs):
credential = self.get_credential(CryptographyClient, is_async=True)
key_name = self.get_resource_name("crypto-test-encrypt-key")
await key_client.create_rsa_key(key_name)
# [START create_client]
# create a CryptographyClient using a KeyVaultKey instance
key = await key_client.get_key(key_name)
crypto_client = CryptographyClient(key, credential)
# or a key's id, which must include a version
key_id = "https://<your vault>.vault.azure.net/keys/<key name>/fe4fdcab688c479a9aa80f01ffeac26"
crypto_client = CryptographyClient(key_id, credential)
# the client and credential should be closed when no longer needed
# (both are also async context managers)
await crypto_client.close()
await credential.close()
# [END create_client]
client = CryptographyClient(key, credential)
# [START encrypt]
from azure.keyvault.keys.crypto import EncryptionAlgorithm
# the result holds the ciphertext and identifies the encryption key and algorithm used
result = await client.encrypt(EncryptionAlgorithm.rsa_oaep, b"plaintext")
print(result.key_id)
print(result.algorithm)
ciphertext = result.ciphertext
# [END encrypt]
# [START decrypt]
from azure.keyvault.keys.crypto import EncryptionAlgorithm
result = await client.decrypt(EncryptionAlgorithm.rsa_oaep, ciphertext)
print(result.plaintext)
# [END decrypt]
@all_api_versions()
@client_setup
async def test_wrap_unwrap_async(self, key_client, **kwargs):
credential = self.get_credential(CryptographyClient, is_async=True)
key_name = self.get_resource_name("crypto-test-wrapping-key")
key = await key_client.create_rsa_key(key_name)
client = CryptographyClient(key, credential)
key_bytes = b"5063e6aaa845f150200547944fd199679c98ed6f99da0a0b2dafeaf1f4684496fd532c1c229968cb9dee44957fcef7ccef59ceda0b362e56bcd78fd3faee5781c623c0bb22b35beabde0664fd30e0e824aba3dd1b0afffc4a3d955ede20cf6a854d52cfd"
# [START wrap_key]
from azure.keyvault.keys.crypto import KeyWrapAlgorithm
# wrap returns a tuple with the wrapped bytes and the metadata required to unwrap the key
result = await client.wrap_key(KeyWrapAlgorithm.rsa_oaep, key_bytes)
print(result.key_id)
print(result.algorithm)
encrypted_key = result.encrypted_key
# [END wrap_key]
# [START unwrap_key]
from azure.keyvault.keys.crypto import KeyWrapAlgorithm
result = await client.unwrap_key(KeyWrapAlgorithm.rsa_oaep, encrypted_key)
# [END unwrap_key]
@all_api_versions()
@client_setup
async def test_sign_verify_async(self, key_client, **kwargs):
credential = self.get_credential(CryptographyClient, is_async=True)
key_name = self.get_resource_name("crypto-test-wrapping-key")
key = await key_client.create_rsa_key(key_name)
client = CryptographyClient(key, credential)
# [START sign]
import hashlib
from azure.keyvault.keys.crypto import SignatureAlgorithm
digest = hashlib.sha256(b"plaintext").digest()
# sign returns the signature and the metadata required to verify it
result = await client.sign(SignatureAlgorithm.rs256, digest)
print(result.key_id)
print(result.algorithm)
signature = result.signature
# [END sign]
# [START verify]
from azure.keyvault.keys.crypto import SignatureAlgorithm
verified = await client.verify(SignatureAlgorithm.rs256, digest, signature)
assert verified.is_valid
# [END verify]
| 39.566372 | 223 | 0.700067 |
794576ec78d3665ee10177db061775cdfbb4e7fd | 880 | py | Python | setup.py | makreft/ldf_parser | 7887a02d310cc5856828f9de4da008aaaa4800c1 | [
"MIT"
] | 2 | 2021-06-01T13:35:42.000Z | 2021-06-06T14:03:29.000Z | setup.py | makreft/ldf_parser | 7887a02d310cc5856828f9de4da008aaaa4800c1 | [
"MIT"
] | 1 | 2021-09-23T20:56:00.000Z | 2021-09-25T20:52:13.000Z | setup.py | makreft/lin_ldf_parser | 7887a02d310cc5856828f9de4da008aaaa4800c1 | [
"MIT"
] | null | null | null | from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="lin_ldf_parser",
version="0.01",
description="Simple regex based parser for the lin description file.",
    py_modules=["lin_ldf_parser"],
package_dir={"": "lin_ldf_parser"},
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=[
"numpy ~= 1.20.0",
"pandas ~= 1.2.1",
"pip ~= 21.0.1",
],
extras_require={
"dev": [
"pytest >= 6.2.2",
]
},
url="https://github.com/makreft/ldf_parser",
author="Marco Kreft",
)
| 26.666667 | 74 | 0.5875 |
79457704b3331e4efac83f2ffa134f5111387767 | 405 | py | Python | solutions/make-some-noise/solver.py | HackerDom/qctf-starter-2018 | f4eef0fd41d777661b9fbcc61dcee9709d9f6268 | [
"MIT"
] | 8 | 2018-03-15T12:07:11.000Z | 2020-12-01T15:02:46.000Z | solutions/make-some-noise/solver.py | HackerDom/qctf-starter-2018 | f4eef0fd41d777661b9fbcc61dcee9709d9f6268 | [
"MIT"
] | 17 | 2020-01-28T22:17:42.000Z | 2022-03-11T23:18:09.000Z | solutions/make-some-noise/solver.py | HackerDom/qctf-starter-2018 | f4eef0fd41d777661b9fbcc61dcee9709d9f6268 | [
"MIT"
] | 2 | 2018-11-26T18:54:27.000Z | 2018-12-05T17:37:32.000Z | import requests
import numpy as np
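# (Editorial comment, not in the original solver.) The endpoint appears to return
# the flag with random per-character noise, so the solver samples it 100 times and
# averages the character codes position by position; rounding each mean back to a
# character cancels the noise and recovers the flag.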
def main():
results=[]
for i in range(100):
r=requests.get('https://make-some-noise.contest.qctf.ru/2TjUAurc7P60IBLM2qCe')
results.append([ord(x) for x in r.text])
flag = (chr(int(round(np.mean(list(map(lambda arr:arr[i], results)))))) for i in range(len(results[0])))
print(''.join(flag))
if __name__ == '__main__':
main() | 28.928571 | 112 | 0.624691 |
794577489358e845905c1748449a65562d341a83 | 63,944 | py | Python | apps/challenges/aws_utils.py | Happytocode24/EvalAI | 39bd56239d2eef5a1b4f16ab0eb44be7b5fbdc07 | [
"BSD-3-Clause"
] | null | null | null | apps/challenges/aws_utils.py | Happytocode24/EvalAI | 39bd56239d2eef5a1b4f16ab0eb44be7b5fbdc07 | [
"BSD-3-Clause"
] | null | null | null | apps/challenges/aws_utils.py | Happytocode24/EvalAI | 39bd56239d2eef5a1b4f16ab0eb44be7b5fbdc07 | [
"BSD-3-Clause"
] | null | null | null | import json
import logging
import os
import random
import string
import uuid
import yaml
from botocore.exceptions import ClientError
from django.conf import settings
from django.core import serializers
from django.core.files.temp import NamedTemporaryFile
from http import HTTPStatus
from .challenge_notification_util import (
construct_and_send_worker_start_mail,
construct_and_send_eks_cluster_creation_mail,
)
from base.utils import get_boto3_client, send_email
from evalai.celery import app
from accounts.models import JwtToken
logger = logging.getLogger(__name__)
DJANGO_SETTINGS_MODULE = os.environ.get("DJANGO_SETTINGS_MODULE")
ENV = DJANGO_SETTINGS_MODULE.split(".")[-1]
EVALAI_DNS = os.environ.get("SERVICE_DNS")
aws_keys = {
"AWS_ACCOUNT_ID": os.environ.get("AWS_ACCOUNT_ID", "x"),
"AWS_ACCESS_KEY_ID": os.environ.get("AWS_ACCESS_KEY_ID", "x"),
"AWS_SECRET_ACCESS_KEY": os.environ.get("AWS_SECRET_ACCESS_KEY", "x"),
"AWS_REGION": os.environ.get("AWS_DEFAULT_REGION", "us-east-1"),
"AWS_STORAGE_BUCKET_NAME": os.environ.get(
"AWS_STORAGE_BUCKET_NAME", "evalai-s3-bucket"
),
}
COMMON_SETTINGS_DICT = {
"EXECUTION_ROLE_ARN": os.environ.get(
"EXECUTION_ROLE_ARN",
"arn:aws:iam::{}:role/evalaiTaskExecutionRole".format(
aws_keys["AWS_ACCOUNT_ID"]
),
),
"WORKER_IMAGE": os.environ.get(
"WORKER_IMAGE",
"{}.dkr.ecr.us-east-1.amazonaws.com/evalai-{}-worker:latest".format(
aws_keys["AWS_ACCOUNT_ID"], ENV
),
),
"CODE_UPLOAD_WORKER_IMAGE": os.environ.get(
"CODE_UPLOAD_WORKER_IMAGE",
"{}.dkr.ecr.us-east-1.amazonaws.com/evalai-{}-worker:latest".format(
aws_keys["AWS_ACCOUNT_ID"], ENV
),
),
"CIDR": os.environ.get("CIDR"),
"CLUSTER": os.environ.get("CLUSTER", "evalai-prod-cluster"),
"DJANGO_SERVER": os.environ.get("DJANGO_SERVER", "localhost"),
"EVALAI_API_SERVER": os.environ.get("EVALAI_API_SERVER", "localhost"),
"DEBUG": settings.DEBUG,
"EMAIL_HOST": settings.EMAIL_HOST,
"EMAIL_HOST_PASSWORD": settings.EMAIL_HOST_PASSWORD,
"EMAIL_HOST_USER": settings.EMAIL_HOST_USER,
"EMAIL_PORT": settings.EMAIL_PORT,
"EMAIL_USE_TLS": settings.EMAIL_USE_TLS,
"MEMCACHED_LOCATION": os.environ.get("MEMCACHED_LOCATION", None),
"RDS_DB_NAME": settings.DATABASES["default"]["NAME"],
"RDS_HOSTNAME": settings.DATABASES["default"]["HOST"],
"RDS_PASSWORD": settings.DATABASES["default"]["PASSWORD"],
"RDS_USERNAME": settings.DATABASES["default"]["USER"],
"RDS_PORT": settings.DATABASES["default"]["PORT"],
"SECRET_KEY": settings.SECRET_KEY,
"SENTRY_URL": os.environ.get("SENTRY_URL"),
}
VPC_DICT = {
"SUBNET_1": os.environ.get("SUBNET_1", "subnet1"),
"SUBNET_2": os.environ.get("SUBNET_2", "subnet2"),
"SUBNET_SECURITY_GROUP": os.environ.get("SUBNET_SECURITY_GROUP", "sg"),
}
task_definition = """
{{
"family":"{queue_name}",
"executionRoleArn":"{EXECUTION_ROLE_ARN}",
"networkMode":"awsvpc",
"containerDefinitions":[
{{
"name": "{container_name}",
"image": "{WORKER_IMAGE}",
"essential": True,
"environment": [
{{
"name": "AWS_DEFAULT_REGION",
"value": "{AWS_REGION}"
}},
{{
"name": "AWS_ACCOUNT_ID",
"value": "{AWS_ACCOUNT_ID}"
}},
{{
"name": "AWS_ACCESS_KEY_ID",
"value": "{AWS_ACCESS_KEY_ID}"
}},
{{
"name": "AWS_SECRET_ACCESS_KEY",
"value": "{AWS_SECRET_ACCESS_KEY}"
}},
{{
"name": "AWS_STORAGE_BUCKET_NAME",
"value": "{AWS_STORAGE_BUCKET_NAME}"
}},
{{
"name": "CHALLENGE_PK",
"value": "{challenge_pk}"
}},
{{
"name": "CHALLENGE_QUEUE",
"value": "{queue_name}"
}},
{{
"name": "DJANGO_SERVER",
"value": "{DJANGO_SERVER}"
}},
{{
"name": "DJANGO_SETTINGS_MODULE",
"value": "settings.{ENV}"
}},
{{
"name": "DEBUG",
"value": "{DEBUG}"
}},
{{
"name": "EMAIL_HOST",
"value": "{EMAIL_HOST}"
}},
{{
"name": "EMAIL_HOST_PASSWORD",
"value": "{EMAIL_HOST_PASSWORD}"
}},
{{
"name": "EMAIL_HOST_USER",
"value": "{EMAIL_HOST_USER}"
}},
{{
"name": "EMAIL_PORT",
"value": "{EMAIL_PORT}"
}},
{{
"name": "EMAIL_USE_TLS",
"value": "{EMAIL_USE_TLS}"
}},
{{
"name": "MEMCACHED_LOCATION",
"value": "{MEMCACHED_LOCATION}"
}},
{{
"name": "PYTHONUNBUFFERED",
"value": "1"
}},
{{
"name": "RDS_DB_NAME",
"value": "{RDS_DB_NAME}"
}},
{{
"name": "RDS_HOSTNAME",
"value": "{RDS_HOSTNAME}"
}},
{{
"name": "RDS_PASSWORD",
"value": "{RDS_PASSWORD}"
}},
{{
"name": "RDS_USERNAME",
"value": "{RDS_USERNAME}"
}},
{{
"name": "RDS_PORT",
"value": "{RDS_PORT}"
}},
{{
"name": "SECRET_KEY",
"value": "{SECRET_KEY}"
}},
{{
"name": "SENTRY_URL",
"value": "{SENTRY_URL}"
}},
{{
"name": "AWS_SES_REGION_NAME",
"value": "{AWS_SES_REGION_NAME}"
}},
{{
"name": "AWS_SES_REGION_ENDPOINT",
"value": "{AWS_SES_REGION_ENDPOINT}"
}}
],
"workingDirectory": "/code",
"readonlyRootFilesystem": False,
"logConfiguration": {{
"logDriver": "awslogs",
"options": {{
"awslogs-group": "{log_group_name}",
"awslogs-region": "us-east-1",
"awslogs-stream-prefix": "{queue_name}",
"awslogs-create-group": "true",
}},
}},
}}
],
"requiresCompatibilities":[
"FARGATE"
],
"cpu": "{CPU}",
"memory": "{MEMORY}",
}}
"""
task_definition_code_upload_worker = """
{{
"family":"{queue_name}",
"executionRoleArn":"{EXECUTION_ROLE_ARN}",
"networkMode":"awsvpc",
"containerDefinitions":[
{{
"name": "{code_upload_container_name}",
"image": "{CODE_UPLOAD_WORKER_IMAGE}",
"essential": True,
"environment": [
{{
"name": "AWS_DEFAULT_REGION",
"value": "{AWS_REGION}"
}},
{{
"name": "AWS_ACCESS_KEY_ID",
"value": "{AWS_ACCESS_KEY_ID}"
}},
{{
"name": "AWS_SECRET_ACCESS_KEY",
"value": "{AWS_SECRET_ACCESS_KEY}"
}},
{{
"name": "CLUSTER_NAME",
"value": "{cluster_name}"
}},
{{
"name": "CLUSTER_ENDPOINT",
"value": "{cluster_endpoint}"
}},
{{
"name": "CERTIFICATE",
"value": "{certificate}"
}},
{{
"name": "CIDR",
"value": "{CIDR}"
}},
{{
"name": "QUEUE_NAME",
"value": "{queue_name}"
}},
{{
"name": "EVALAI_API_SERVER",
"value": "{EVALAI_API_SERVER}"
}},
{{
"name": "AUTH_TOKEN",
"value": "{auth_token}"
}},
{{
"name": "EVALAI_DNS",
"value": "{EVALAI_DNS}"
}},
{{
"name": "EFS_ID",
"value": "{EFS_ID}"
}}
],
"workingDirectory": "/code",
"readonlyRootFilesystem": False,
"logConfiguration": {{
"logDriver": "awslogs",
"options": {{
"awslogs-group": "{log_group_name}",
"awslogs-region": "us-east-1",
"awslogs-stream-prefix": "{queue_name}",
"awslogs-create-group": "true",
}},
}},
}}
],
"requiresCompatibilities":[
"FARGATE"
],
"cpu": "{CPU}",
"memory": "{MEMORY}",
}}
"""
task_definition_static_code_upload_worker = """
{{
"family":"{queue_name}",
"executionRoleArn":"{EXECUTION_ROLE_ARN}",
"networkMode":"awsvpc",
"containerDefinitions":[
{{
"name": "{code_upload_container_name}",
"image": "{CODE_UPLOAD_WORKER_IMAGE}",
"essential": True,
"environment": [
{{
"name": "AWS_DEFAULT_REGION",
"value": "{AWS_REGION}"
}},
{{
"name": "AWS_ACCESS_KEY_ID",
"value": "{AWS_ACCESS_KEY_ID}"
}},
{{
"name": "AWS_SECRET_ACCESS_KEY",
"value": "{AWS_SECRET_ACCESS_KEY}"
}},
{{
"name": "CLUSTER_NAME",
"value": "{cluster_name}"
}},
{{
"name": "CLUSTER_ENDPOINT",
"value": "{cluster_endpoint}"
}},
{{
"name": "CERTIFICATE",
"value": "{certificate}"
}},
{{
"name": "CIDR",
"value": "{CIDR}"
}},
{{
"name": "QUEUE_NAME",
"value": "{queue_name}"
}},
{{
"name": "EVALAI_API_SERVER",
"value": "{EVALAI_API_SERVER}"
}},
{{
"name": "AUTH_TOKEN",
"value": "{auth_token}"
}},
{{
"name": "EVALAI_DNS",
"value": "{EVALAI_DNS}"
}},
{{
"name": "EFS_ID",
"value": "{EFS_ID}"
}}
],
"workingDirectory": "/code",
"readonlyRootFilesystem": False,
"logConfiguration": {{
"logDriver": "awslogs",
"options": {{
"awslogs-group": "{log_group_name}",
"awslogs-region": "us-east-1",
"awslogs-stream-prefix": "{queue_name}",
"awslogs-create-group": "true",
}},
}},
}},
{{
"name": "{container_name}",
"image": "{WORKER_IMAGE}",
"essential": True,
"environment": [
{{
"name": "AWS_DEFAULT_REGION",
"value": "{AWS_REGION}"
}},
{{
"name": "AWS_ACCOUNT_ID",
"value": "{AWS_ACCOUNT_ID}"
}},
{{
"name": "AWS_ACCESS_KEY_ID",
"value": "{AWS_ACCESS_KEY_ID}"
}},
{{
"name": "AWS_SECRET_ACCESS_KEY",
"value": "{AWS_SECRET_ACCESS_KEY}"
}},
{{
"name": "AWS_STORAGE_BUCKET_NAME",
"value": "{AWS_STORAGE_BUCKET_NAME}"
}},
{{
"name": "CHALLENGE_PK",
"value": "{challenge_pk}"
}},
{{
"name": "CHALLENGE_QUEUE",
"value": "{queue_name}"
}},
{{
"name": "DJANGO_SERVER",
"value": "{DJANGO_SERVER}"
}},
{{
"name": "DJANGO_SETTINGS_MODULE",
"value": "settings.{ENV}"
}},
{{
"name": "DEBUG",
"value": "{DEBUG}"
}},
{{
"name": "EMAIL_HOST",
"value": "{EMAIL_HOST}"
}},
{{
"name": "EMAIL_HOST_PASSWORD",
"value": "{EMAIL_HOST_PASSWORD}"
}},
{{
"name": "EMAIL_HOST_USER",
"value": "{EMAIL_HOST_USER}"
}},
{{
"name": "EMAIL_PORT",
"value": "{EMAIL_PORT}"
}},
{{
"name": "EMAIL_USE_TLS",
"value": "{EMAIL_USE_TLS}"
}},
{{
"name": "MEMCACHED_LOCATION",
"value": "{MEMCACHED_LOCATION}"
}},
{{
"name": "PYTHONUNBUFFERED",
"value": "1"
}},
{{
"name": "RDS_DB_NAME",
"value": "{RDS_DB_NAME}"
}},
{{
"name": "RDS_HOSTNAME",
"value": "{RDS_HOSTNAME}"
}},
{{
"name": "RDS_PASSWORD",
"value": "{RDS_PASSWORD}"
}},
{{
"name": "RDS_USERNAME",
"value": "{RDS_USERNAME}"
}},
{{
"name": "RDS_PORT",
"value": "{RDS_PORT}"
}},
{{
"name": "SECRET_KEY",
"value": "{SECRET_KEY}"
}},
{{
"name": "SENTRY_URL",
"value": "{SENTRY_URL}"
}},
{{
"name": "AWS_SES_REGION_NAME",
"value": "{AWS_SES_REGION_NAME}"
}},
{{
"name": "AWS_SES_REGION_ENDPOINT",
"value": "{AWS_SES_REGION_ENDPOINT}"
}}
],
"workingDirectory": "/code",
"readonlyRootFilesystem": False,
"logConfiguration": {{
"logDriver": "awslogs",
"options": {{
"awslogs-group": "{log_group_name}",
"awslogs-region": "us-east-1",
"awslogs-stream-prefix": "{queue_name}",
"awslogs-create-group": "true",
}},
}},
}}
],
"requiresCompatibilities":[
"FARGATE"
],
"cpu": "{CPU}",
"memory": "{MEMORY}",
}}
"""
service_definition = """
{{
"cluster":"{CLUSTER}",
"serviceName":"{service_name}",
"taskDefinition":"{task_def_arn}",
"desiredCount":1,
"clientToken":"{client_token}",
"launchType":"FARGATE",
"platformVersion":"LATEST",
"networkConfiguration":{{
"awsvpcConfiguration": {{
"subnets": [
"{SUBNET_1}",
"{SUBNET_2}",
],
'securityGroups': [
"{SUBNET_SECURITY_GROUP}",
],
"assignPublicIp": "ENABLED"
}}
}},
"schedulingStrategy":"REPLICA",
"deploymentController":{{
"type": "ECS"
}},
}}
"""
update_service_args = """
{{
"cluster":"{CLUSTER}",
"service":"{service_name}",
"desiredCount":num_of_tasks,
"taskDefinition":"{task_def_arn}",
"forceNewDeployment":{force_new_deployment}
}}
"""
delete_service_args = """
{{
"cluster": "{CLUSTER}",
"service": "{service_name}",
"force": False
}}
"""
def get_code_upload_setup_meta_for_challenge(challenge_pk):
"""
Return the EKS cluster network and arn meta for a challenge
Arguments:
        challenge_pk {int} -- challenge pk for which credentials are to be fetched
Returns:
code_upload_meta {dict} -- Dict containing cluster network and arn meta
"""
from .models import ChallengeEvaluationCluster
from .utils import get_challenge_model
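    # Illustrative shape of the returned dict (the values here are hypothetical):
    #     {"SUBNET_1": "subnet-0123", "SUBNET_2": "subnet-4567",
    #      "SUBNET_SECURITY_GROUP": "sg-89ab",
    #      "EKS_NODEGROUP_ROLE_ARN": "arn:aws:iam::123456789012:role/nodegroup-role",
    #      "EKS_CLUSTER_ROLE_ARN": "arn:aws:iam::123456789012:role/eks-role"}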
challenge = get_challenge_model(challenge_pk)
if challenge.use_host_credentials:
challenge_evaluation_cluster = ChallengeEvaluationCluster.objects.get(
challenge=challenge
)
code_upload_meta = {
"SUBNET_1": challenge_evaluation_cluster.subnet_1_id,
"SUBNET_2": challenge_evaluation_cluster.subnet_2_id,
"SUBNET_SECURITY_GROUP": challenge_evaluation_cluster.security_group_id,
"EKS_NODEGROUP_ROLE_ARN": challenge_evaluation_cluster.node_group_arn_role,
"EKS_CLUSTER_ROLE_ARN": challenge_evaluation_cluster.eks_arn_role,
}
else:
code_upload_meta = {
"SUBNET_1": VPC_DICT["SUBNET_1"],
"SUBNET_2": VPC_DICT["SUBNET_2"],
"SUBNET_SECURITY_GROUP": VPC_DICT["SUBNET_SECURITY_GROUP"],
"EKS_NODEGROUP_ROLE_ARN": settings.EKS_NODEGROUP_ROLE_ARN,
"EKS_CLUSTER_ROLE_ARN": settings.EKS_CLUSTER_ROLE_ARN,
}
return code_upload_meta
def get_log_group_name(challenge_pk):
log_group_name = "challenge-pk-{}-{}-workers".format(
challenge_pk, settings.ENVIRONMENT
)
return log_group_name
def client_token_generator(challenge_pk):
"""
    Returns a 32-character client token to ensure idempotency of create_service boto3 requests.
    Parameters:
        challenge_pk (int): primary key of the challenge, used as the prefix of the token.
    Returns:
        str: string of size 32 composed of the challenge pk followed by random letters and digits
"""
remaining_chars = 32 - len(str(challenge_pk))
random_char_string = "".join(
random.choices(string.ascii_letters + string.digits, k=remaining_chars)
)
client_token = f"{str(challenge_pk)}{random_char_string}"
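    # Illustrative example (the random suffix differs on every call): for challenge_pk=7
    # this yields "7" followed by 31 random letters/digits, always 32 characters in total.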
return client_token
def register_task_def_by_challenge_pk(client, queue_name, challenge):
"""
Registers the task definition of the worker for a challenge, before creating a service.
Parameters:
client (boto3.client): the client used for making requests to ECS.
        queue_name (str): the queue field of the Challenge model, used in several parameters of the task definition.
        challenge (<class 'challenges.models.Challenge'>): The challenge object for which the task definition is being registered.
    Returns:
        dict: A dict of the task definition and its ARN if successful, and an error dictionary if not
"""
container_name = "worker_{}".format(queue_name)
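    # Hedged usage sketch (the challenge object here is hypothetical):
    #     client = get_boto3_client("ecs", aws_keys)
    #     register_task_def_by_challenge_pk(client, challenge.queue, challenge)
    # On success the registered taskDefinitionArn is saved on challenge.task_def_arn.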
code_upload_container_name = "code_upload_worker_{}".format(queue_name)
worker_cpu_cores = challenge.worker_cpu_cores
worker_memory = challenge.worker_memory
log_group_name = get_log_group_name(challenge.pk)
execution_role_arn = COMMON_SETTINGS_DICT["EXECUTION_ROLE_ARN"]
AWS_SES_REGION_NAME = settings.AWS_SES_REGION_NAME
AWS_SES_REGION_ENDPOINT = settings.AWS_SES_REGION_ENDPOINT
if execution_role_arn:
from .utils import get_aws_credentials_for_challenge
challenge_aws_keys = get_aws_credentials_for_challenge(challenge.pk)
if challenge.is_docker_based:
from .models import ChallengeEvaluationCluster
# Cluster detail to be used by code-upload-worker
try:
cluster_details = ChallengeEvaluationCluster.objects.get(
challenge=challenge
)
cluster_name = cluster_details.name
cluster_endpoint = cluster_details.cluster_endpoint
cluster_certificate = cluster_details.cluster_ssl
efs_id = cluster_details.efs_id
except ClientError as e:
logger.exception(e)
return e.response
# challenge host auth token to be used by code-upload-worker
token = JwtToken.objects.get(user=challenge.creator.created_by)
if challenge.is_static_dataset_docker_based_challenge:
definition = task_definition_static_code_upload_worker.format(
queue_name=queue_name,
container_name=container_name,
code_upload_container_name=code_upload_container_name,
ENV=ENV,
challenge_pk=challenge.pk,
auth_token=token.refresh_token,
cluster_name=cluster_name,
cluster_endpoint=cluster_endpoint,
certificate=cluster_certificate,
CPU=worker_cpu_cores,
MEMORY=worker_memory,
log_group_name=log_group_name,
EVALAI_DNS=EVALAI_DNS,
EFS_ID=efs_id,
AWS_SES_REGION_NAME=AWS_SES_REGION_NAME,
AWS_SES_REGION_ENDPOINT=AWS_SES_REGION_ENDPOINT,
**COMMON_SETTINGS_DICT,
**challenge_aws_keys,
)
else:
definition = task_definition_code_upload_worker.format(
queue_name=queue_name,
code_upload_container_name=code_upload_container_name,
ENV=ENV,
challenge_pk=challenge.pk,
auth_token=token.refresh_token,
cluster_name=cluster_name,
cluster_endpoint=cluster_endpoint,
certificate=cluster_certificate,
CPU=worker_cpu_cores,
MEMORY=worker_memory,
log_group_name=log_group_name,
EVALAI_DNS=EVALAI_DNS,
EFS_ID=efs_id,
**COMMON_SETTINGS_DICT,
**challenge_aws_keys,
)
else:
definition = task_definition.format(
queue_name=queue_name,
container_name=container_name,
ENV=ENV,
challenge_pk=challenge.pk,
CPU=worker_cpu_cores,
MEMORY=worker_memory,
log_group_name=log_group_name,
AWS_SES_REGION_NAME=AWS_SES_REGION_NAME,
AWS_SES_REGION_ENDPOINT=AWS_SES_REGION_ENDPOINT,
**COMMON_SETTINGS_DICT,
**challenge_aws_keys,
)
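        # The task-definition/service/update/delete templates defined at module level are
        # dict literals kept as format strings: str.format() fills the placeholders and the
        # eval() below turns the rendered text into the kwargs dict handed to boto3.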
definition = eval(definition)
if not challenge.task_def_arn:
try:
response = client.register_task_definition(**definition)
if (
response["ResponseMetadata"]["HTTPStatusCode"]
== HTTPStatus.OK
):
task_def_arn = response["taskDefinition"][
"taskDefinitionArn"
]
challenge.task_def_arn = task_def_arn
challenge.save()
return response
except ClientError as e:
logger.exception(e)
return e.response
else:
message = "Error. Task definition already registered for challenge {}.".format(
challenge.pk
)
return {
"Error": message,
"ResponseMetadata": {"HTTPStatusCode": HTTPStatus.BAD_REQUEST},
}
else:
        message = "Please ensure that the EXECUTION_ROLE_ARN is appropriately passed as an environment variable."
return {
"Error": message,
"ResponseMetadata": {"HTTPStatusCode": HTTPStatus.BAD_REQUEST},
}
def create_service_by_challenge_pk(client, challenge, client_token):
"""
Creates the worker service for a challenge, and sets the number of workers to one.
Parameters:
client (boto3.client): the client used for making requests to ECS
        challenge (<class 'challenges.models.Challenge'>): The challenge object for which the worker service is being created.
        client_token (str): The client token generated by client_token_generator()
    Returns:
        dict: The response returned by the create_service method from boto3. If unsuccessful, returns an error dictionary
"""
queue_name = challenge.queue
service_name = "{}_service".format(queue_name)
if (
challenge.workers is None
    ):  # Verify that the challenge is new (i.e., service not yet created).
if challenge.task_def_arn == "" or challenge.task_def_arn is None:
response = register_task_def_by_challenge_pk(
client, queue_name, challenge
)
if response["ResponseMetadata"]["HTTPStatusCode"] != HTTPStatus.OK:
return response
task_def_arn = challenge.task_def_arn
definition = service_definition.format(
CLUSTER=COMMON_SETTINGS_DICT["CLUSTER"],
service_name=service_name,
task_def_arn=task_def_arn,
client_token=client_token,
**VPC_DICT,
)
definition = eval(definition)
try:
response = client.create_service(**definition)
if response["ResponseMetadata"]["HTTPStatusCode"] == HTTPStatus.OK:
challenge.workers = 1
challenge.save()
return response
except ClientError as e:
logger.exception(e)
return e.response
else:
message = "Worker service for challenge {} already exists. Please scale, stop or delete.".format(
challenge.pk
)
return {
"Error": message,
"ResponseMetadata": {"HTTPStatusCode": HTTPStatus.BAD_REQUEST},
}
def update_service_by_challenge_pk(
client, challenge, num_of_tasks, force_new_deployment=False
):
"""
Updates the worker service for a challenge, and scales the number of workers to num_of_tasks.
Parameters:
client (boto3.client): the client used for making requests to ECS
        challenge (<class 'challenges.models.Challenge'>): The challenge object whose worker service is being updated.
        num_of_tasks (int): Number of workers to scale to for the challenge.
        force_new_deployment (bool): Set True (mainly for restarting) to redeploy with the latest image from ECR. Default is False.
    Returns:
        dict: The response returned by the update_service method from boto3. If unsuccessful, returns an error dictionary
"""
queue_name = challenge.queue
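    # Hedged usage sketch: update_service_by_challenge_pk(client, challenge, 3) scales to
    # three workers; passing challenge.workers with force_new_deployment=True redeploys the
    # same number of tasks with the latest image (this is how restarts are done below).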
service_name = "{}_service".format(queue_name)
task_def_arn = challenge.task_def_arn
kwargs = update_service_args.format(
CLUSTER=COMMON_SETTINGS_DICT["CLUSTER"],
service_name=service_name,
task_def_arn=task_def_arn,
force_new_deployment=force_new_deployment,
)
kwargs = eval(kwargs)
try:
response = client.update_service(**kwargs)
if response["ResponseMetadata"]["HTTPStatusCode"] == HTTPStatus.OK:
challenge.workers = num_of_tasks
challenge.save()
return response
except ClientError as e:
logger.exception(e)
return e.response
def delete_service_by_challenge_pk(challenge):
"""
    Deletes the worker service of a challenge.
    Before deleting, it scales down the number of workers in the service to 0, then proceeds to delete the service.
    Parameters:
        challenge (<class 'challenges.models.Challenge'>): The challenge object whose worker service is being deleted.
Returns:
dict: The response returned by the delete_service method from boto3
"""
client = get_boto3_client("ecs", aws_keys)
queue_name = challenge.queue
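    # Note: this helper first scales the service down to 0 via update_service_by_challenge_pk,
    # then deletes the service, deregisters the task definition and clears challenge.task_def_arn.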
service_name = "{}_service".format(queue_name)
kwargs = delete_service_args.format(
CLUSTER=COMMON_SETTINGS_DICT["CLUSTER"],
service_name=service_name,
force=True,
)
kwargs = eval(kwargs)
try:
if challenge.workers != 0:
response = update_service_by_challenge_pk(
client, challenge, 0, False
)
if response["ResponseMetadata"]["HTTPStatusCode"] != HTTPStatus.OK:
return response
response = client.delete_service(**kwargs)
if response["ResponseMetadata"]["HTTPStatusCode"] == HTTPStatus.OK:
challenge.workers = None
challenge.save()
client.deregister_task_definition(
taskDefinition=challenge.task_def_arn
)
challenge.task_def_arn = ""
challenge.save()
return response
except ClientError as e:
logger.exception(e)
return e.response
def service_manager(
client, challenge, num_of_tasks=None, force_new_deployment=False
):
"""
This method determines if the challenge is new or not, and accordingly calls <update or create>_by_challenge_pk.
Called by: Start, Stop & Scale methods for multiple workers.
Parameters:
client (boto3.client): the client used for making requests to ECS.
        challenge (<class 'challenges.models.Challenge'>): The challenge object whose worker service is being managed.
        num_of_tasks (int): The number of workers to scale to (relevant only if the challenge is not new).
            default: None
Returns:
dict: The response returned by the respective functions update_service_by_challenge_pk or create_service_by_challenge_pk
"""
if challenge.workers is not None:
response = update_service_by_challenge_pk(
client, challenge, num_of_tasks, force_new_deployment
)
return response
else:
client_token = client_token_generator(challenge.pk)
response = create_service_by_challenge_pk(
client, challenge, client_token
)
return response
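# The start/stop/scale/delete/restart helpers below are called from Django admin actions
# with a queryset of challenges. Hedged usage sketch (the queryset here is hypothetical):
#     result = start_workers(Challenge.objects.filter(pk__in=[1, 2]))
#     result["count"], result["failures"]  # successes and per-challenge error messages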
def start_workers(queryset):
"""
The function called by the admin action method to start all the selected workers.
    Calls the service_manager method. Before calling, checks that all the selected workers are inactive.
Parameters:
queryset (<class 'django.db.models.query.QuerySet'>): The queryset of selected challenges in the django admin page.
Returns:
        dict: keys-> 'count': the number of workers successfully started.
                     'failures': a list of dicts for the failures, each with an error message and the challenge pk
"""
if settings.DEBUG:
failures = []
for challenge in queryset:
failures.append(
{
"message": "Workers cannot be started on AWS ECS service in development environment",
"challenge_pk": challenge.pk,
}
)
return {"count": 0, "failures": failures}
client = get_boto3_client("ecs", aws_keys)
count = 0
failures = []
for challenge in queryset:
if (challenge.workers == 0) or (challenge.workers is None):
response = service_manager(
client, challenge=challenge, num_of_tasks=1
)
if response["ResponseMetadata"]["HTTPStatusCode"] != HTTPStatus.OK:
failures.append(
{
"message": response["Error"],
"challenge_pk": challenge.pk,
}
)
continue
count += 1
else:
response = "Please select challenge with inactive workers only."
failures.append(
{"message": response, "challenge_pk": challenge.pk}
)
return {"count": count, "failures": failures}
def stop_workers(queryset):
"""
The function called by the admin action method to stop all the selected workers.
Calls the service_manager method. Before calling, verifies that the challenge is not new, and is active.
Parameters:
queryset (<class 'django.db.models.query.QuerySet'>): The queryset of selected challenges in the django admin page.
Returns:
dict: keys-> 'count': the number of workers successfully stopped.
                     'failures': a list of dicts for the failures, each with an error message and the challenge pk
"""
if settings.DEBUG:
failures = []
for challenge in queryset:
failures.append(
{
"message": "Workers cannot be stopped on AWS ECS service in development environment",
"challenge_pk": challenge.pk,
}
)
return {"count": 0, "failures": failures}
client = get_boto3_client("ecs", aws_keys)
count = 0
failures = []
for challenge in queryset:
if (challenge.workers is not None) and (challenge.workers > 0):
response = service_manager(
client, challenge=challenge, num_of_tasks=0
)
if response["ResponseMetadata"]["HTTPStatusCode"] != HTTPStatus.OK:
failures.append(
{
"message": response["Error"],
"challenge_pk": challenge.pk,
}
)
continue
count += 1
else:
response = "Please select challenges with active workers only."
failures.append(
{"message": response, "challenge_pk": challenge.pk}
)
return {"count": count, "failures": failures}
def scale_workers(queryset, num_of_tasks):
"""
The function called by the admin action method to scale all the selected workers.
    Calls the service_manager method. Before calling, checks that the target scaling number differs from the current one.
    Parameters:
        queryset (<class 'django.db.models.query.QuerySet'>): The queryset of selected challenges in the django admin page.
        num_of_tasks (int): The number of workers to scale each selected challenge to.
    Returns:
        dict: keys-> 'count': the number of workers successfully scaled.
                     'failures': a list of dicts for the failures, each with an error message and the challenge pk
"""
if settings.DEBUG:
failures = []
for challenge in queryset:
failures.append(
{
"message": "Workers cannot be scaled on AWS ECS service in development environment",
"challenge_pk": challenge.pk,
}
)
return {"count": 0, "failures": failures}
client = get_boto3_client("ecs", aws_keys)
count = 0
failures = []
for challenge in queryset:
if challenge.workers is None:
response = "Please start worker(s) before scaling."
failures.append(
{"message": response, "challenge_pk": challenge.pk}
)
continue
if num_of_tasks == challenge.workers:
response = "Please scale to a different number. Challenge has {} worker(s).".format(
num_of_tasks
)
failures.append(
{"message": response, "challenge_pk": challenge.pk}
)
continue
response = service_manager(
client, challenge=challenge, num_of_tasks=num_of_tasks
)
if response["ResponseMetadata"]["HTTPStatusCode"] != HTTPStatus.OK:
failures.append(
{"message": response["Error"], "challenge_pk": challenge.pk}
)
continue
count += 1
return {"count": count, "failures": failures}
def delete_workers(queryset):
"""
The function called by the admin action method to delete all the selected workers.
Calls the delete_service_by_challenge_pk method. Before calling, verifies that the challenge is not new.
Parameters:
queryset (<class 'django.db.models.query.QuerySet'>): The queryset of selected challenges in the django admin page.
Returns:
        dict: keys-> 'count': the number of workers successfully deleted.
                     'failures': a list of dicts for the failures, each with an error message and the challenge pk
"""
if settings.DEBUG:
failures = []
for challenge in queryset:
failures.append(
{
"message": "Workers cannot be deleted on AWS ECS service in development environment",
"challenge_pk": challenge.pk,
}
)
return {"count": 0, "failures": failures}
count = 0
failures = []
for challenge in queryset:
if challenge.workers is not None:
response = delete_service_by_challenge_pk(challenge=challenge)
if response["ResponseMetadata"]["HTTPStatusCode"] != HTTPStatus.OK:
failures.append(
{
"message": response["Error"],
"challenge_pk": challenge.pk,
}
)
continue
count += 1
log_group_name = get_log_group_name(challenge.pk)
delete_log_group(log_group_name)
else:
response = "Please select challenges with active workers only."
failures.append(
{"message": response, "challenge_pk": challenge.pk}
)
return {"count": count, "failures": failures}
def restart_workers(queryset):
"""
The function called by the admin action method to restart all the selected workers.
Calls the service_manager method. Before calling, verifies that the challenge worker(s) is(are) active.
Parameters:
queryset (<class 'django.db.models.query.QuerySet'>): The queryset of selected challenges in the django admin page.
Returns:
        dict: keys-> 'count': the number of workers successfully restarted.
                     'failures': a list of dicts for the failures, each with an error message and the challenge pk
"""
if settings.DEBUG:
failures = []
for challenge in queryset:
failures.append(
{
"message": "Workers cannot be restarted on AWS ECS service in development environment",
"challenge_pk": challenge.pk,
}
)
return {"count": 0, "failures": failures}
client = get_boto3_client("ecs", aws_keys)
count = 0
failures = []
for challenge in queryset:
if challenge.is_docker_based:
response = "Sorry. This feature is not available for code upload/docker based challenges."
failures.append(
{"message": response, "challenge_pk": challenge.pk}
)
elif (challenge.workers is not None) and (challenge.workers > 0):
response = service_manager(
client,
challenge=challenge,
num_of_tasks=challenge.workers,
force_new_deployment=True,
)
if response["ResponseMetadata"]["HTTPStatusCode"] != HTTPStatus.OK:
failures.append(
{
"message": response["Error"],
"challenge_pk": challenge.pk,
}
)
continue
count += 1
else:
response = "Please select challenges with active workers only."
failures.append(
{"message": response, "challenge_pk": challenge.pk}
)
return {"count": count, "failures": failures}
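# restart_workers_signal_callback below is intended to be wired to a model-change hook that
# tracks evaluation_script / test_annotation updates; a hypothetical invocation matching its
# signature (the actual wiring is not shown in this module):
#     restart_workers_signal_callback(sender=Challenge, instance=obj, field_name="evaluation_script")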
def restart_workers_signal_callback(sender, instance, field_name, **kwargs):
"""
    Called when either the evaluation_script or the test_annotation file of a challenge
    is updated, to restart the challenge workers.
"""
if settings.DEBUG:
return
prev = getattr(instance, "_original_{}".format(field_name))
curr = getattr(instance, "{}".format(field_name))
if field_name == "evaluation_script":
instance._original_evaluation_script = curr
elif field_name == "test_annotation":
instance._original_test_annotation = curr
if prev != curr:
challenge = None
if field_name == "test_annotation":
challenge = instance.challenge
else:
challenge = instance
response = restart_workers([challenge])
count, failures = response["count"], response["failures"]
logger.info(
"The worker service for challenge {} was restarted, as {} was changed.".format(
challenge.pk, field_name
)
)
if count != 1:
logger.warning(
"Worker(s) for challenge {} couldn't restart! Error: {}".format(
challenge.id, failures[0]["message"]
)
)
else:
challenge_url = "{}/web/challenges/challenge-page/{}".format(
settings.EVALAI_API_SERVER, challenge.id
)
challenge_manage_url = (
"{}/web/challenges/challenge-page/{}/manage".format(
settings.EVALAI_API_SERVER, challenge.id
)
)
if field_name == "test_annotation":
file_updated = "Test Annotation"
elif field_name == "evaluation_script":
file_updated = "Evaluation script"
template_data = {
"CHALLENGE_NAME": challenge.title,
"CHALLENGE_MANAGE_URL": challenge_manage_url,
"CHALLENGE_URL": challenge_url,
"FILE_UPDATED": file_updated,
}
if challenge.image:
template_data["CHALLENGE_IMAGE_URL"] = challenge.image.url
template_id = settings.SENDGRID_SETTINGS.get("TEMPLATES").get(
"WORKER_RESTART_EMAIL"
)
# Send email notification only when inform_hosts is true
if challenge.inform_hosts:
emails = challenge.creator.get_all_challenge_host_email()
for email in emails:
send_email(
sender=settings.CLOUDCV_TEAM_EMAIL,
recipient=email,
template_id=template_id,
template_data=template_data,
)
def get_logs_from_cloudwatch(
log_group_name, log_stream_prefix, start_time, end_time, pattern
):
"""
    Fetch the logs of a container from CloudWatch within a specific time frame.
"""
client = get_boto3_client("logs", aws_keys)
logs = []
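    # Hedged usage sketch: filter_log_events expects startTime/endTime in milliseconds
    # since the epoch, e.g.
    #     get_logs_from_cloudwatch(get_log_group_name(challenge.pk), "worker_queue",
    #                              start_ms, end_ms, pattern="")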
if settings.DEBUG:
logs = [
"The worker logs in the development environment are available on the terminal. Please use docker-compose logs -f worker to view the logs."
]
else:
try:
response = client.filter_log_events(
logGroupName=log_group_name,
logStreamNamePrefix=log_stream_prefix,
startTime=start_time,
endTime=end_time,
filterPattern=pattern,
)
for event in response["events"]:
logs.append(event["message"])
except Exception as e:
if e.response["Error"]["Code"] == "ResourceNotFoundException":
return logs
logger.exception(e)
return [
f"There is an error in displaying logs. Please find the full error traceback here {e}"
]
return logs
def delete_log_group(log_group_name):
if settings.DEBUG:
pass
else:
try:
client = get_boto3_client("logs", aws_keys)
client.delete_log_group(logGroupName=log_group_name)
except Exception as e:
logger.exception(e)
@app.task
def create_eks_nodegroup(challenge, cluster_name):
"""
    Creates a nodegroup when an EKS cluster is created by the EvalAI admin
    Arguments:
        challenge {str} -- JSON-serialized challenge instance for which the nodegroup is being created
        cluster_name {str} -- name of the eks cluster
"""
from .utils import get_aws_credentials_for_challenge
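    # `challenge` arrives as a JSON-serialized challenge (hence the deserialize call below);
    # a hedged invocation sketch:
    #     create_eks_nodegroup.delay(serializers.serialize("json", [challenge_obj]), cluster_name)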
for obj in serializers.deserialize("json", challenge):
challenge_obj = obj.object
environment_suffix = "{}-{}".format(challenge_obj.pk, settings.ENVIRONMENT)
nodegroup_name = "{}-{}-nodegroup".format(
challenge_obj.title.replace(" ", "-"), environment_suffix
)
challenge_aws_keys = get_aws_credentials_for_challenge(challenge_obj.pk)
client = get_boto3_client("eks", challenge_aws_keys)
cluster_meta = get_code_upload_setup_meta_for_challenge(challenge_obj.pk)
# TODO: Move the hardcoded cluster configuration such as the
# instance_type, subnets, AMI to challenge configuration later.
try:
response = client.create_nodegroup(
clusterName=cluster_name,
nodegroupName=nodegroup_name,
scalingConfig={
"minSize": challenge_obj.min_worker_instance,
"maxSize": challenge_obj.max_worker_instance,
"desiredSize": challenge_obj.desired_worker_instance,
},
diskSize=challenge_obj.worker_disk_size,
subnets=[cluster_meta["SUBNET_1"], cluster_meta["SUBNET_2"]],
instanceTypes=[challenge_obj.worker_instance_type],
amiType=challenge_obj.worker_ami_type,
nodeRole=cluster_meta["EKS_NODEGROUP_ROLE_ARN"],
)
logger.info("Nodegroup create: {}".format(response))
except ClientError as e:
logger.exception(e)
return
waiter = client.get_waiter("nodegroup_active")
waiter.wait(clusterName=cluster_name, nodegroupName=nodegroup_name)
construct_and_send_eks_cluster_creation_mail(challenge_obj)
# starting the code-upload-worker
client = get_boto3_client("ecs", aws_keys)
client_token = client_token_generator(challenge_obj.pk)
create_service_by_challenge_pk(client, challenge_obj, client_token)
@app.task
def setup_eks_cluster(challenge):
"""
Creates EKS and NodeGroup ARN roles
Arguments:
        challenge {str} -- JSON-serialized challenge instance for which the cluster roles are being created
"""
from .models import ChallengeEvaluationCluster
from .serializers import ChallengeEvaluationClusterSerializer
from .utils import get_aws_credentials_for_challenge
for obj in serializers.deserialize("json", challenge):
challenge_obj = obj.object
challenge_aws_keys = get_aws_credentials_for_challenge(challenge_obj.pk)
client = get_boto3_client("iam", challenge_aws_keys)
environment_suffix = "{}-{}".format(challenge_obj.pk, settings.ENVIRONMENT)
eks_role_name = "evalai-code-upload-eks-role-{}".format(environment_suffix)
eks_arn_role = None
try:
response = client.create_role(
RoleName=eks_role_name,
Description="Amazon EKS cluster role with managed policy",
AssumeRolePolicyDocument=json.dumps(
settings.EKS_CLUSTER_TRUST_RELATION
),
)
eks_arn_role = response["Role"]["Arn"]
except ClientError as e:
logger.exception(e)
return
waiter = client.get_waiter("role_exists")
waiter.wait(RoleName=eks_role_name)
try:
# Attach AWS managed EKS cluster policy to the role
response = client.attach_role_policy(
RoleName=eks_role_name,
PolicyArn=settings.EKS_CLUSTER_POLICY,
)
except ClientError as e:
logger.exception(e)
return
node_group_role_name = "evalai-code-upload-nodegroup-role-{}".format(
environment_suffix
)
node_group_arn_role = None
try:
response = client.create_role(
RoleName=node_group_role_name,
Description="Amazon EKS node group role with managed policy",
AssumeRolePolicyDocument=json.dumps(
settings.EKS_NODE_GROUP_TRUST_RELATION
),
)
node_group_arn_role = response["Role"]["Arn"]
except ClientError as e:
logger.exception(e)
return
waiter = client.get_waiter("role_exists")
waiter.wait(RoleName=node_group_role_name)
task_execution_policies = settings.EKS_NODE_GROUP_POLICIES
for policy_arn in task_execution_policies:
try:
# Attach AWS managed EKS worker node policy to the role
response = client.attach_role_policy(
RoleName=node_group_role_name,
PolicyArn=policy_arn,
)
except ClientError as e:
logger.exception(e)
return
# Create custom ECR all access policy and attach to node_group_role
ecr_all_access_policy_name = "AWS-ECR-Full-Access-{}".format(
environment_suffix
)
ecr_all_access_policy_arn = None
try:
response = client.create_policy(
PolicyName=ecr_all_access_policy_name,
PolicyDocument=json.dumps(settings.ECR_ALL_ACCESS_POLICY_DOCUMENT),
)
ecr_all_access_policy_arn = response["Policy"]["Arn"]
waiter = client.get_waiter("policy_exists")
waiter.wait(PolicyArn=ecr_all_access_policy_arn)
# Attach custom ECR policy
response = client.attach_role_policy(
RoleName=node_group_role_name, PolicyArn=ecr_all_access_policy_arn
)
except ClientError as e:
logger.exception(e)
return
try:
challenge_evaluation_cluster = ChallengeEvaluationCluster.objects.get(
challenge=challenge_obj
)
serializer = ChallengeEvaluationClusterSerializer(
challenge_evaluation_cluster,
data={
"eks_arn_role": eks_arn_role,
"node_group_arn_role": node_group_arn_role,
"ecr_all_access_policy_arn": ecr_all_access_policy_arn,
},
partial=True,
)
if serializer.is_valid():
serializer.save()
# Create eks cluster vpc and subnets
create_eks_cluster_subnets.delay(challenge)
except Exception as e:
logger.exception(e)
return
@app.task
def create_eks_cluster_subnets(challenge):
"""
    Creates the VPC, subnets, security groups and EFS file system for the challenge's EKS cluster
    Arguments:
        challenge {str} -- JSON-serialized challenge instance for which the cluster network is being created
"""
from .models import ChallengeEvaluationCluster
from .serializers import ChallengeEvaluationClusterSerializer
from .utils import get_aws_credentials_for_challenge
for obj in serializers.deserialize("json", challenge):
challenge_obj = obj.object
challenge_aws_keys = get_aws_credentials_for_challenge(challenge_obj.pk)
environment_suffix = "{}-{}".format(challenge_obj.pk, settings.ENVIRONMENT)
client = get_boto3_client("ec2", challenge_aws_keys)
vpc_ids = []
try:
response = client.create_vpc(CidrBlock=challenge_obj.vpc_cidr)
vpc_ids.append(response["Vpc"]["VpcId"])
except ClientError as e:
logger.exception(e)
return
waiter = client.get_waiter("vpc_available")
waiter.wait(VpcIds=vpc_ids)
# Create internet gateway and attach to vpc
try:
# Enable DNS resolution for VPC
response = client.modify_vpc_attribute(
EnableDnsHostnames={"Value": True}, VpcId=vpc_ids[0]
)
response = client.create_internet_gateway()
internet_gateway_id = response["InternetGateway"]["InternetGatewayId"]
client.attach_internet_gateway(
InternetGatewayId=internet_gateway_id, VpcId=vpc_ids[0]
)
# Create and attach route table
response = client.create_route_table(VpcId=vpc_ids[0])
route_table_id = response["RouteTable"]["RouteTableId"]
client.create_route(
DestinationCidrBlock="0.0.0.0/0",
GatewayId=internet_gateway_id,
RouteTableId=route_table_id,
)
# Create subnets
subnet_ids = []
response = client.create_subnet(
CidrBlock=challenge_obj.subnet_1_cidr,
AvailabilityZone="us-east-1a",
VpcId=vpc_ids[0],
)
subnet_1_id = response["Subnet"]["SubnetId"]
subnet_ids.append(subnet_1_id)
response = client.create_subnet(
CidrBlock=challenge_obj.subnet_2_cidr,
AvailabilityZone="us-east-1b",
VpcId=vpc_ids[0],
)
subnet_2_id = response["Subnet"]["SubnetId"]
subnet_ids.append(subnet_2_id)
waiter = client.get_waiter("subnet_available")
waiter.wait(SubnetIds=subnet_ids)
# Creating managed node group needs subnets to auto assign ip v4
for subnet_id in subnet_ids:
response = client.modify_subnet_attribute(
MapPublicIpOnLaunch={
"Value": True,
},
SubnetId=subnet_id,
)
# Associate route table with subnets
response = client.associate_route_table(
RouteTableId=route_table_id,
SubnetId=subnet_1_id,
)
response = client.associate_route_table(
RouteTableId=route_table_id,
SubnetId=subnet_2_id,
)
# Create security group
response = client.create_security_group(
GroupName="EvalAI code upload challenge",
Description="EvalAI code upload challenge worker group",
VpcId=vpc_ids[0],
)
security_group_id = response["GroupId"]
response = client.create_security_group(
GroupName="evalai-code-upload-challenge-efs-{}".format(
environment_suffix
),
Description="EKS nodegroup EFS",
VpcId=vpc_ids[0],
)
efs_security_group_id = response["GroupId"]
response = client.authorize_security_group_ingress(
GroupId=efs_security_group_id,
IpPermissions=[
{
"FromPort": 2049,
"IpProtocol": "tcp",
"IpRanges": [
{
"CidrIp": challenge_obj.vpc_cidr,
},
],
"ToPort": 2049,
}
],
)
# Create EFS
efs_client = get_boto3_client("efs", challenge_aws_keys)
efs_creation_token = str(uuid.uuid4())[:64]
response = efs_client.create_file_system(
CreationToken=efs_creation_token,
)
efs_id = response["FileSystemId"]
challenge_evaluation_cluster = ChallengeEvaluationCluster.objects.get(
challenge=challenge_obj
)
serializer = ChallengeEvaluationClusterSerializer(
challenge_evaluation_cluster,
data={
"vpc_id": vpc_ids[0],
"internet_gateway_id": internet_gateway_id,
"route_table_id": route_table_id,
"security_group_id": security_group_id,
"subnet_1_id": subnet_1_id,
"subnet_2_id": subnet_2_id,
"efs_security_group_id": efs_security_group_id,
"efs_id": efs_id,
"efs_creation_token": efs_creation_token,
},
partial=True,
)
if serializer.is_valid():
serializer.save()
# Create eks cluster
create_eks_cluster.delay(challenge)
except ClientError as e:
logger.exception(e)
return
@app.task
def create_eks_cluster(challenge):
"""
    Called when a challenge is approved by the EvalAI admin;
    creates the EKS cluster and then calls the create_eks_nodegroup function
    Arguments:
        challenge {str} -- JSON-serialized challenge instance for which the cluster is being created
"""
from .models import ChallengeEvaluationCluster
from .serializers import ChallengeEvaluationClusterSerializer
from .utils import get_aws_credentials_for_challenge
for obj in serializers.deserialize("json", challenge):
challenge_obj = obj.object
environment_suffix = "{}-{}".format(challenge_obj.pk, settings.ENVIRONMENT)
cluster_name = "{}-{}-cluster".format(
challenge_obj.title.replace(" ", "-"), environment_suffix
)
if challenge_obj.approved_by_admin and challenge_obj.is_docker_based:
challenge_aws_keys = get_aws_credentials_for_challenge(
challenge_obj.pk
)
client = get_boto3_client("eks", challenge_aws_keys)
cluster_meta = get_code_upload_setup_meta_for_challenge(
challenge_obj.pk
)
try:
response = client.create_cluster(
name=cluster_name,
version="1.15",
roleArn=cluster_meta["EKS_CLUSTER_ROLE_ARN"],
resourcesVpcConfig={
"subnetIds": [
cluster_meta["SUBNET_1"],
cluster_meta["SUBNET_2"],
],
"securityGroupIds": [
cluster_meta["SUBNET_SECURITY_GROUP"]
],
},
)
waiter = client.get_waiter("cluster_active")
waiter.wait(name=cluster_name)
# creating kubeconfig
cluster = client.describe_cluster(name=cluster_name)
cluster_cert = cluster["cluster"]["certificateAuthority"]["data"]
cluster_ep = cluster["cluster"]["endpoint"]
cluster_config = {
"apiVersion": "v1",
"kind": "Config",
"clusters": [
{
"cluster": {
"server": str(cluster_ep),
"certificate-authority-data": str(cluster_cert),
},
"name": "kubernetes",
}
],
"contexts": [
{
"context": {"cluster": "kubernetes", "user": "aws"},
"name": "aws",
}
],
"current-context": "aws",
"preferences": {},
"users": [
{
"name": "aws",
"user": {
"exec": {
"apiVersion": "client.authentication.k8s.io/v1alpha1",
"command": "heptio-authenticator-aws",
"args": ["token", "-i", cluster_name],
}
},
}
],
}
# Write in YAML.
config_text = yaml.dump(cluster_config, default_flow_style=False)
config_file = NamedTemporaryFile(delete=True)
config_file.write(config_text.encode())
challenge_evaluation_cluster = (
ChallengeEvaluationCluster.objects.get(challenge=challenge_obj)
)
efs_client = get_boto3_client("efs", challenge_aws_keys)
# Create mount targets for subnets
mount_target_ids = []
response = efs_client.create_mount_target(
FileSystemId=challenge_evaluation_cluster.efs_id,
SubnetId=challenge_evaluation_cluster.subnet_1_id,
SecurityGroups=[
challenge_evaluation_cluster.efs_security_group_id
],
)
mount_target_ids.append(response["MountTargetId"])
response = efs_client.create_mount_target(
FileSystemId=challenge_evaluation_cluster.efs_id,
SubnetId=challenge_evaluation_cluster.subnet_2_id,
SecurityGroups=[
challenge_evaluation_cluster.efs_security_group_id
],
)
mount_target_ids.append(response["MountTargetId"])
serializer = ChallengeEvaluationClusterSerializer(
challenge_evaluation_cluster,
data={
"name": cluster_name,
"cluster_endpoint": cluster_ep,
"cluster_ssl": cluster_cert,
"efs_mount_target_ids": mount_target_ids,
},
partial=True,
)
if serializer.is_valid():
serializer.save()
# Creating nodegroup
create_eks_nodegroup.delay(challenge, cluster_name)
return response
except ClientError as e:
logger.exception(e)
return
def challenge_approval_callback(sender, instance, field_name, **kwargs):
"""This is to check if a challenge has been approved or disapproved since last time.
On approval of a challenge, it launches a worker on Fargate.
On disapproval, it scales down the workers to 0, and deletes the challenge's service on Fargate.
Arguments:
        sender -- The model which initiated this callback (Challenge)
instance {<class 'django.db.models.query.QuerySet'>} -- instance of the model (a challenge object)
field_name {str} -- The name of the field to check for a change (approved_by_admin)
"""
prev = getattr(instance, "_original_{}".format(field_name))
curr = getattr(instance, "{}".format(field_name))
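    # The instance is expected to cache the previous value in an `_original_<field_name>`
    # attribute (e.g. `_original_approved_by_admin`); comparing it with the current value
    # below is how an approval flip is detected.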
challenge = instance
challenge._original_approved_by_admin = curr
if not challenge.is_docker_based and challenge.remote_evaluation is False:
if curr and not prev:
if not challenge.workers:
response = start_workers([challenge])
count, failures = response["count"], response["failures"]
if not count:
logger.error(
"Worker for challenge {} couldn't start! Error: {}".format(
challenge.id, failures[0]["message"]
)
)
else:
construct_and_send_worker_start_mail(challenge)
if prev and not curr:
if challenge.workers:
response = delete_workers([challenge])
count, failures = response["count"], response["failures"]
if not count:
logger.error(
"Worker for challenge {} couldn't be deleted! Error: {}".format(
challenge.id, failures[0]["message"]
)
)
| 35.623398 | 150 | 0.547526 |
7945777d846c913eb484ec5d026f713651af8cfc | 1,438 | py | Python | onnx_chainer/functions/connection/linear.py | Hakuyume/onnx-chainer | 3c46bd692ef38a7c0f45a2a09795d2023364e12b | [
"MIT"
] | null | null | null | onnx_chainer/functions/connection/linear.py | Hakuyume/onnx-chainer | 3c46bd692ef38a7c0f45a2a09795d2023364e12b | [
"MIT"
] | null | null | null | onnx_chainer/functions/connection/linear.py | Hakuyume/onnx-chainer | 3c46bd692ef38a7c0f45a2a09795d2023364e12b | [
"MIT"
] | null | null | null | import os
import numpy as np
from onnx import helper
from onnx import numpy_helper
from onnx_chainer import mapping
def convert_LinearFunction(
func, input_names, param_names, parameters, input_tensors):
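    # Swaps the weight/bias variable ids in `input_names` for their parameter names; when
    # the Chainer link was built with nobias=True, a zero bias tensor is synthesised below
    # so the exported ONNX node always receives (x, W, b).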
input_names[input_names.index(id(func.W))] = param_names[id(func.W)]
if hasattr(func, 'b'):
input_names[input_names.index(id(func.b))] = param_names[id(func.b)]
else:
# If nobias=True, create zero vector and add it to parameters
layer_name = os.path.dirname(param_names[id(func.W)])
bias = np.zeros((func.W.shape[1],), dtype=func.W.array.dtype)
param_names[id(bias)] = os.path.join(layer_name, 'b')
parameters.append(
numpy_helper.from_array(
bias,
param_names[id(bias)]
)
)
input_tensors.append(
helper.make_tensor_value_info(
param_names[id(bias)],
mapping.dtypes[bias.dtype],
bias.shape
)
)
input_names.append(param_names[id(bias)])
for i, input_name in enumerate(input_names):
if type(input_name) is not str:
input_names[i] = str(input_name)
layer_name = mapping.operators[func.__class__.__name__]
out_names = [str(id(out())) for out in func.outputs]
return helper.make_node(
layer_name, input_names, out_names,
axis=1,
axis_w=1
),
| 31.26087 | 76 | 0.613352 |
79457825b83dd47bf10de3ce3a044006a91e4a2c | 21,474 | py | Python | pybind/slxos/v17r_2_00/routing_system/router/router_bgp/address_family/l2vpn/evpn/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v17r_2_00/routing_system/router/router_bgp/address_family/l2vpn/evpn/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v17r_2_00/routing_system/router/router_bgp/address_family/l2vpn/evpn/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import suppress
import graceful_restart
import retain
import neighbor
class evpn(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-common-def - based on the path /routing-system/router/router-bgp/address-family/l2vpn/evpn. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__suppress','__client_to_client_reflection','__graceful_restart','__retain','__vtep_discovery','__neighbor',)
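  # The _get_*/_set_*/_unset_* accessors generated below wrap each YANG node declared under
  # this path (e.g. "suppress", "graceful-restart", "vtep-discovery", "neighbor").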
_yang_name = 'evpn'
_rest_name = 'evpn'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__client_to_client_reflection = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="client-to-client-reflection", rest_name="client-to-client-reflection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-run-template': u'$(.?:no client-to-client-reflection\n)', u'info': u'Configure client to client route reflection'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
self.__graceful_restart = YANGDynClass(base=graceful_restart.graceful_restart, is_container='container', presence=False, yang_name="graceful-restart", rest_name="graceful-restart", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enables the BGP graceful restart capability', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
self.__vtep_discovery = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="vtep-discovery", rest_name="vtep-discovery", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-run-template': u'$(.?:no vtep-discovery\n)', u'info': u'Enable VTEP discovery'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
self.__suppress = YANGDynClass(base=suppress.suppress, is_container='container', presence=False, yang_name="suppress", rest_name="suppress", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
self.__neighbor = YANGDynClass(base=neighbor.neighbor, is_container='container', presence=False, yang_name="neighbor", rest_name="neighbor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify a neighbor router', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
self.__retain = YANGDynClass(base=retain.retain, is_container='container', presence=False, yang_name="retain", rest_name="retain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Retain route targets'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'routing-system', u'router', u'router-bgp', u'address-family', u'l2vpn', u'evpn']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'router', u'bgp', u'address-family', u'l2vpn', u'evpn']
def _get_suppress(self):
"""
Getter method for suppress, mapped from YANG variable /routing_system/router/router_bgp/address_family/l2vpn/evpn/suppress (container)
"""
return self.__suppress
def _set_suppress(self, v, load=False):
"""
Setter method for suppress, mapped from YANG variable /routing_system/router/router_bgp/address_family/l2vpn/evpn/suppress (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_suppress is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_suppress() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=suppress.suppress, is_container='container', presence=False, yang_name="suppress", rest_name="suppress", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """suppress must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=suppress.suppress, is_container='container', presence=False, yang_name="suppress", rest_name="suppress", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""",
})
self.__suppress = t
if hasattr(self, '_set'):
self._set()
def _unset_suppress(self):
self.__suppress = YANGDynClass(base=suppress.suppress, is_container='container', presence=False, yang_name="suppress", rest_name="suppress", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
def _get_client_to_client_reflection(self):
"""
Getter method for client_to_client_reflection, mapped from YANG variable /routing_system/router/router_bgp/address_family/l2vpn/evpn/client_to_client_reflection (empty)
"""
return self.__client_to_client_reflection
def _set_client_to_client_reflection(self, v, load=False):
"""
Setter method for client_to_client_reflection, mapped from YANG variable /routing_system/router/router_bgp/address_family/l2vpn/evpn/client_to_client_reflection (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_client_to_client_reflection is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_client_to_client_reflection() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="client-to-client-reflection", rest_name="client-to-client-reflection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-run-template': u'$(.?:no client-to-client-reflection\n)', u'info': u'Configure client to client route reflection'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """client_to_client_reflection must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="client-to-client-reflection", rest_name="client-to-client-reflection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-run-template': u'$(.?:no client-to-client-reflection\n)', u'info': u'Configure client to client route reflection'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)""",
})
self.__client_to_client_reflection = t
if hasattr(self, '_set'):
self._set()
def _unset_client_to_client_reflection(self):
self.__client_to_client_reflection = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="client-to-client-reflection", rest_name="client-to-client-reflection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-run-template': u'$(.?:no client-to-client-reflection\n)', u'info': u'Configure client to client route reflection'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
def _get_graceful_restart(self):
"""
Getter method for graceful_restart, mapped from YANG variable /routing_system/router/router_bgp/address_family/l2vpn/evpn/graceful_restart (container)
"""
return self.__graceful_restart
def _set_graceful_restart(self, v, load=False):
"""
Setter method for graceful_restart, mapped from YANG variable /routing_system/router/router_bgp/address_family/l2vpn/evpn/graceful_restart (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_graceful_restart is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_graceful_restart() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=graceful_restart.graceful_restart, is_container='container', presence=False, yang_name="graceful-restart", rest_name="graceful-restart", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enables the BGP graceful restart capability', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """graceful_restart must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=graceful_restart.graceful_restart, is_container='container', presence=False, yang_name="graceful-restart", rest_name="graceful-restart", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enables the BGP graceful restart capability', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""",
})
self.__graceful_restart = t
if hasattr(self, '_set'):
self._set()
def _unset_graceful_restart(self):
self.__graceful_restart = YANGDynClass(base=graceful_restart.graceful_restart, is_container='container', presence=False, yang_name="graceful-restart", rest_name="graceful-restart", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enables the BGP graceful restart capability', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
def _get_retain(self):
"""
Getter method for retain, mapped from YANG variable /routing_system/router/router_bgp/address_family/l2vpn/evpn/retain (container)
"""
return self.__retain
def _set_retain(self, v, load=False):
"""
Setter method for retain, mapped from YANG variable /routing_system/router/router_bgp/address_family/l2vpn/evpn/retain (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_retain is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_retain() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=retain.retain, is_container='container', presence=False, yang_name="retain", rest_name="retain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Retain route targets'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """retain must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=retain.retain, is_container='container', presence=False, yang_name="retain", rest_name="retain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Retain route targets'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""",
})
self.__retain = t
if hasattr(self, '_set'):
self._set()
def _unset_retain(self):
self.__retain = YANGDynClass(base=retain.retain, is_container='container', presence=False, yang_name="retain", rest_name="retain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Retain route targets'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
def _get_vtep_discovery(self):
"""
Getter method for vtep_discovery, mapped from YANG variable /routing_system/router/router_bgp/address_family/l2vpn/evpn/vtep_discovery (empty)
"""
return self.__vtep_discovery
def _set_vtep_discovery(self, v, load=False):
"""
Setter method for vtep_discovery, mapped from YANG variable /routing_system/router/router_bgp/address_family/l2vpn/evpn/vtep_discovery (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_vtep_discovery is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vtep_discovery() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="vtep-discovery", rest_name="vtep-discovery", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-run-template': u'$(.?:no vtep-discovery\n)', u'info': u'Enable VTEP discovery'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vtep_discovery must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="vtep-discovery", rest_name="vtep-discovery", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-run-template': u'$(.?:no vtep-discovery\n)', u'info': u'Enable VTEP discovery'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)""",
})
self.__vtep_discovery = t
if hasattr(self, '_set'):
self._set()
def _unset_vtep_discovery(self):
self.__vtep_discovery = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="vtep-discovery", rest_name="vtep-discovery", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-run-template': u'$(.?:no vtep-discovery\n)', u'info': u'Enable VTEP discovery'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
def _get_neighbor(self):
"""
Getter method for neighbor, mapped from YANG variable /routing_system/router/router_bgp/address_family/l2vpn/evpn/neighbor (container)
"""
return self.__neighbor
def _set_neighbor(self, v, load=False):
"""
Setter method for neighbor, mapped from YANG variable /routing_system/router/router_bgp/address_family/l2vpn/evpn/neighbor (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_neighbor is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_neighbor() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=neighbor.neighbor, is_container='container', presence=False, yang_name="neighbor", rest_name="neighbor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify a neighbor router', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """neighbor must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=neighbor.neighbor, is_container='container', presence=False, yang_name="neighbor", rest_name="neighbor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify a neighbor router', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""",
})
self.__neighbor = t
if hasattr(self, '_set'):
self._set()
def _unset_neighbor(self):
self.__neighbor = YANGDynClass(base=neighbor.neighbor, is_container='container', presence=False, yang_name="neighbor", rest_name="neighbor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify a neighbor router', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
suppress = __builtin__.property(_get_suppress, _set_suppress)
client_to_client_reflection = __builtin__.property(_get_client_to_client_reflection, _set_client_to_client_reflection)
graceful_restart = __builtin__.property(_get_graceful_restart, _set_graceful_restart)
retain = __builtin__.property(_get_retain, _set_retain)
vtep_discovery = __builtin__.property(_get_vtep_discovery, _set_vtep_discovery)
neighbor = __builtin__.property(_get_neighbor, _set_neighbor)
_pyangbind_elements = {'suppress': suppress, 'client_to_client_reflection': client_to_client_reflection, 'graceful_restart': graceful_restart, 'retain': retain, 'vtep_discovery': vtep_discovery, 'neighbor': neighbor, }
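  # Illustrative sketch (not part of the generated file): how a backend might use
  # the accessors documented in the setter docstrings above. The instance name
  # `afi` and the way it is obtained are assumptions.
  #
  #     afi = <instance of this generated binding class>
  #     afi._set_vtep_discovery(True)   # or: afi.vtep_discovery = True
  #     afi.retain                      # returns the 'retain' container binding
  #     afi._unset_neighbor()           # reset 'neighbor' back to its default container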
| 71.10596 | 550 | 0.741827 |
7945796714426197783e5ff2121fad057e27b0bd | 8,826 | py | Python | poetry/utils/_compat.py | noamraph/poetry | 0d48fb669e42d2b662b8e0abef6af8c8bb9ab68a | [
"MIT"
] | 1 | 2020-08-19T19:51:22.000Z | 2020-08-19T19:51:22.000Z | poetry/utils/_compat.py | noamraph/poetry | 0d48fb669e42d2b662b8e0abef6af8c8bb9ab68a | [
"MIT"
] | null | null | null | poetry/utils/_compat.py | noamraph/poetry | 0d48fb669e42d2b662b8e0abef6af8c8bb9ab68a | [
"MIT"
] | null | null | null | import sys
try:
from functools32 import lru_cache
except ImportError:
from functools import lru_cache
try:
from glob2 import glob
except ImportError:
from glob import glob
try:
from importlib import metadata
import zipfile as zipp
except ImportError:
import importlib_metadata as metadata
import zipp
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
try:
from os import cpu_count
except ImportError: # Python 2
from multiprocessing import cpu_count
try: # Python 2
long = long
unicode = unicode
basestring = basestring
except NameError: # Python 3
long = int
unicode = str
basestring = str
PY2 = sys.version_info[0] == 2
PY34 = sys.version_info >= (3, 4)
PY35 = sys.version_info >= (3, 5)
PY36 = sys.version_info >= (3, 6)
WINDOWS = sys.platform == "win32"
if PY2:
import pipes
shell_quote = pipes.quote
else:
import shlex
shell_quote = shlex.quote
if PY34:
from importlib.machinery import EXTENSION_SUFFIXES
else:
from imp import get_suffixes
EXTENSION_SUFFIXES = [suffix[0] for suffix in get_suffixes()]
if PY35:
from pathlib import Path
else:
from pathlib2 import Path
if not PY36:
from collections import OrderedDict
else:
OrderedDict = dict
if PY35:
import subprocess as subprocess
from subprocess import CalledProcessError
else:
import subprocess32 as subprocess
from subprocess32 import CalledProcessError
if PY34:
    # subprocess32 passes the calls directly to subprocess
# on Python 3.3+ but Python 3.4 does not provide run()
# so we backport it
import signal
from subprocess import PIPE
from subprocess import Popen
from subprocess import SubprocessError
from subprocess import TimeoutExpired
class CalledProcessError(SubprocessError):
"""Raised when run() is called with check=True and the process
returns a non-zero exit status.
Attributes:
cmd, returncode, stdout, stderr, output
"""
def __init__(self, returncode, cmd, output=None, stderr=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
self.stderr = stderr
def __str__(self):
if self.returncode and self.returncode < 0:
try:
return "Command '%s' died with %r." % (
self.cmd,
signal.Signals(-self.returncode),
)
except ValueError:
return "Command '%s' died with unknown signal %d." % (
self.cmd,
-self.returncode,
)
else:
return "Command '%s' returned non-zero exit status %d." % (
self.cmd,
self.returncode,
)
@property
def stdout(self):
"""Alias for output attribute, to match stderr"""
return self.output
@stdout.setter
def stdout(self, value):
# There's no obvious reason to set this, but allow it anyway so
# .stdout is a transparent alias for .output
self.output = value
class CompletedProcess(object):
"""A process that has finished running.
This is returned by run().
Attributes:
args: The list or str args passed to run().
returncode: The exit code of the process, negative for signals.
stdout: The standard output (None if not captured).
stderr: The standard error (None if not captured).
"""
def __init__(self, args, returncode, stdout=None, stderr=None):
self.args = args
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
def __repr__(self):
args = [
"args={!r}".format(self.args),
"returncode={!r}".format(self.returncode),
]
if self.stdout is not None:
args.append("stdout={!r}".format(self.stdout))
if self.stderr is not None:
args.append("stderr={!r}".format(self.stderr))
return "{}({})".format(type(self).__name__, ", ".join(args))
def check_returncode(self):
"""Raise CalledProcessError if the exit code is non-zero."""
if self.returncode:
raise CalledProcessError(
self.returncode, self.args, self.stdout, self.stderr
)
def run(*popenargs, **kwargs):
"""Run command with arguments and return a CompletedProcess instance.
The returned instance will have attributes args, returncode, stdout and
stderr. By default, stdout and stderr are not captured, and those attributes
will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them.
If check is True and the exit code was non-zero, it raises a
CalledProcessError. The CalledProcessError object will have the return code
in the returncode attribute, and output & stderr attributes if those streams
were captured.
If timeout is given, and the process takes too long, a TimeoutExpired
exception will be raised.
There is an optional argument "input", allowing you to
pass a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument, as
it will be used internally.
The other arguments are the same as for the Popen constructor.
If universal_newlines=True is passed, the "input" argument must be a
string and stdout/stderr in the returned object will be strings rather than
bytes.
"""
input = kwargs.pop("input", None)
timeout = kwargs.pop("timeout", None)
check = kwargs.pop("check", False)
if input is not None:
if "stdin" in kwargs:
raise ValueError("stdin and input arguments may not both be used.")
kwargs["stdin"] = PIPE
process = Popen(*popenargs, **kwargs)
try:
process.__enter__() # No-Op really... illustrate "with in 2.4"
try:
stdout, stderr = process.communicate(input, timeout=timeout)
except TimeoutExpired:
process.kill()
stdout, stderr = process.communicate()
raise TimeoutExpired(
process.args, timeout, output=stdout, stderr=stderr
)
except:
process.kill()
process.wait()
raise
retcode = process.poll()
if check and retcode:
raise CalledProcessError(
retcode, process.args, output=stdout, stderr=stderr
)
finally:
# None because our context manager __exit__ does not use them.
process.__exit__(None, None, None)
return CompletedProcess(process.args, retcode, stdout, stderr)
subprocess.run = run
subprocess.CalledProcessError = CalledProcessError
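    # Usage sketch for the backported run() above (illustrative only; the command
    # and its captured output are assumptions, and on Python 3.5+ the stdlib
    # implementation is used instead of this backport):
    #
    #     proc = subprocess.run(["echo", "hi"], stdout=PIPE, check=True)
    #     proc.returncode          # 0
    #     proc.stdout              # b'hi\n'
    #     proc.check_returncode()  # no-op, since the exit status was zero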
def decode(string, encodings=None):
if not PY2 and not isinstance(string, bytes):
return string
if PY2 and isinstance(string, unicode):
return string
encodings = encodings or ["utf-8", "latin1", "ascii"]
for encoding in encodings:
try:
return string.decode(encoding)
except (UnicodeEncodeError, UnicodeDecodeError):
pass
return string.decode(encodings[0], errors="ignore")
def encode(string, encodings=None):
if not PY2 and isinstance(string, bytes):
return string
if PY2 and isinstance(string, str):
return string
encodings = encodings or ["utf-8", "latin1", "ascii"]
for encoding in encodings:
try:
return string.encode(encoding)
except (UnicodeEncodeError, UnicodeDecodeError):
pass
return string.encode(encodings[0], errors="ignore")
def to_str(string):
if isinstance(string, str) or not isinstance(string, (unicode, bytes)):
return string
if PY2:
method = "encode"
else:
method = "decode"
encodings = ["utf-8", "latin1", "ascii"]
for encoding in encodings:
try:
return getattr(string, method)(encoding)
except (UnicodeEncodeError, UnicodeDecodeError):
pass
return getattr(string, method)(encodings[0], errors="ignore")
def list_to_shell_command(cmd):
executable = cmd[0]
if " " in executable:
executable = '"{}"'.format(executable)
cmd[0] = executable
return " ".join(cmd)
| 29.918644 | 84 | 0.603218 |
79457a8e75299df14355290a8aaa7819abc6fb91 | 36,120 | py | Python | mars/tensor/statistics/histogram.py | acezen/mars | c6b4f6e5f9ab4caf9d8e82108e2dd49d312e39fd | [
"Apache-2.0"
] | 2 | 2019-03-29T04:11:10.000Z | 2020-07-08T10:19:54.000Z | mars/tensor/statistics/histogram.py | acezen/mars | c6b4f6e5f9ab4caf9d8e82108e2dd49d312e39fd | [
"Apache-2.0"
] | null | null | null | mars/tensor/statistics/histogram.py | acezen/mars | c6b4f6e5f9ab4caf9d8e82108e2dd49d312e39fd | [
"Apache-2.0"
] | null | null | null | # Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import operator
import numpy as np
from ... import opcodes as OperandDef
from ... import tensor as mt
from ...serialize import AnyField, TupleField, KeyField, BoolField
from ...tiles import TilesError
from ...context import get_context
from ...utils import check_chunks_unknown_shape, recursive_tile
from ..core import TENSOR_TYPE, TENSOR_CHUNK_TYPE, TensorOrder
from ..operands import TensorOperand, TensorOperandMixin
from ..datasource import tensor as astensor
from ..arithmetic.utils import tree_add
from ..utils import is_asc_sorted
from ..array_utils import as_same_device, device
# note: some logic of this file were adopted from `numpy/lib/histograms`
def _ptp(range_):
"""Peak-to-peak value of x.
This implementation avoids the problem of signed integer arrays having a
peak-to-peak value that cannot be represented with the array's data type.
This function returns an unsigned value for signed integer arrays.
"""
return _unsigned_subtract(*range_[::-1])
class HistBinSelector:
def __init__(self, histogram_bin_edges_op, x, range, raw_range):
self._op = histogram_bin_edges_op
self._x = x
self._range = range
self._raw_range = raw_range
def check(self):
if len(self._op._calc_bin_edges_dependencies) == 0:
# not checked before
width = self()
if width is None:
return
err = TilesError('bin edges calculation requires '
'some dependencies executed first')
self._op._calc_bin_edges_dependencies = [width]
recursive_tile(width)
err.partial_tiled_chunks = [c.data for c in width.chunks]
raise err
def __call__(self):
return
def get_result(self):
ctx = get_context()
width = ctx.get_chunk_results(
[self._op._calc_bin_edges_dependencies[0].chunks[0].key])[0]
return width
class HistBinSqrtSelector(HistBinSelector):
"""
Square root histogram bin estimator.
Bin width is inversely proportional to the data size. Used by many
programs for its simplicity.
"""
def get_result(self):
return _ptp(self._raw_range) / np.sqrt(self._x.size)
class HistBinSturgesSelector(HistBinSelector):
"""
Sturges histogram bin estimator.
A very simplistic estimator based on the assumption of normality of
the data. This estimator has poor performance for non-normal data,
which becomes especially obvious for large data sets. The estimate
depends only on size of the data.
"""
def get_result(self):
return _ptp(self._raw_range) / (np.log2(self._x.size) + 1.0)
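# Worked example for the Sturges rule above (a sketch, not executed here): for
# 1024 samples spanning a range of 100.0, the estimated bin width is
# 100.0 / (log2(1024) + 1) = 100.0 / 11 ~= 9.09, i.e. roughly 11 equal-width bins.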
class HistBinRiceSelector(HistBinSelector):
"""
Rice histogram bin estimator.
Another simple estimator with no normality assumption. It has better
performance for large data than Sturges, but tends to overestimate
the number of bins. The number of bins is proportional to the cube
root of data size (asymptotically optimal). The estimate depends
only on size of the data.
"""
def get_result(self):
return _ptp(self._raw_range) / (2.0 * self._x.size ** (1.0 / 3))
class HistBinScottSelector(HistBinSelector):
"""
Scott histogram bin estimator.
The binwidth is proportional to the standard deviation of the data
and inversely proportional to the cube root of data size
(asymptotically optimal).
"""
def __call__(self):
return (24.0 * np.pi**0.5 / self._x.size)**(1.0 / 3.0) * mt.std(self._x)
class HistBinStoneSelector(HistBinSelector):
"""
Histogram bin estimator based on minimizing the estimated integrated squared error (ISE).
The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution.
The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule.
https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule
This paper by Stone appears to be the origination of this rule.
http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf
"""
def __call__(self):
n = self._x.size
ptp_x = _ptp(self._raw_range)
if n <= 1 or ptp_x == 0:
return
nbins_upper_bound = max(100, int(np.sqrt(n)))
candidates = []
for nbins in range(1, nbins_upper_bound + 1):
hh = ptp_x / nbins
p_k = histogram(self._x, bins=nbins, range=self._range)[0] / n
candidate = (2 - (n + 1) * p_k.dot(p_k)) / hh
candidates.append(candidate)
nbins = mt.stack(candidates).argmin() + 1
return ptp_x / nbins
def get_result(self):
ptp_x = _ptp(self._raw_range)
if self._x.size <= 1 or ptp_x == 0:
return 0.0
else:
return super().get_result()
class HistBinDoaneSelector(HistBinSelector):
"""
Doane's histogram bin estimator.
Improved version of Sturges' formula which works better for
non-normal data. See
stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning
"""
def __call__(self):
x = self._x
if x.size <= 2:
return
sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
sigma = mt.std(x)
g1 = mt.mean(((x - mt.mean(x)) / sigma)**3)
ret = _ptp(self._raw_range) / (1.0 + np.log2(x.size) +
mt.log2(1.0 + mt.absolute(g1) / sg1))
return mt.where(sigma > 0.0, ret, 0.0)
def get_result(self):
if self._x.size <= 2:
return 0.0
else:
return super().get_result()
class HistBinFdSelector(HistBinSelector):
"""
The Freedman-Diaconis histogram bin estimator.
The Freedman-Diaconis rule uses interquartile range (IQR) to
estimate binwidth. It is considered a variation of the Scott rule
with more robustness as the IQR is less affected by outliers than
the standard deviation. However, the IQR depends on fewer points
than the standard deviation, so it is less accurate, especially for
long tailed distributions.
If the IQR is 0, this function returns 1 for the number of bins.
Binwidth is inversely proportional to the cube root of data size
(asymptotically optimal).
"""
def __call__(self):
iqr = mt.subtract(*mt.percentile(self._x, [75, 25]))
return 2.0 * iqr * self._x.size ** (-1.0 / 3.0)
class HistBinAutoSelector(HistBinSelector):
"""
Histogram bin estimator that uses the minimum width of the
Freedman-Diaconis and Sturges estimators if the FD bandwidth is non zero
and the Sturges estimator if the FD bandwidth is 0.
The FD estimator is usually the most robust method, but its width
estimate tends to be too large for small `x` and bad for data with limited
variance. The Sturges estimator is quite good for small (<1000) datasets
and is the default in the R language. This method gives good off the shelf
behaviour.
If there is limited variance the IQR can be 0, which results in the
FD bin width being 0 too. This is not a valid bin width, so
``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal.
If the IQR is 0, it's unlikely any variance based estimators will be of
use, so we revert to the sturges estimator, which only uses the size of the
dataset in its calculation.
"""
def __init__(self, histogram_bin_edges_op, x, range, raw_range):
super().__init__(histogram_bin_edges_op, x, range, raw_range)
self._bin_fd = HistBinFdSelector(
histogram_bin_edges_op, x, range, raw_range)
self._bin_sturges = HistBinSturgesSelector(
histogram_bin_edges_op, x, range, raw_range)
def __call__(self):
return self._bin_fd()
def get_result(self):
fd_bw = super().get_result()
sturges_bw = self._bin_sturges.get_result()
if fd_bw:
return min(fd_bw, sturges_bw)
else:
# limited variance, so we return a len dependent bw estimator
return sturges_bw
# Private dict initialized at module load time
_hist_bin_selectors = {'stone': HistBinStoneSelector,
'auto': HistBinAutoSelector,
'doane': HistBinDoaneSelector,
'fd': HistBinFdSelector,
'rice': HistBinRiceSelector,
'scott': HistBinScottSelector,
'sqrt': HistBinSqrtSelector,
'sturges': HistBinSturgesSelector}
def _ravel_and_check_weights(a, weights):
""" Check a and weights have matching shapes, and ravel both """
a = astensor(a)
# Ensure that the array is a "subtractable" dtype
if a.dtype == np.bool_:
warnings.warn(f"Converting input from {a.dtype} to {np.uint8} for compatibility.",
RuntimeWarning, stacklevel=3)
a = a.astype(np.uint8)
if weights is not None:
weights = astensor(weights)
if weights.shape != a.shape:
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
return a, weights
def _check_range(range):
first_edge, last_edge = range
if first_edge > last_edge:
raise ValueError(
'max must be larger than min in range parameter.')
if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
raise ValueError(
f"supplied range of [{first_edge}, {last_edge}] is not finite")
return first_edge, last_edge
def _get_outer_edges(a, range):
"""
Determine the outer bin edges to use, from either the data or the range
argument
"""
if range is not None:
first_edge, last_edge = _check_range(range)
else:
assert a.size == 0
# handle empty arrays. Can't determine range, so use 0-1.
first_edge, last_edge = 0, 1
# expand empty range to avoid divide by zero
if first_edge == last_edge:
first_edge = first_edge - 0.5
last_edge = last_edge + 0.5
return first_edge, last_edge
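# Behaviour sketch for _get_outer_edges (values follow directly from the code
# above): a degenerate explicit range such as (2, 2) is widened to (1.5, 2.5),
# while an empty tensor with no explicit range falls back to the edges (0, 1).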
def _unsigned_subtract(a, b):
"""
Subtract two values where a >= b, and produce an unsigned result
This is needed when finding the difference between the upper and lower
bound of an int16 histogram
"""
# coerce to a single type
signed_to_unsigned = {
np.byte: np.ubyte,
np.short: np.ushort,
np.intc: np.uintc,
np.int_: np.uint,
np.longlong: np.ulonglong
}
dt = np.result_type(a, b)
try:
dt = signed_to_unsigned[dt.type]
except KeyError: # pragma: no cover
return np.subtract(a, b, dtype=dt)
else:
# we know the inputs are integers, and we are deliberately casting
# signed to unsigned
return np.subtract(a, b, casting='unsafe', dtype=dt)
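# Worked example for _unsigned_subtract: for int16 inputs with edges -32000 and
# 32000, the signed difference 64000 cannot be represented in int16 (max 32767);
# mapping np.short -> np.ushort lets the difference 64000 be stored exactly.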
def _get_bin_edges(op, a, bins, range, weights):
# parse the overloaded bins argument
n_equal_bins = None
bin_edges = None
first_edge = None
last_edge = None
if isinstance(bins, str):
# when `bins` is str, x.min() and x.max()
# will be calculated in advance
bin_name = bins
if a.size > 0:
assert range is not None
raw_range = range
first_edge, last_edge = _get_outer_edges(a, range)
if a.size == 0:
n_equal_bins = 1
else:
# Do not call selectors on empty arrays
selector = _hist_bin_selectors[bin_name](op, a, (first_edge, last_edge), raw_range)
selector.check()
width = selector.get_result()
if width:
n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width))
else:
# Width can be zero for some estimators, e.g. FD when
# the IQR of the data is zero.
n_equal_bins = 1
elif mt.ndim(bins) == 0:
first_edge, last_edge = _get_outer_edges(a, range)
n_equal_bins = bins
else:
# cannot be Tensor, must be calculated first
assert mt.ndim(bins) == 1 and not isinstance(bins, TENSOR_TYPE)
bin_edges = np.asarray(bins)
if not is_asc_sorted(bin_edges):
raise ValueError(
'`bins` must increase monotonically, when an array')
if n_equal_bins is not None:
# numpy gh-10322 means that type resolution rules are dependent on array
# shapes. To avoid this causing problems, we pick a type now and stick
# with it throughout.
bin_type = np.result_type(first_edge, last_edge, a)
if np.issubdtype(bin_type, np.integer):
bin_type = np.result_type(bin_type, float)
# bin edges must be computed
bin_edges = mt.linspace(
first_edge, last_edge, n_equal_bins + 1,
endpoint=True, dtype=bin_type, gpu=op.gpu)
return bin_edges, (first_edge, last_edge, n_equal_bins)
else:
return mt.tensor(bin_edges), None
class TensorHistogramBinEdges(TensorOperand, TensorOperandMixin):
__slots__ = '_calc_bin_edges_dependencies',
_op_type_ = OperandDef.HISTOGRAM_BIN_EDGES
_input = KeyField('input')
_bins = AnyField('bins')
_range = TupleField('range')
_weights = KeyField('weights')
_input_min = KeyField('input_min')
_input_max = KeyField('input_max')
_uniform_bins = TupleField('uniform_bins')
def __init__(self, input=None, bins=None, range=None, weights=None,
input_min=None, input_max=None, dtype=None, **kw):
super().__init__(_input=input, _bins=bins, _range=range, _weights=weights,
_input_min=input_min, _input_max=input_max, _dtype=dtype, **kw)
if getattr(self, '_calc_bin_edges_dependencies', None) is None:
self._calc_bin_edges_dependencies = []
@property
def input(self):
return self._input
@property
def bins(self):
return self._bins
@property
def range(self):
return self._range
@property
def weights(self):
return self._weights
@property
def input_min(self):
return self._input_min
@property
def input_max(self):
return self._input_max
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
inputs_iter = iter(self._inputs)
self._input = next(inputs_iter)
if isinstance(self._bins, (TENSOR_TYPE, TENSOR_CHUNK_TYPE)):
self._bins = next(inputs_iter)
if self._weights is not None:
self._weights = next(inputs_iter)
if self._input_min is not None:
self._input_min = next(inputs_iter)
if self._input_max is not None:
self._input_max = next(inputs_iter)
def __call__(self, a, bins, range, weights):
if range is not None:
_check_range(range)
if isinstance(bins, str):
# string, 'auto', 'stone', ...
# shape is unknown
bin_name = bins
# if `bins` is a string for an automatic method,
# this will replace it with the number of bins calculated
if bin_name not in _hist_bin_selectors:
raise ValueError(
f"{bin_name!r} is not a valid estimator for `bins`")
if weights is not None:
raise TypeError("Automated estimation of the number of "
"bins is not supported for weighted data")
if isinstance(range, tuple) and len(range) == 2:
# if `bins` is a string, e.g. 'auto', 'stone'...,
# and `range` provided as well,
# `a` should be trimmed first
first_edge, last_edge = _get_outer_edges(a, range)
a = a[(a >= first_edge) & (a <= last_edge)]
shape = (np.nan,)
elif mt.ndim(bins) == 0:
try:
n_equal_bins = operator.index(bins)
except TypeError: # pragma: no cover
raise TypeError(
'`bins` must be an integer, a string, or an array')
if n_equal_bins < 1:
raise ValueError('`bins` must be positive, when an integer')
shape = (bins + 1,)
elif mt.ndim(bins) == 1:
if not isinstance(bins, TENSOR_TYPE):
bins = np.asarray(bins)
if not is_asc_sorted(bins):
raise ValueError(
'`bins` must increase monotonically, when an array')
shape = astensor(bins).shape
else:
raise ValueError('`bins` must be 1d, when an array')
inputs = [a]
if isinstance(bins, TENSOR_TYPE):
inputs.append(bins)
if weights is not None:
inputs.append(weights)
if (a.size > 0 or np.isnan(a.size)) and \
(isinstance(bins, str) or mt.ndim(bins) == 0) and not range:
# for bins that is str or integer,
# requires min max calculated first
input_min = self._input_min = a.min()
inputs.append(input_min)
input_max = self._input_max = a.max()
inputs.append(input_max)
return self.new_tensor(inputs, shape=shape, order=TensorOrder.C_ORDER)
@classmethod
def tile(cls, op):
ctx = get_context()
range_ = op.range
if isinstance(op.bins, str):
check_chunks_unknown_shape([op.input], TilesError)
if op.input_min is not None:
# check if input min and max are calculated
min_max_chunk_keys = \
[inp.chunks[0].key for inp in (op.input_min, op.input_max)]
metas = ctx.get_chunk_metas(min_max_chunk_keys)
if any(meta is None for meta in metas):
raise TilesError('`input_min` or `input_max` need be executed first')
range_ = tuple(ctx.get_chunk_results(min_max_chunk_keys))
if isinstance(op.bins, TENSOR_TYPE):
# `bins` is a Tensor, needs to be calculated first
bins_chunk_keys = [c.key for c in op.bins.chunks]
metas = ctx.get_chunk_metas(bins_chunk_keys)
if any(meta is None for meta in metas):
raise TilesError('`bins` should be executed first if it\'s a tensor')
bin_datas = ctx.get_chunk_results(bins_chunk_keys)
bins = np.concatenate(bin_datas)
else:
bins = op.bins
bin_edges, _ = _get_bin_edges(op, op.input, bins, range_, op.weights)
bin_edges = bin_edges._inplace_tile()
return [bin_edges]
def histogram_bin_edges(a, bins=10, range=None, weights=None):
r"""
Function to calculate only the edges of the bins used by the `histogram`
function.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened tensor.
bins : int or sequence of scalars or str, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines the bin edges, including the rightmost
edge, allowing for non-uniform bin widths.
If `bins` is a string from the list below, `histogram_bin_edges` will use
the method chosen to calculate the optimal bin width and
consequently the number of bins (see `Notes` for more detail on
the estimators) from the data that falls within the requested
range. While the bin width will be optimal for the actual data
in the range, the number of bins will be computed to fill the
entire range, including the empty portions. For visualisation,
using the 'auto' option is suggested. Weighted data is not
supported for automated bin size selection.
'auto'
Maximum of the 'sturges' and 'fd' estimators. Provides good
all around performance.
'fd' (Freedman Diaconis Estimator)
Robust (resilient to outliers) estimator that takes into
account data variability and data size.
'doane'
An improved version of Sturges' estimator that works better
with non-normal datasets.
'scott'
            Less robust estimator that takes into account data
variability and data size.
'stone'
Estimator based on leave-one-out cross-validation estimate of
the integrated squared error. Can be regarded as a generalization
of Scott's rule.
'rice'
Estimator does not take variability into account, only data
size. Commonly overestimates number of bins required.
'sturges'
R's default method, only accounts for data size. Only
optimal for gaussian data and underestimates number of bins
for large non-gaussian datasets.
'sqrt'
Square root (of data size) estimator, used by Excel and
other programs for its speed and simplicity.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored. The first element of the range must be less than or
equal to the second. `range` affects the automatic bin
computation as well. While bin width is computed to be optimal
based on the actual data within `range`, the bin count will fill
the entire range including portions containing no data.
weights : array_like, optional
A tensor of weights, of the same shape as `a`. Each value in
`a` only contributes its associated weight towards the bin count
(instead of 1). This is currently not used by any of the bin estimators,
but may be in the future.
Returns
-------
bin_edges : tensor of dtype float
The edges to pass into `histogram`
See Also
--------
histogram
Notes
-----
The methods to estimate the optimal number of bins are well founded
in literature, and are inspired by the choices R provides for
histogram visualisation. Note that having the number of bins
proportional to :math:`n^{1/3}` is asymptotically optimal, which is
why it appears in most estimators. These are simply plug-in methods
that give good starting points for number of bins. In the equations
below, :math:`h` is the binwidth and :math:`n_h` is the number of
bins. All estimators that compute bin counts are recast to bin width
using the `ptp` of the data. The final bin count is obtained from
``np.round(np.ceil(range / h))``.
'auto' (maximum of the 'sturges' and 'fd' estimators)
A compromise to get a good value. For small datasets the Sturges
value will usually be chosen, while larger datasets will usually
default to FD. Avoids the overly conservative behaviour of FD
and Sturges for small and large datasets respectively.
Switchover point is usually :math:`a.size \approx 1000`.
'fd' (Freedman Diaconis Estimator)
.. math:: h = 2 \frac{IQR}{n^{1/3}}
The binwidth is proportional to the interquartile range (IQR)
and inversely proportional to cube root of a.size. Can be too
conservative for small datasets, but is quite good for large
datasets. The IQR is very robust to outliers.
'scott'
.. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}}
The binwidth is proportional to the standard deviation of the
data and inversely proportional to cube root of ``x.size``. Can
be too conservative for small datasets, but is quite good for
large datasets. The standard deviation is not very robust to
outliers. Values are very similar to the Freedman-Diaconis
estimator in the absence of outliers.
'rice'
.. math:: n_h = 2n^{1/3}
The number of bins is only proportional to cube root of
``a.size``. It tends to overestimate the number of bins and it
does not take into account data variability.
'sturges'
.. math:: n_h = \log _{2}n+1
The number of bins is the base 2 log of ``a.size``. This
estimator assumes normality of data and is too conservative for
larger, non-normal datasets. This is the default method in R's
``hist`` method.
'doane'
.. math:: n_h = 1 + \log_{2}(n) +
\log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}})
g_1 = mean[(\frac{x - \mu}{\sigma})^3]
\sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}
An improved version of Sturges' formula that produces better
estimates for non-normal datasets. This estimator attempts to
account for the skew of the data.
'sqrt'
.. math:: n_h = \sqrt n
The simplest and fastest estimator. Only takes into account the
data size.
Examples
--------
>>> import mars.tensor as mt
>>> arr = mt.array([0, 0, 0, 1, 2, 3, 3, 4, 5])
>>> mt.histogram_bin_edges(arr, bins='auto', range=(0, 1)).execute()
array([0. , 0.25, 0.5 , 0.75, 1. ])
>>> mt.histogram_bin_edges(arr, bins=2).execute()
array([0. , 2.5, 5. ])
For consistency with histogram, a tensor of pre-computed bins is
passed through unmodified:
>>> mt.histogram_bin_edges(arr, [1, 2]).execute()
array([1, 2])
This function allows one set of bins to be computed, and reused across
multiple histograms:
>>> shared_bins = mt.histogram_bin_edges(arr, bins='auto')
>>> shared_bins.execute()
array([0., 1., 2., 3., 4., 5.])
>>> group_id = mt.array([0, 1, 1, 0, 1, 1, 0, 1, 1])
>>> a = arr[group_id == 0]
>>> a.execute()
array([0, 1, 3])
>>> hist_0, _ = mt.histogram(a, bins=shared_bins).execute()
>>> b = arr[group_id == 1]
>>> b.execute()
array([0, 0, 2, 3, 4, 5])
>>> hist_1, _ = mt.histogram(b, bins=shared_bins).execute()
>>> hist_0; hist_1
array([1, 1, 0, 1, 0])
array([2, 0, 1, 1, 2])
Which gives more easily comparable results than using separate bins for
each histogram:
>>> hist_0, bins_0 = mt.histogram(a, bins='auto').execute()
>>> hist_1, bins_1 = mt.histogram(b, bins='auto').execute()
>>> hist_0; hist_1
array([1, 1, 1])
array([2, 1, 1, 2])
>>> bins_0; bins_1
array([0., 1., 2., 3.])
array([0. , 1.25, 2.5 , 3.75, 5. ])
"""
a, weights = _ravel_and_check_weights(a, weights)
op = TensorHistogramBinEdges(input=a, bins=bins,
range=range, weights=weights,
dtype=a.dtype)
return op(a, bins, range, weights)
class TensorHistogram(TensorOperand, TensorOperandMixin):
_op_type_ = OperandDef.HISTOGRAM
_input = KeyField('input')
_bins = AnyField('bins')
_range = TupleField('range')
_weights = KeyField('weights')
_density = BoolField('density')
_ret_bins = BoolField('ret_bins')
def __init__(self, input=None, bins=None, range=None, weights=None,
density=None, ret_bins=None, **kw):
super().__init__(_input=input, _bins=bins, _range=range, _weights=weights,
_density=density, _ret_bins=ret_bins, **kw)
@property
def input(self):
return self._input
@property
def bins(self):
return self._bins
@property
def range(self):
return self._range
@property
def weights(self):
return self._weights
@property
def density(self):
return self._density
@property
def ret_bins(self):
return self._ret_bins
@property
def output_limit(self):
return 1 if not self._ret_bins else 2
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
inputs_iter = iter(self._inputs)
self._input = next(inputs_iter)
if isinstance(self._bins, (TENSOR_TYPE, TENSOR_CHUNK_TYPE)):
self._bins = next(inputs_iter)
if self._weights is not None:
self._weights = next(inputs_iter)
def __call__(self, a, bins, range, weights):
a, weights = _ravel_and_check_weights(a, weights)
histogram_bin_edges_op = TensorHistogramBinEdges(
input=a, bins=bins, range=range, weights=weights,
dtype=np.dtype(np.float64))
bins = self._bins = histogram_bin_edges_op(a, bins, range, weights)
inputs = [histogram_bin_edges_op.input]
if isinstance(bins, TENSOR_TYPE):
inputs.append(bins)
# Histogram is an integer or a float array depending on the weights.
if weights is None:
dtype = np.dtype(np.intp)
else:
inputs.append(weights)
dtype = weights.dtype
self._dtype = dtype
hist = self.new_tensor(inputs, shape=(bins.size - 1,),
order=TensorOrder.C_ORDER)
return mt.ExecutableTuple([hist, bins])
@classmethod
def tile(cls, op):
bins = op.bins.rechunk(op.bins.shape)
shape = (bins.size - 1,)
out = op.outputs[0]
weights = None
if op.weights is not None:
# make input and weights have the same nsplits
weights = op.weights.rechunk(op.input.nsplits)._inplace_tile()
out_chunks = []
for chunk in op.input.chunks:
chunk_op = op.copy().reset_key()
chunk_op._range = None
chunk_op._ret_bins = False
chunk_op._density = False
chunk_inputs = [chunk, bins.chunks[0]]
if weights is not None:
weights_chunk = weights.cix[chunk.index]
chunk_inputs.append(weights_chunk)
out_chunk = chunk_op.new_chunk(chunk_inputs, shape=shape,
index=chunk.index, order=out.order)
out_chunks.append(out_chunk)
# merge chunks together
chunk = tree_add(out.dtype, out_chunks, (0,), shape)
new_op = op.copy()
n = new_op.new_tensor(op.inputs, shape=shape, order=out.order,
chunks=[chunk], nsplits=((shape[0],),))
if op.density:
db = mt.array(mt.diff(bins), float)
hist = n / db / n.sum()
recursive_tile(hist)
return [hist]
else:
return [n]
@classmethod
def execute(cls, ctx, op):
inputs, device_id, xp = as_same_device(
[ctx[inp.key] for inp in op.inputs], device=op.device, ret_extra=True)
a = inputs[0]
bins = inputs[1] if isinstance(op.bins, TENSOR_CHUNK_TYPE) else op.bins
weights = None
if op.weights is not None:
weights = inputs[-1]
with device(device_id):
hist, bin_edges = xp.histogram(a, bins=bins, range=op.range,
weights=weights, density=op.density)
ctx[op.outputs[0].key] = hist
if op.ret_bins:
ctx[op.outputs[1].key] = bin_edges
def histogram(a, bins=10, range=None, weights=None, density=None):
r"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened tensor.
bins : int or sequence of scalars or str, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines a monotonically increasing tensor of bin edges,
including the rightmost edge, allowing for non-uniform bin widths.
If `bins` is a string, it defines the method used to calculate the
optimal bin width, as defined by `histogram_bin_edges`.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored. The first element of the range must be less than or
equal to the second. `range` affects the automatic bin
computation as well. While bin width is computed to be optimal
based on the actual data within `range`, the bin count will fill
the entire range including portions containing no data.
weights : array_like, optional
A tensor of weights, of the same shape as `a`. Each value in
`a` only contributes its associated weight towards the bin count
(instead of 1). If `density` is True, the weights are
normalized, so that the integral of the density over the range
remains 1.
density : bool, optional
If ``False``, the result will contain the number of samples in
each bin. If ``True``, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the ``normed`` keyword if given.
Returns
-------
hist : tensor
The values of the histogram. See `density` and `weights` for a
description of the possible semantics.
bin_edges : tensor of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize, histogram_bin_edges
Notes
-----
All but the last (righthand-most) bin is half-open. In other words,
if `bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
*includes* 4.
Examples
--------
>>> import mars.tensor as mt
>>> mt.histogram([1, 2, 1], bins=[0, 1, 2, 3]).execute()
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> mt.histogram(mt.arange(4), bins=mt.arange(5), density=True).execute()
(array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> mt.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3]).execute()
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = mt.arange(5)
>>> hist, bin_edges = mt.histogram(a, density=True)
>>> hist.execute()
array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum().execute()
2.4999999999999996
>>> mt.sum(hist * mt.diff(bin_edges)).execute()
1.0
Automated Bin Selection Methods example, using 2 peak random data
with 2000 points:
>>> import matplotlib.pyplot as plt
>>> rng = mt.random.RandomState(10) # deterministic random data
>>> a = mt.hstack((rng.normal(size=1000),
... rng.normal(loc=5, scale=2, size=1000)))
>>> _ = plt.hist(np.asarray(a), bins='auto') # arguments are passed to np.histogram
>>> plt.title("Histogram with 'auto' bins")
Text(0.5, 1.0, "Histogram with 'auto' bins")
>>> plt.show()
"""
op = TensorHistogram(input=a, bins=bins, range=range,
weights=weights, density=density)
return op(a, bins, range, weights)
| 36.595745 | 104 | 0.619961 |
79457b8d82a7b8f78f7406bf2d7fd6ed3506074e | 760 | py | Python | makalu/views/user.py | duckpage/makalu | 357ec5b18560f601a33d68bb3a8e7eace2fde7d7 | [
"MIT"
] | null | null | null | makalu/views/user.py | duckpage/makalu | 357ec5b18560f601a33d68bb3a8e7eace2fde7d7 | [
"MIT"
] | null | null | null | makalu/views/user.py | duckpage/makalu | 357ec5b18560f601a33d68bb3a8e7eace2fde7d7 | [
"MIT"
] | null | null | null | from django.shortcuts import render, HttpResponseRedirect, HttpResponse, Http404
from django.http import JsonResponse
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login, logout
from django.utils.translation import ugettext as _
from django.contrib import messages
from django.urls import reverse
from django.contrib.auth.models import User
from makalu.models import Invoice
@login_required
def user_read(request, u):
invoiceuser = None
try:
invoiceuser = User.objects.get(username=u)
except User.DoesNotExist:
        messages.success(request, _('This user does not exist.'))
return render(request, 'makalu/user/read.html', {
'invoiceuser': invoiceuser
})
| 30.4 | 80 | 0.764474 |
79457c546a8776ef7035c0c268905cd39c1f0954 | 1,895 | py | Python | tests/tool_provider_tests.py | bsulcer/ims_lti_py | 979244d83c2e6420d2c1941f58e52f641c56ad12 | [
"MIT"
] | 18 | 2015-02-04T06:56:02.000Z | 2021-01-14T08:48:05.000Z | tests/tool_provider_tests.py | mitodl/ims_lti_py | d96a6201cf3b63b9e1b33780ef5e29bc65242ceb | [
"MIT"
] | 6 | 2015-01-26T19:00:56.000Z | 2018-03-23T05:52:29.000Z | tests/tool_provider_tests.py | mitodl/ims_lti_py | d96a6201cf3b63b9e1b33780ef5e29bc65242ceb | [
"MIT"
] | 27 | 2015-04-16T12:05:42.000Z | 2020-12-24T00:32:53.000Z | from test_helper import create_test_tp, create_params_tp
import unittest
class TestToolProvider(unittest.TestCase):
def setUp(self):
self.params = create_params_tp()
self.tp = create_test_tp()
def test_outcome_service(self):
'''
Should recognize an outcome service.
'''
self.assertTrue(self.tp.is_outcome_service())
self.tp.lis_result_sourcedid = None
self.assertFalse(self.tp.is_outcome_service())
def test_return_url_with_messages(self):
'''
Should generate a return url with messages.
'''
self.assertEqual(self.tp.build_return_url(),
self.params['launch_presentation_return_url'])
self.tp.lti_errormsg = 'user error message'
self.tp.lti_errorlog = 'lms error log'
self.tp.lti_msg = 'user message'
self.tp.lti_log = 'lms message'
self.assertEqual(self.tp.build_return_url(),
self.params['launch_presentation_return_url'] +
                '?lti_msg=user+message&lti_errormsg=user+error+message&lti_errorlog=lms+error+log&lti_log=lms+message')
def test_roles(self):
'''
Should recognize the roles.
'''
self.assertTrue(self.tp.is_student())
self.assertTrue(self.tp.is_instructor())
self.assertTrue(self.tp.has_role('Observer'))
self.assertFalse(self.tp.has_role('administrator'))
def test_username(self):
'''
Should find the best username.
'''
self.assertEqual(self.tp.username('guy'), 'guy')
self.tp.lis_person_name_full = 'full'
self.assertEqual(self.tp.username('guy'), 'full')
self.tp.lis_person_name_family = 'family'
self.assertEqual(self.tp.username('guy'), 'family')
self.tp.lis_person_name_given = 'given'
self.assertEqual(self.tp.username('guy'), 'given')
| 37.156863 | 119 | 0.641161 |
79457cb730acd938fffa9ba06dc7ad61786726bd | 3,239 | py | Python | checkkey.py | calllivecn/keyboardmouse | 9a6cd7f0057f2c09293f82de5b2ef3a2879d1c43 | [
"MIT"
] | null | null | null | checkkey.py | calllivecn/keyboardmouse | 9a6cd7f0057f2c09293f82de5b2ef3a2879d1c43 | [
"MIT"
] | null | null | null | checkkey.py | calllivecn/keyboardmouse | 9a6cd7f0057f2c09293f82de5b2ef3a2879d1c43 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
import libevdev
def print_capabilities(l):
v = l.driver_version
print("Input driver version is {}.{}.{}".format(v >> 16, (v >> 8) & 0xff, v & 0xff))
id = l.id
print("Input device ID: bus {:#x} vendor {:#x} product {:#x} version {:#x}".format(
id["bustype"],
id["vendor"],
id["product"],
id["version"],
))
print("Input device name: {}".format(l.name))
print("Supported events:")
for t, cs in l.evbits.items():
print(" Event type {} ({})".format(t.value, t.name))
for c in cs:
if t in [libevdev.EV_LED, libevdev.EV_SND, libevdev.EV_SW]:
v = l.value[c]
print(" Event code {} ({}) state {}".format(c.value, c.name, v))
else:
print(" Event code {} ({})".format(c.value, c.name))
if t == libevdev.EV_ABS:
a = l.absinfo[c]
print(" {:10s} {:6d}".format('Value', a.value))
print(" {:10s} {:6d}".format('Minimum', a.minimum))
print(" {:10s} {:6d}".format('Maximum', a.maximum))
print(" {:10s} {:6d}".format('Fuzz', a.fuzz))
print(" {:10s} {:6d}".format('Flat', a.flat))
print(" {:10s} {:6d}".format('Resolution', a.resolution))
print("Properties:")
for p in l.properties:
print(" Property type {} ({})".format(p.value, p.name))
def print_event(e):
print("Event: time {}.{:06d}, ".format(e.sec, e.usec), end='')
if e.matches(libevdev.EV_SYN):
if e.matches(libevdev.EV_SYN.SYN_MT_REPORT):
print("++++++++++++++ {} ++++++++++++ {}".format(e.code.name, e.value))
elif e.matches(libevdev.EV_SYN.SYN_DROPPED):
print(">>>>>>>>>>>>>> {} >>>>>>>>>>>> {}".format(e.code.name, e.value))
else:
print("-------------- {} ------------ {}".format(e.code.name, e.value))
else:
print("type {:02x} {} code {:03x} {:20s} value {:4d}".format(e.type.value, e.type.name, e.code.value, e.code.name, e.value))
def main(args):
path = args[1]
try:
with open(path, "rb") as fd:
l = libevdev.Device(fd)
print_capabilities(l)
print("################################\n"
"# Waiting for events #\n"
"################################")
while True:
try:
for e in l.events():
print_event(e)
except libevdev.EventsDroppedException:
for e in l.sync():
print_event(e)
except KeyboardInterrupt:
pass
except IOError as e:
import errno
if e.errno == errno.EACCES:
print("Insufficient permissions to access {}".format(path))
elif e.errno == errno.ENOENT:
print("Device {} does not exist".format(path))
else:
raise e
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage: {} /dev/input/eventX".format(sys.argv[0]))
sys.exit(1)
main(sys.argv)
| 35.206522 | 136 | 0.462488 |
79457ce15d18467244a1f23d9ea56022145bc397 | 604 | py | Python | Leetcode/438.find-all-anagrams-in-a-string.py | EdwaRen/Competitve-Programming | e8bffeb457936d28c75ecfefb5a1f316c15a9b6c | [
"MIT"
] | 1 | 2021-05-03T21:48:25.000Z | 2021-05-03T21:48:25.000Z | Leetcode/438.find-all-anagrams-in-a-string.py | EdwaRen/Competitve_Programming | e8bffeb457936d28c75ecfefb5a1f316c15a9b6c | [
"MIT"
] | null | null | null | Leetcode/438.find-all-anagrams-in-a-string.py | EdwaRen/Competitve_Programming | e8bffeb457936d28c75ecfefb5a1f316c15a9b6c | [
"MIT"
] | null | null | null | class Solution(object):
def findAnagrams(self, s, p):
        # Per-letter counts of the pattern p; a-z map to distinct buckets via ord(c) % 26.
        palin_dict = [0] * 26
        for i in p:
            palin_dict[ord(i) % 26] += 1
        res = []
        # Slide a window of length len(p) over s: the entering character decrements
        # its count, the leaving character restores it; all-zero counts mean the
        # current window is an anagram of p.
        for i in range(len(s)):
            palin_dict[ord(s[i]) % 26] -= 1
            if i >= len(p):
                palin_dict[ord(s[i - len(p)]) % 26] += 1
            if len(set(palin_dict)) == 1 and palin_dict[0] == 0:
                res.append(i - len(p) + 1)
        return res
z = Solution()
s = "cbaebabacd"
p = "abc"
print(z.findAnagrams(s, p))
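# For the sample input above (s = "cbaebabacd", p = "abc") the expected output is
# [0, 6]: the windows "cba" starting at index 0 and "bac" starting at index 6 are
# both anagrams of "abc". Each step rescans the 26 counters, so the scan is
# O(len(s) * 26) overall.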
| 26.26087 | 66 | 0.490066 |
79457d2019fe5e90b923e4a0edcf717cb720a118 | 36,703 | py | Python | hexonet/ispapicli/gui/mainframe.py | hexonet/ispapicli | 4ed54a8500e9ae31d4aee33e8b657acf640a42ff | [
"MIT"
] | null | null | null | hexonet/ispapicli/gui/mainframe.py | hexonet/ispapicli | 4ed54a8500e9ae31d4aee33e8b657acf640a42ff | [
"MIT"
] | 84 | 2020-04-02T10:53:46.000Z | 2022-02-21T03:05:03.000Z | hexonet/ispapicli/gui/mainframe.py | hexonet/ispapicli | 4ed54a8500e9ae31d4aee33e8b657acf640a42ff | [
"MIT"
] | 5 | 2020-06-19T11:24:02.000Z | 2021-04-06T10:25:36.000Z | from typing import Text
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from modules.core import Core
from modules.scrap import Scrap
from gui.login import LoginWindow
import textwrap
import sys
from io import StringIO
import re
import os
import requests
from packaging import version
__version__ = "1.18.0"
class MainFrame(QWidget):
BATCH_COMMANDLINE_ID = 300
BATCH_PARAMETER_ID = 400
BATCH_LIST_ID = 500
BATCH_PARAMS = [
"Select",
"CONTACT",
"DNSZONE",
"DOMAIN",
"DOMAINAUTH",
"HOST",
"NAMESERVER",
"OBJECTID",
"SSLCERTID",
]
def __init__(self, parent=None):
super(MainFrame, self).__init__(parent)
        # initialize the gui
self.originalPalette = QApplication.palette()
self.createTopGroupBox()
self.createLeftGroupBox()
self.createMiddleTabWidget()
self.createProgressBar()
self.createMenubar()
self.createToolbar()
# set gui layout
mainLayout = QGridLayout()
mainLayout.setMenuBar(self.leftMenuBar)
mainLayout.addWidget(self.toolbar, 0, 0, 1, 3)
mainLayout.addWidget(self.topBox, 1, 0, 1, 3)
mainLayout.addWidget(self.leftGroupBox, 2, 0)
mainLayout.addWidget(self.middleGroupBox, 2, 1)
mainLayout.addWidget(self.progressBar, 3, 0, 1, 3)
mainLayout.setRowStretch(2, 2)
mainLayout.setColumnStretch(0, 2)
mainLayout.setColumnStretch(1, 6)
self.setLayout(mainLayout)
self.setWindowTitle("ISPAPI-CLI Tool")
# set app gui style
QApplication.setStyle(QStyleFactory.create("Fusion"))
        # create core login instance
self.coreLogic = Core()
# scrap instance
self.scrap = Scrap()
# check user session upon start
self.checkLogin()
# set focus on command input field
self.cmdTxt.setFocus()
        # initialize command line completer
self.initialiseCommandCompleter()
# initialize subuser completer
self.initialiseSubuserCompleter()
# command to execute
self.commandToExecute = ""
# set app icon
self.setWindowIcon(QIcon(self.getIcon("logo-bgw.jpg")))
# init loading gif
# self.loading_screen = LoadingScreen()
def checkLogin(self):
result = self.coreLogic.checkSession()
if result == "valid":
self.sessionTime.setText("Your session is valid. ")
self.sessionTime.setStyleSheet("color:green")
self.loginBtn.setIcon(QIcon(self.getIcon("logout.png")))
self.loginBtn.setText("Logout")
self.loginBtn.clicked.connect(self.logout)
self.reconnectBtnAction(self.loginBtn.clicked, self.logout)
# enable gui
self.disableEnableGui("enable")
else:
self.sessionTime.setText("Session expired! ")
self.sessionTime.setStyleSheet("color:red")
self.loginBtn.setIcon(QIcon(self.getIcon("login.png")))
self.loginBtn.setText("Login")
self.reconnectBtnAction(self.loginBtn.clicked, self.openLoginWindow)
# diable gui
self.disableEnableGui("disable")
def reconnectBtnAction(self, signal, newhandler=None, oldhandler=None):
"""
Reconnecting login btn action to either login or logout
"""
while True:
try:
if oldhandler is not None:
signal.disconnect(oldhandler)
else:
signal.disconnect()
except TypeError:
break
if newhandler is not None:
signal.connect(newhandler)
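    # Illustrative call (mirroring checkLogin above): once a session becomes
    # valid, swap the login button's handler over to logout:
    #
    #     self.reconnectBtnAction(self.loginBtn.clicked, self.logout)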
def logout(self):
msg = self.coreLogic.logout()
alert = QMessageBox()
alert.setText(msg)
alert.exec_()
# update login
self.checkLogin()
def disableEnableGui(self, status=None):
"""
If session is expired then disable gui
"""
if status is not None:
if status == "enable":
self.leftGroupBox.setEnabled(True)
self.topBox.setEnabled(True)
# focus on command input field
self.cmdTxt.setFocus()
else:
self.leftGroupBox.setDisabled(True)
self.topBox.setDisabled(True)
else:
pass
def advanceProgressBar(self):
curVal = self.progressBar.value()
if curVal <= 99:
self.progressBar.setValue(curVal + 1)
else:
self.timer.stop()
self.progressBar.setValue(0)
def createProgressBar(
self,
):
self.progressBar = QProgressBar()
self.progressBar.setRange(0, 100)
self.progressBar.setValue(0)
self.progressBar.setMaximumHeight(5)
self.progressBar.setTextVisible(False)
# create a timer for the progress bar
self.timer = QTimer(self)
self.timer.timeout.connect(self.advanceProgressBar)
# call timer with speed of 5
self.progressBarSpeed(5)
def progressBarSpeed(self, speed):
self.timer.start(speed)
def onMyToolBarButtonClick(self, s):
print("click", s)
def executeCommand(self):
# start progressbar
self.progressBarSpeed(5)
# get args from the GUI
commandToExecute = self.commandText.toPlainText().lower()
if commandToExecute.startswith("-", 0, 1):
original_args = commandToExecute.splitlines()
else:
original_args = ("--" + commandToExecute).splitlines()
original_args = " ".join(original_args)
# remove extra spaces around the = cases are ' =', '= ', ' = '
original_args = original_args.replace(" = ", "=")
original_args = original_args.replace(" =", "=")
original_args = original_args.replace("= ", "=")
splitted_args = original_args.split()
# intialize the parser
core_obj = self.coreLogic
parser = core_obj.initParser()
        # overwrite default error function with our local function to show on the GUI
parser.error = self.errorFunction
try:
args = vars(parser.parse_args(splitted_args))
reminderargs = args["args"]
# parse command args
result, data = core_obj.parseArgs(args)
# case gui started
if result == "gui":
self.plainResponse.setText("GUI already started")
# case of help command
elif result == "help":
helpText = ""
preHelp = textwrap.dedent(
"""\
ISPAPI - Commandline Tool
------------------------------------------------------------
The tool can be used in two modes:
- By using '=' sign e.g. --command=QueryDomainList limit=5
- By using spaces e.g. --command QueryDomainList limit 5
------------------------------------------------------------
"""
)
# redirect stdout
stringio = StringIO()
previous_stdout = sys.stdout
sys.stdout = stringio
# trigger parser help
parser.print_help()
# set back stdout
sys.stdout = previous_stdout
stdoutValue = stringio.getvalue()
# show output on the GUI
helpText = preHelp + stdoutValue
self.plainResponse.setText(helpText)
elif result == "cmd":
# append reminder args with the command
params_list = core_obj.parseParameters(reminderargs)
cmd = data
# add them to data which is the command list
cmd.update(params_list)
# check if subuser
subuser = self.subuser.text()
if len(subuser) > 1:
core_obj.cl.setUserView(subuser) # set subuser
else:
core_obj.cl.resetUserView() # remove subuser
# check for batches
batch_param = self.batchParams.currentText()
batch_params_list = self.batchParamsList.toPlainText()
active_tab = self.leftTabWidget.currentIndex()
if (
batch_param != "Select"
and batch_params_list != ""
and active_tab == 1
):
self.plainResponse.setText("")
lines = batch_params_list.split("\n")
for line in lines:
if line != "":
cmd[batch_param] = line
# request call
self.response = core_obj.request(cmd)
# set result values on the GUI
self.populateResults(self.response, "iterative")
else:
# request call
self.response = core_obj.request(cmd)
# set result values on the GUI
self.populateResults(self.response)
# case update commands
elif result == "update":
# create scrap object
# msg = "Please run this command in the terminal, use: ./ispapicli --update"
# self.plainResponse.setText(msg)
self.showUpdating()
else:
self.plainResponse.setText(data)
# 1 end the progress bar
# self.progressBarSpeed(5)
# 2
# check user session, in case the session is expired
# self.checkLogin()
except Exception as e:
self.plainResponse.setText("Command failed due to: " + str(e))
def errorFunction(self, message):
self.plainResponse.setText("An error happend: " + message + "\n")
def updateCommandView(self, e):
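# Mirrors the single-line command field into the multi-line command view.
# Illustrative example (hypothetical input): typing
#   QueryDomainList limit = 5
# is normalised to "command QueryDomainList limit=5" and rendered as
#   command=QueryDomainList
#   limit=5
# which is what executeCommand() later parses.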
cmdTxt = self.cmdTxt.text()
# check if the command is related to other actions
if cmdTxt.startswith("-", 0, 1):
self.commandText.setText(cmdTxt)
self.commandToExecute = cmdTxt
return 0
else:
args = "command "
args += cmdTxt
args = args.split()
# clean extra spaces, leave only single spaces among commands
original_args = " ".join(args)
# remove extra spaces around '=' (cases: ' =', '= ', ' = ')
original_args = original_args.replace(" = ", "=")
original_args = original_args.replace(" =", "=")
original_args = original_args.replace("= ", "=")
# split args in an array
parameters = original_args.split()
# split commands if = used
params_len = len(parameters)
params = {}
try:
if params_len > 1:
i = 0
while i < params_len:
if "=" in parameters[i]:
key, value = parameters[i].split("=")
params[key] = value
else:
key = parameters[i]
i += 1
value = parameters[i]
params[key] = value
i += 1
except Exception as e:
pass
commandView = "\n".join(("{}={}".format(*i) for i in params.items()))
self.commandText.setText(commandView)
self.commandToExecute = "--" + commandView
def createToolbar(self):
self.toolbar = QToolBar("My main toolbar")
self.toolbar.setIconSize(QSize(20, 20))
saveAction = QAction(
QIcon(self.getIcon("save.png")), "Save results to a file", self
)
saveAction.triggered.connect(lambda: self.saveCommandToFile())
copyAction = QAction(
QIcon(self.getIcon("copy.png")), "Copy the results to clipboard", self
)
copyAction.triggered.connect(self.copyToClipboard)
helpAction = QAction(
QIcon(self.getIcon("help.png")), "See help documentation", self
)
helpAction.triggered.connect(self.showHelp)
updateAction = QAction(
QIcon(self.getIcon("refresh.png")), "Update the tool API's commands", self
)
updateAction.triggered.connect(self.showUpdating)
updateToolAction = QAction(
QIcon(self.getIcon("direct-download.png")), "Update the tool", self
)
updateToolAction.triggered.connect(self.checkForUpdate)
self.sessionTime = QLabel("Checking your session... ")
spacer = QWidget()
spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
self.loginBtn = QPushButton("Login")
self.loginBtn.setIcon(QIcon(self.getIcon("login.png")))
self.loginBtn.setStyleSheet("padding: 2px; padding-left: 6px")
self.loginBtn.setIconSize(QSize(12, 12))
self.loginBtn.setLayoutDirection(Qt.RightToLeft)
seperator = QAction(self)
seperator.setSeparator(True)
# create a new window -TODO
# self.toolbar.addAction(openAction)
self.toolbar.addAction(saveAction)
self.toolbar.addAction(seperator)
self.toolbar.addAction(copyAction)
self.toolbar.addAction(seperator)
self.toolbar.addAction(helpAction)
self.toolbar.addAction(seperator)
self.toolbar.addAction(updateAction)
self.toolbar.addAction(updateToolAction)
self.toolbar.addWidget(spacer)
self.toolbar.addWidget(self.sessionTime)
self.toolbar.addWidget(self.loginBtn)
def createMenubar(self):
self.rightMenuBar = QMenuBar()
self.currentVersion = QLabel(__version__)
self.leftMenuBar = QMenuBar()
file = self.leftMenuBar.addMenu("File")
new = QAction("New window", self)
new.setShortcut("Ctrl+n")
save = QAction("Save to file", self)
save.setShortcut("Ctrl+S")
quit = QAction("Quit", self)
quit.setShortcut("Ctrl+q")
# create a new window - TODO
# file.addAction(new)
file.addAction(save)
file.addAction(quit)
edit = self.leftMenuBar.addMenu("Edit")
copy = QAction("Copy", self)
copy.setShortcut("Ctrl+c")
edit.addAction(copy)
help = self.leftMenuBar.addMenu("Help")
help.addAction("About ISPAPI tool")
help.addAction("How to start?")
file.triggered[QAction].connect(self.menuBarActions)
edit.triggered[QAction].connect(self.menuBarActions)
help.triggered[QAction].connect(self.menuBarActions)
spacer = QWidget()
spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
check = QAction("Current version: " + __version__, self)
self.rightMenuBar.addAction(check)
self.leftMenuBar.setCornerWidget(self.rightMenuBar)
self.leftMenuBar.setStyleSheet("padding: 0px 0px 0px 5px")
def createTopGroupBox(self):
self.topBox = QGroupBox((""))
executeBtn = QPushButton("Execute")
executeBtn.setIcon(QIcon(self.getIcon("execute.png")))
executeBtn.clicked.connect(self.executeCommand)
executeBtn.setIconSize(QSize(14, 14))
# executeBtn.setLayoutDirection(Qt.RightToLeft)
clearBtn = QPushButton("Clear")
clearBtn.setIcon(QIcon(self.getIcon("cross.png")))
clearBtn.setIconSize(QSize(14, 14))
# clearBtn.setLayoutDirection(Qt.RightToLeft)
clearBtn.clicked.connect(self.__clearCMDfield)
self.cmdTxt = QLineEdit()
self.cmdTxt.setPlaceholderText("Enter command here...")
self.cmdTxt.textEdited.connect(self.updateCommandView)
# qSpaceEvent = QKeyEvent(QEvent.KeyPress, Qt.Key_Backspace, Qt.NoModifier)
# self.cmdTxt.keyPressEvent(qSpaceEvent)
self.cmdTxt.installEventFilter(self)
self.cmdTxt.returnPressed.connect(self.executeCommand)
# set command completer
self.completer = QCompleter()
self.completer.setCaseSensitivity(Qt.CaseInsensitive)
self.cmdTxt.setCompleter(self.completer)
# subuser
self.subuser = QLineEdit()
self.subuser.setPlaceholderText("Type a subuser")
self.subuser.returnPressed.connect(self.executeCommand)
# set command completer
self.subUsercompleter = QCompleter()
self.subUsercompleter.setCaseSensitivity(Qt.CaseInsensitive)
self.subuser.setCompleter(self.subUsercompleter)
self.minParameter = QLabel(self)
self.minParameter.setText("Min parameters: ")
self.minParameter.setStyleSheet("color:gray")
f = QFont("Arial", 9)
self.minParameter.setFont(f)
gridLayout = QGridLayout()
gridLayout.addWidget(self.cmdTxt, 0, 1, 1, 1)
gridLayout.addWidget(self.subuser, 0, 2, 1, 1)
gridLayout.addWidget(executeBtn, 0, 3, 1, 1)
gridLayout.addWidget(clearBtn, 0, 4, 1, 1)
gridLayout.addWidget(self.minParameter, 1, 1, 1, 1)
gridLayout.setColumnStretch(1, 6)
gridLayout.setColumnStretch(2, 2)
gridLayout.setColumnStretch(3, 1)
gridLayout.setColumnStretch(4, 1)
gridLayout.setContentsMargins(5, 0, 5, 10)
self.topLayout = gridLayout
self.topBox.setLayout(gridLayout)
def createLeftGroupBox(self):
self.leftGroupBox = QGroupBox("Command")
self.leftTabWidget = QTabWidget()
self.leftTabWidget.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Ignored)
tab1 = QWidget()
self.commandText = QTextEdit()
self.commandText.setPlaceholderText("Extracted command will be shown here")
self.commandText.setAcceptRichText(False)
tab1hbox = QHBoxLayout()
tab1hbox.setContentsMargins(5, 5, 5, 5)
tab1hbox.addWidget(self.commandText)
tab1.setLayout(tab1hbox)
tab2 = QWidget()
# params label
self.batchParamsLabel = QLabel()
self.batchParamsLabel.setText("Select parameter:")
# params list
self.batchParams = QComboBox()
self.batchParams.addItems(self.BATCH_PARAMS)
self.batchParams.setEditable(True)
# params text label
self.batchParamsListLabel = QLabel()
self.batchParamsListLabel.setText("Insert the list:")
self.batchParamsListLabel.setContentsMargins(0, 10, 0, 0)
# params text
self.batchParamsList = QTextEdit()
self.batchParamsList.setPlaceholderText("Enter each item in new line")
self.batchParamsList.setFrameStyle(QFrame.Box)
tableLayout = QGridLayout()
tableLayout.setContentsMargins(15, 5, 5, 5)
tableLayout.addWidget(self.batchParamsLabel, 0, 0)
tableLayout.addWidget(self.batchParams, 1, 0)
tableLayout.addWidget(self.batchParamsListLabel, 2, 0)
tableLayout.addWidget(self.batchParamsList, 3, 0)
tab2.setLayout(tableLayout)
self.leftTabWidget.addTab(tab1, "Extracted Command")
self.leftTabWidget.addTab(tab2, "Batch")
layout = QGridLayout()
layout.addWidget(self.leftTabWidget, 0, 0, 1, 1)
self.leftGroupBox.setLayout(layout)
def createMiddleTabWidget(self):
self.middleGroupBox = QGroupBox("Results")
middleTabWidget = QTabWidget()
middleTabWidget.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Ignored)
tab1 = QWidget()
self.plainResponse = QTextEdit()
tab1hbox = QHBoxLayout()
tab1hbox.setContentsMargins(5, 5, 5, 5)
tab1hbox.addWidget(self.plainResponse)
tab1.setLayout(tab1hbox)
tab2 = QWidget()
self.tableResponse = QTableWidget(1, 2)
self.tableResponse.setHorizontalHeaderLabels(["Property", "Value"])
self.tableResponse.horizontalHeader().setStretchLastSection(True)
tableLayout = QGridLayout()
tableLayout.setContentsMargins(5, 5, 5, 5)
tableLayout.addWidget(self.tableResponse, 0, 0)
tab2.setLayout(tableLayout)
tab3 = QWidget()
self.listResponse = QTextEdit()
tab3hbox = QHBoxLayout()
tab3hbox.addWidget(self.listResponse)
tab3.setLayout(tab3hbox)
middleTabWidget.addTab(tab1, "Plain")
middleTabWidget.addTab(tab2, "Properties")
middleTabWidget.addTab(tab3, "List")
layout = QGridLayout()
layout.addWidget(middleTabWidget, 0, 0, 1, 1)
self.middleGroupBox.setLayout(layout)
def openLoginWindow(self):
"""
Start login window
"""
loginGui = LoginWindow(self, self.coreLogic)
loginGui.startGui()
def menuBarActions(self, q):
action = q.text()
if action == "New Window":
pass
if action == "Save to file":
self.saveCommandToFile()
if action == "Quit":
self.closeApplication()
if action == "Copy":
self.copyToClipboard()
if action == "Help":
self.showHelp()
if action == "About ISPAPI tool":
self.showAbout()
if action == "How to start?":
self.showHelp()
def closeApplication(self):
print("exiting")
sys.exit()
def startNewWindow(self):
app = QApplication(sys.argv)
appGui = MainFrame()
appGui.startGui()
sys.exit(app.exec_())
def startGui(self):
geo = QDesktopWidget().availableGeometry()
screenWidth = geo.width()
screenHeight = geo.height()
width = int(screenWidth * 0.5)
height = int(screenHeight * 0.5)
self.resize(width, height)
frameGeo = self.frameGeometry()
cp = geo.center()
frameGeo.moveCenter(cp)
self.move(frameGeo.topLeft())
# start gui
self.show()
def initialiseCommandCompleter(self):
model = QStringListModel()
# get all possible autocomplete strings
stringsSuggestion = []
stringsSuggestion = (self.coreLogic.getCommandList()).splitlines()
# set suggestion to the model
model.setStringList(stringsSuggestion)
# set model to the completer
self.completer.setModel(model)
def initialiseSubuserCompleter(self):
model = QStringListModel()
# get all possible autocomplete strings
stringsSuggestion = []
stringsSuggestion = (self.coreLogic.getSubUserList()).splitlines()
# set suggestion to the model
model.setStringList(stringsSuggestion)
# set model to the completer
self.subUsercompleter.setModel(model)
def __clearCMDfield(self):
self.cmdTxt.clear()
self.cmdTxt.setFocus(True)
def populateResults(self, response, mode="normal"):
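# Renders a response object into the three result views: the plain tab, the
# Property/Value table and the list tab. In "iterative" mode (used by batch
# runs) the plain output is appended instead of replacing the previous text.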
# get results
plainResult = response.getPlain()
listResult = response.getListHash()
# set plain results
if mode == "iterative":
self.plainResponse.append(plainResult)
print("iternative")
else:
self.plainResponse.setText(plainResult)
# delete any previous content of the list
self.listResponse.setText("")
# set properties and list
resultLists = listResult["LIST"]
counter = 0
for row in resultLists:
for col in row:
counter += 1
# set the number of rows
self.tableResponse.setRowCount(counter)
# populate the table
rownumber = 0
for row in resultLists:
for i, (key, value) in enumerate(row.items()):
keyWidget = QTableWidgetItem(key)
valueWidget = QTableWidgetItem(value)
self.tableResponse.setItem(rownumber, 0, keyWidget)
self.tableResponse.setItem(rownumber, 1, valueWidget)
# update the list
if key not in ("TOTAL", "FIRST", "LAST", "LIMIT", "COUNT"):
self.listResponse.append(value)
# increment rownumber
rownumber += 1
# order table content
self.tableResponse.sortItems(Qt.AscendingOrder)
def saveCommandToFile(self):
try:
textToWrite = self.commandAndResponsePlain()
options = QFileDialog.Options()
# options |= QFileDialog.DontUseNativeDialog # Qt's builtin File Dialogue
fileName, _ = QFileDialog.getSaveFileName(
self, "Open", "report.txt", "All Files (*.*)", options=options
)
if fileName:
try:
with open(fileName, "w") as file:
file.write(textToWrite)
alert = QMessageBox()
alert.setText("'" + fileName + "' \n\nFile Saved Successfully!")
alert.setIcon(QMessageBox.Information)
alert.exec_()
except Exception as e:
alert = QMessageBox()
alert.setIcon(QMessageBox.Critical)
alert.setText("Couldn't save the file due to: " + str(e))
alert.exec_()
except Exception as e:
alert = QMessageBox()
alert.setIcon(QMessageBox.Critical)
alert.setText("Request a command first!")
alert.setWindowTitle("Error")
alert.exec_()
def commandAndResponsePlain(self):
result = self.plainResponse.toPlainText()
command = self.response.getCommandPlain()
textToWrite = command + "\n" + result
return textToWrite
def copyToClipboard(self):
try:
newText = self.commandAndResponsePlain()
clipboard = QApplication.clipboard()
clipboard.setText(newText)
except Exception as e:
print(e)
pass # in the case where there is not command requested
def showHelp(self):
box = QMessageBox(self)
msg = """<p align='center'>
<b style='font-size:20px'>Help Information</b>. <br><br><br>
This window provides a simple help view; more detailed help can be found at:
<a href="https://hexonet.github.io/ispapicli/">ISPAPI CLI Tool Documentation</a>
<br><br>
Quick start:
<br>
To show help, type the command: -h | --help
<br>
From there you will find all information about using the command line in both the GUI and terminal
<br><br>
<span style="color:orange">Note</span>: Commands executed in terminal are similar to commands used in the GUI, except for the "--update" command which is only possible to trigger in the terminal
<br><br><br>
Copyright 2020 @Hexonet
<br><br>
</p>
"""
box.setStandardButtons(QMessageBox.Ok)
box.setIcon(QMessageBox.Information)
box.setWindowTitle("Help")
box.setText(msg)
box.show()
def showUpdating(self):
rawFiles = self.scrap.scrapCommands()
status = self.scrap.readRawFiles(rawFiles)
# init tool dropdown autocomplete
if status:
self.initialiseCommandCompleter()
def Handle_Progress(self, blocknum, blocksize, totalsize):
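# Progress callback with the (block number, block size, total size) signature
# that urllib.request.urlretrieve passes to its reporthook; converts the bytes
# read so far into a percentage for the progress bar, e.g. 64 blocks of 8192
# bytes out of a 1 MiB total is 50%.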
## calculate the progress
readed_data = blocknum * blocksize
if totalsize > 0:
download_percentage = readed_data * 100 / totalsize
self.progressBar.setValue(download_percentage)
QApplication.processEvents()
def checkForUpdate(self):
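# Compares the running __version__ with the tag of the latest GitHub release
# (read from the redirect URL of /releases/latest, minus its leading 'v') and,
# if a newer version exists, asks whether to download and install it.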
preBox = QMessageBox(self)
msgNo = """<p align='center'>
Checking for update...
</p>
"""
# preBox.setStandardButtons(QMessageBox.Ok)
preBox.setWindowTitle("Checking...")
preBox.setText(msgNo)
preBox.show()
QApplication.processEvents()
currentVersion = version.parse(__version__)
url = "https://github.com/hexonet/ispapicli/releases/latest"
r = requests.get(url)
if r.ok:
preBox.close()
box = QMessageBox(self)
latestVersion = r.url.split("/")[-1]
latestVersion = version.parse(latestVersion[1:]) # remove the prefix v
if currentVersion == latestVersion:
msgNo = """<p align='center'>
You have the latest version installed.
</p>
"""
box.setStandardButtons(QMessageBox.Ok)
box.setWindowTitle("Updating")
box.setText(msgNo)
box.show()
elif latestVersion > currentVersion:
msgYes = """<p align='center'>
New version available, update now?
</p>
"""
ret = box.question(self, "Updating", msgYes, box.No | box.Yes, box.Yes)
if ret == box.Yes:
# updating the tool
self.updateTool(latestVersion)
else:
box.close()
else:
return
else:
preBox = QMessageBox(self)
msgNo = """<p align='center'>
Please check your internet connection.
</p>
"""
# preBox.setStandardButtons(QMessageBox.Ok)
preBox.setWindowTitle("No Internet")
preBox.setText(msgNo)
preBox.show()
def updateTool(self, latestVersion):
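# Rough flow: download the platform-specific release archive, unpack it into
# ./tmp, rename the extracted binary to ispapicli-<version>, remove the archive
# and the temp directory, then start the new binary and quit this instance.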
fileName = ""
if sys.platform == "win32":
fileName = "win-binary-latest.zip" # fileName = "win-binary-%s.zip" % str(latestVersion)
elif sys.platform == "darwin":
fileName = "macos-binary-latest.zip"
elif sys.platform == "linux":
fileName = "linux-binary-latest.zip"
else:
return
# init download
url = "https://github.com/hexonet/ispapicli/releases/download/v%s/%s" % (
latestVersion,
fileName,
)
print(url)
import urllib
# Copy a network object to a local file
try:
# download our zipped tool
fileDownloaded, response = urllib.request.urlretrieve(
url, fileName, self.Handle_Progress
)
if response and fileDownloaded:
# unzip the tool
import zipfile
with zipfile.ZipFile(fileName, "r") as zip_ref:
result = zip_ref.extractall("tmp")
# start the new tool
if sys.platform == "win32" and result is None:
# updating the tool
newToolName = "ispapicli-%s" % str(latestVersion) + ".exe"
# rename the newly downloaded tool
os.rename(r"tmp/ispapicli.exe", newToolName)
# clean the directory
os.remove(fileDownloaded)
os.rmdir("tmp")
# start the new tool
os.system("" + newToolName)
self.closeApplication()
elif (
sys.platform == "linux" or sys.platform == "darwin"
) and result is None:
newToolName = "ispapicli-%s" % str(latestVersion)
# rename the newly downloaded tool
os.rename(r"tmp/ispapicli", newToolName)
# clean the directory
os.remove(fileDownloaded)
os.rmdir("tmp")
# updating the tool
os.system("sudo chmod +x " + newToolName)
os.system("./" + newToolName + " &")
self.closeApplication()
# TODO clean the old version by sending an argument
# process = subprocess.Popen(
# ["sudo chmod +x " + newToolName, "./" + newToolName]
# )
else:
return
else:
raise Exception
except Exception as e:
msgBox = QMessageBox(self)
msgNo = (
"""<p align='center'>
Problem to download: %s
</p>
"""
% e
)
# preBox.setStandardButtons(QMessageBox.Ok)
msgBox.setWindowTitle("Download")
msgBox.setText(msgNo)
msgBox.show()
# os.system(
# "gnome-terminal -- bash -c './" + scriptPath + latestVersion + ";bash'"
# )
def showAbout(self):
box = QMessageBox(self)
msg = """<p align='center'>
<b style='font-size:20px'>ISPAPI Tool</b>. <br><br><br>
Version: %s <br><br>
A simple command line interface to connect you to your account on Hexonet
<br><br>
Technical Support:
<br>
Email: [email protected]
<br>
Website: <a href="https://hexonet.github.io/ispapicli/">ISPAPI CLI Tool</a>
<br><br><br>
Copyright 2020 @Hexonet
<br><br>
</p>
"""
box.setStandardButtons(QMessageBox.Ok)
# box.setIcon(QMessageBox.Information)
box.setWindowTitle("About")
box.setText(msg % __version__)
box.show()
def eventFilter(self, source, event):
# this function handles autocomplete for the command line field
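# Illustrative example (hypothetical command name): after typing "CheckDomain"
# followed by a space, getMinParameters("CheckDomain") might return ["domain"],
# in which case the field is completed to "CheckDomain domain=" and the hint
# label reads "Min parameters: domain".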
if event.type() == QEvent.KeyRelease and source is self.cmdTxt:
if event.key() == Qt.Key_Space:
# show min parameters suggestions
try:
cmd = self.cmdTxt.text()
m = re.match("^(\w+)\s$", cmd)
if m:
minParams = self.coreLogic.getMinParameters(cmd.strip())
if len(minParams) > 0:
minParamsLabel = ", ".join(minParams)
minParamsInput = "= ".join(minParams)
cursorPosition = (
len(self.cmdTxt.text() + minParams[0]) + 1
) # for the '=' char
self.cmdTxt.setText(cmd + minParamsInput + "=")
self.minParameter.setText(
"Min parameters: " + minParamsLabel
)
self.cmdTxt.setCursorPosition(cursorPosition)
else:
self.minParameter.setText("Min parameters:")
except Exception as e:
print(e)
# must return bool value
return super(MainFrame, self).eventFilter(source, event)
def getIcon(self, iconName):
##
# This function checks if the app is executable or in development and return the path
if getattr(sys, "frozen", False):
self.absolute_dirpath = os.path.dirname(sys.executable)
try:
self.absolute_dirpath = sys._MEIPASS
except Exception:
self.absolute_dirpath = os.path.abspath(".")
path = self.command_path = os.path.join(
self.absolute_dirpath, "data/icons/" + iconName
)
elif __file__:
self.absolute_dirpath = os.path.dirname(__file__)
path = self.command_path = os.path.join(
self.absolute_dirpath, "../icons/" + iconName
)
return path
def getScriptsPath(self, system):
"""
Return the script path
"""
path = "scripts/"
# scripts/linux-download.sh
# check which platform
if system == "linux":
path = path + "linux-download.sh"
elif system == "windows":
path = path + "win-download.ps1"
else:
raise Exception
return path
| 36.81344 | 202 | 0.55802 |
79457d8f4ca32fa2ce2498098b0cfa7a0810a942 | 971 | py | Python | upvote/gae/lib/analysis/monitoring_test.py | iwikmai/upvote | 77bb200d0e35a28cc5aed98ceee8e234998814b6 | [
"Apache-2.0"
] | 453 | 2017-10-24T15:29:44.000Z | 2021-09-27T23:21:20.000Z | upvote/gae/lib/analysis/monitoring_test.py | iwikmai/upvote | 77bb200d0e35a28cc5aed98ceee8e234998814b6 | [
"Apache-2.0"
] | 58 | 2018-03-23T21:19:16.000Z | 2021-05-23T20:06:05.000Z | upvote/gae/lib/analysis/monitoring_test.py | iwikmai/upvote | 77bb200d0e35a28cc5aed98ceee8e234998814b6 | [
"Apache-2.0"
] | 36 | 2018-03-23T21:25:54.000Z | 2021-09-27T23:21:24.000Z | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for monitoring.py."""
from common.testing import basetest
from upvote.gae.lib.analysis import api # pylint: disable=unused-import
class PlaceholderTest(basetest.AppEngineTestCase):
"""Protects against errors encountered upon import. Replace when prudent."""
def testTrue(self):
self.assertTrue(True)
if __name__ == '__main__':
basetest.main()
| 32.366667 | 78 | 0.757981 |
79457db7283df577c47b7a48ae2d910bb28d2f5a | 9,035 | py | Python | contrib/testgen/gen_key_io_test_vectors.py | Darrenshome40/shitecoin | a2535c8fc5a43ee21ec818d5367439f6302cd084 | [
"MIT"
] | null | null | null | contrib/testgen/gen_key_io_test_vectors.py | Darrenshome40/shitecoin | a2535c8fc5a43ee21ec818d5367439f6302cd084 | [
"MIT"
] | null | null | null | contrib/testgen/gen_key_io_test_vectors.py | Darrenshome40/shitecoin | a2535c8fc5a43ee21ec818d5367439f6302cd084 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2012-2020 The shitecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Generate valid and invalid base58 address and private key test vectors.
Usage:
PYTHONPATH=../../test/functional/test_framework ./gen_key_io_test_vectors.py valid 50 > ../../src/test/data/key_io_valid.json
PYTHONPATH=../../test/functional/test_framework ./gen_key_io_test_vectors.py invalid 50 > ../../src/test/data/key_io_invalid.json
'''
# 2012 Wladimir J. van der Laan
# Released under MIT License
import os
from itertools import islice
from base58 import b58encode_chk, b58decode_chk, b58chars
import random
from segwit_addr import bech32_encode, decode_segwit_address, convertbits, CHARSET
# key types
PUBKEY_ADDRESS = 0
SCRIPT_ADDRESS = 5
PUBKEY_ADDRESS_TEST = 111
SCRIPT_ADDRESS_TEST = 196
PUBKEY_ADDRESS_REGTEST = 111
SCRIPT_ADDRESS_REGTEST = 196
PRIVKEY = 128
PRIVKEY_TEST = 239
PRIVKEY_REGTEST = 239
# script
OP_0 = 0x00
OP_1 = 0x51
OP_2 = 0x52
OP_16 = 0x60
OP_DUP = 0x76
OP_EQUAL = 0x87
OP_EQUALVERIFY = 0x88
OP_HASH160 = 0xa9
OP_CHECKSIG = 0xac
pubkey_prefix = (OP_DUP, OP_HASH160, 20)
pubkey_suffix = (OP_EQUALVERIFY, OP_CHECKSIG)
script_prefix = (OP_HASH160, 20)
script_suffix = (OP_EQUAL,)
p2wpkh_prefix = (OP_0, 20)
p2wsh_prefix = (OP_0, 32)
metadata_keys = ['isPrivkey', 'chain', 'isCompressed', 'tryCaseFlip']
# templates for valid sequences
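# Illustration of the template format defined below: the first entry,
# ((PUBKEY_ADDRESS,), 20, (), ...), describes a mainnet P2PKH address, i.e. a
# version byte of 0 followed by a 20-byte hash, whose expected output script is
# OP_DUP OP_HASH160 <20-byte hash> OP_EQUALVERIFY OP_CHECKSIG (pubkey_prefix +
# payload + pubkey_suffix).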
templates = [
# prefix, payload_size, suffix, metadata, output_prefix, output_suffix
# None = N/A
((PUBKEY_ADDRESS,), 20, (), (False, 'main', None, None), pubkey_prefix, pubkey_suffix),
((SCRIPT_ADDRESS,), 20, (), (False, 'main', None, None), script_prefix, script_suffix),
((PUBKEY_ADDRESS_TEST,), 20, (), (False, 'test', None, None), pubkey_prefix, pubkey_suffix),
((SCRIPT_ADDRESS_TEST,), 20, (), (False, 'test', None, None), script_prefix, script_suffix),
((PUBKEY_ADDRESS_REGTEST,), 20, (), (False, 'regtest', None, None), pubkey_prefix, pubkey_suffix),
((SCRIPT_ADDRESS_REGTEST,), 20, (), (False, 'regtest', None, None), script_prefix, script_suffix),
((PRIVKEY,), 32, (), (True, 'main', False, None), (), ()),
((PRIVKEY,), 32, (1,), (True, 'main', True, None), (), ()),
((PRIVKEY_TEST,), 32, (), (True, 'test', False, None), (), ()),
((PRIVKEY_TEST,), 32, (1,), (True, 'test', True, None), (), ()),
((PRIVKEY_REGTEST,), 32, (), (True, 'regtest', False, None), (), ()),
((PRIVKEY_REGTEST,), 32, (1,), (True, 'regtest', True, None), (), ())
]
# templates for valid bech32 sequences
bech32_templates = [
# hrp, version, witprog_size, metadata, output_prefix
('bc', 0, 20, (False, 'main', None, True), p2wpkh_prefix),
('bc', 0, 32, (False, 'main', None, True), p2wsh_prefix),
('bc', 1, 2, (False, 'main', None, True), (OP_1, 2)),
('tb', 0, 20, (False, 'test', None, True), p2wpkh_prefix),
('tb', 0, 32, (False, 'test', None, True), p2wsh_prefix),
('tb', 2, 16, (False, 'test', None, True), (OP_2, 16)),
('bcrt', 0, 20, (False, 'regtest', None, True), p2wpkh_prefix),
('bcrt', 0, 32, (False, 'regtest', None, True), p2wsh_prefix),
('bcrt', 16, 40, (False, 'regtest', None, True), (OP_16, 40))
]
# templates for invalid bech32 sequences
bech32_ng_templates = [
# hrp, version, witprog_size, invalid_bech32, invalid_checksum, invalid_char
('tc', 0, 20, False, False, False),
('tb', 17, 32, False, False, False),
('bcrt', 3, 1, False, False, False),
('bc', 15, 41, False, False, False),
('tb', 0, 16, False, False, False),
('bcrt', 0, 32, True, False, False),
('bc', 0, 16, True, False, False),
('tb', 0, 32, False, True, False),
('bcrt', 0, 20, False, False, True)
]
def is_valid(v):
'''Check vector v for validity'''
if len(set(v) - set(b58chars)) > 0:
return is_valid_bech32(v)
result = b58decode_chk(v)
if result is None:
return is_valid_bech32(v)
for template in templates:
prefix = bytearray(template[0])
suffix = bytearray(template[2])
if result.startswith(prefix) and result.endswith(suffix):
if (len(result) - len(prefix) - len(suffix)) == template[1]:
return True
return is_valid_bech32(v)
def is_valid_bech32(v):
'''Check vector v for bech32 validity'''
for hrp in ['bc', 'tb', 'bcrt']:
if decode_segwit_address(hrp, v) != (None, None):
return True
return False
def gen_valid_base58_vector(template):
'''Generate valid base58 vector'''
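# b58encode_chk() (from the contrib base58 helper) is expected to append the
# usual 4-byte double-SHA256 checksum before base58-encoding, so the result
# round-trips through b58decode_chk() in is_valid().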
prefix = bytearray(template[0])
payload = bytearray(os.urandom(template[1]))
suffix = bytearray(template[2])
dst_prefix = bytearray(template[4])
dst_suffix = bytearray(template[5])
rv = b58encode_chk(prefix + payload + suffix)
return rv, dst_prefix + payload + dst_suffix
def gen_valid_bech32_vector(template):
'''Generate valid bech32 vector'''
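# convertbits(witprog, 8, 5) regroups the witness program bytes into the 5-bit
# symbols bech32 encodes, and the witness version is prepended as its own
# 5-bit value, matching BIP 173's address format.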
hrp = template[0]
witver = template[1]
witprog = bytearray(os.urandom(template[2]))
dst_prefix = bytearray(template[4])
rv = bech32_encode(hrp, [witver] + convertbits(witprog, 8, 5))
return rv, dst_prefix + witprog
def gen_valid_vectors():
'''Generate valid test vectors'''
glist = [gen_valid_base58_vector, gen_valid_bech32_vector]
tlist = [templates, bech32_templates]
while True:
for template, valid_vector_generator in [(t, g) for g, l in zip(glist, tlist) for t in l]:
rv, payload = valid_vector_generator(template)
assert is_valid(rv)
metadata = {x: y for x, y in zip(metadata_keys,template[3]) if y is not None}
hexrepr = payload.hex()
yield (rv, hexrepr, metadata)
def gen_invalid_base58_vector(template):
'''Generate possibly invalid vector'''
# kinds of invalid vectors:
# invalid prefix
# invalid payload length
# invalid (randomized) suffix (add random data)
# corrupt checksum
corrupt_prefix = randbool(0.2)
randomize_payload_size = randbool(0.2)
corrupt_suffix = randbool(0.2)
if corrupt_prefix:
prefix = os.urandom(1)
else:
prefix = bytearray(template[0])
if randomize_payload_size:
payload = os.urandom(max(int(random.expovariate(0.5)), 50))
else:
payload = os.urandom(template[1])
if corrupt_suffix:
suffix = os.urandom(len(template[2]))
else:
suffix = bytearray(template[2])
val = b58encode_chk(prefix + payload + suffix)
if random.randint(0,10)<1: # line corruption
if randbool(): # add random character to end
val += random.choice(b58chars)
else: # replace random character in the middle
n = random.randint(0, len(val))
val = val[0:n] + random.choice(b58chars) + val[n+1:]
return val
def gen_invalid_bech32_vector(template):
'''Generate possibly invalid bech32 vector'''
no_data = randbool(0.1)
to_upper = randbool(0.1)
hrp = template[0]
witver = template[1]
witprog = bytearray(os.urandom(template[2]))
if no_data:
rv = bech32_encode(hrp, [])
else:
data = [witver] + convertbits(witprog, 8, 5)
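# When the invalid_bech32 flag (template[3]) is set, corrupt the 8-to-5-bit
# conversion: either flip a padding bit of the final 5-bit group or append a
# spurious all-zero group, so decode_segwit_address() rejects the result.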
if template[3] and not no_data:
if template[2] % 5 in {2, 4}:
data[-1] |= 1
else:
data.append(0)
rv = bech32_encode(hrp, data)
if template[4]:
i = len(rv) - random.randrange(1, 7)
rv = rv[:i] + random.choice(CHARSET.replace(rv[i], '')) + rv[i + 1:]
if template[5]:
i = len(hrp) + 1 + random.randrange(0, len(rv) - len(hrp) - 4)
rv = rv[:i] + rv[i:i + 4].upper() + rv[i + 4:]
if to_upper:
rv = rv.swapcase()
return rv
def randbool(p = 0.5):
'''Return True with P(p)'''
return random.random() < p
def gen_invalid_vectors():
'''Generate invalid test vectors'''
# start with some manual edge-cases
yield "",
yield "x",
glist = [gen_invalid_base58_vector, gen_invalid_bech32_vector]
tlist = [templates, bech32_ng_templates]
while True:
for template, invalid_vector_generator in [(t, g) for g, l in zip(glist, tlist) for t in l]:
val = invalid_vector_generator(template)
if not is_valid(val):
yield val,
if __name__ == '__main__':
import sys
import json
iters = {'valid':gen_valid_vectors, 'invalid':gen_invalid_vectors}
try:
uiter = iters[sys.argv[1]]
except IndexError:
uiter = gen_valid_vectors
try:
count = int(sys.argv[2])
except IndexError:
count = 0
data = list(islice(uiter(), count))
json.dump(data, sys.stdout, sort_keys=True, indent=4)
sys.stdout.write('\n')
| 36.578947 | 133 | 0.617488 |
79457e7a5ed0cbccf19e07c50494cc6b06aa0a1f | 59,161 | py | Python | tensorflow/python/client/session_test.py | jdehotin/TensorFlow | a6c5f8e4e013e54fed8dfcf49fb6de365f018022 | [
"Apache-2.0"
] | 6 | 2016-09-07T18:38:41.000Z | 2020-01-12T23:01:03.000Z | tensorflow/python/client/session_test.py | jdehotin/TensorFlow | a6c5f8e4e013e54fed8dfcf49fb6de365f018022 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/client/session_test.py | jdehotin/TensorFlow | a6c5f8e4e013e54fed8dfcf49fb6de365f018022 | [
"Apache-2.0"
] | 8 | 2017-06-08T09:46:06.000Z | 2021-06-20T14:03:19.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import threading
import time
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.util import compat
# NOTE(mrry): Dummy shape registration for op used in the tests.
ops.RegisterShape('ConstructionFails')(None)
class SessionTest(test_util.TensorFlowTestCase):
def testUseExistingGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session(graph=g):
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testUseDefaultGraph(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session():
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testCreate(self):
with session.Session():
inp = constant_op.constant(10.0, shape=[2, 3], name='W1')
copy = array_ops.identity(inp)
# Test with feed.
# TODO(mrry): Investigate why order='F' didn't work.
arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
copy_val = copy.eval({'W1:0': arr})
self.assertAllEqual(arr, copy_val)
# Test without feed.
copy_val = copy.eval()
self.assertAllEqual(np.asarray([[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]],
dtype=np.float32), copy_val)
def testManyCPUs(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(device_count={'CPU': 2})):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testPerSessionThreads(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(use_per_session_threads=True)):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testSessionInterOpThreadPool(self):
config = config_pb2.ConfigProto()
pool = config.session_inter_op_thread_pool.add()
with session.Session(config=config) as s:
inp = constant_op.constant(10.0, name='W1')
results = s.run([inp])
self.assertAllEqual([10.0], results)
pool = config.session_inter_op_thread_pool.add()
pool.num_threads = 1
with session.Session(config=config) as s:
inp = constant_op.constant(20.0, name='W2')
results = s.run([inp])
self.assertAllEqual([20.0], results)
def testErrorsReported(self):
with session.Session() as s:
constant_op.constant(10.0, name='W1')
with self.assertRaises(ValueError):
s.run('foo:0')
def testErrorPayload(self):
with session.Session():
a = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(lambda e: e.op == a.op):
a.eval()
def testErrorCodeWithNoNodeDef(self):
with session.Session() as s:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
def exc_predicate(e):
return (e.op is None and e.node_def is None and
e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
# Run with a bogus handle.
s.partial_run('foo', r1, feed_dict={a: 1, b: 2})
def testOpConstructionErrorPayload(self):
with session.Session():
failing_op = ops.get_default_graph().create_op(
'ConstructionFails', [], [], name='f')
def exc_predicate(e):
return (e.op == failing_op
and e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
failing_op.run()
def testErrorBasedOn(self):
with session.Session() as sess:
a = constant_op.constant(0.0, shape=[2, 3])
# NOTE(mrry): The original_op is nonsense, but used here to test that the
# errors are reported correctly.
# pylint: disable=protected-access
with sess.graph._original_op(a.op):
b = array_ops.identity(a, name='id')
with sess.graph._original_op(b.op):
c = array_ops.placeholder(dtypes.float32)
# pylint: enable=protected-access
def exc_predicate(e):
return (e.op == c.op
and e.op._original_op == b.op
and e.op._original_op._original_op == a.op)
with self.assertRaisesOpError(exc_predicate):
c.eval()
def testFetchNone(self):
with session.Session() as s:
a = constant_op.constant(1.0)
with self.assertRaises(TypeError):
s.run(None)
with self.assertRaises(TypeError):
s.run([None])
with self.assertRaises(TypeError):
s.run({'b': None})
with self.assertRaises(TypeError):
s.run({'a': a, 'b': None})
def testFetchSingleton(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
def testFetchSingletonByName(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a.name)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
def testFetchList(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
v = variables.Variable([54.0])
assign = v.assign([63.0])
res = sess.run([a, b, c, a.name, assign.op])
self.assertTrue(isinstance(res, list))
self.assertEqual(42.0, res[0])
self.assertEqual(None, res[1])
self.assertEqual(44.0, res[2])
self.assertEqual(42.0, res[3])
self.assertEqual(None, res[4])
self.assertEqual(63.0, sess.run(v))
def testFetchTuple(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run((a, b, c, a.name))
self.assertTrue(isinstance(res, tuple))
self.assertEqual(42.0, res[0])
self.assertEqual(None, res[1])
self.assertEqual(44.0, res[2])
self.assertEqual(42.0, res[3])
def testFetchNamedTuple(self):
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
# pylint: enable=invalid-name
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(ABC(a, b, c))
self.assertTrue(isinstance(res, ABC))
self.assertEqual(42.0, res.a)
self.assertEqual(None, res.b)
self.assertEqual(44.0, res.c)
def testFetchDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run({'a': a, 'b': b, 'c': c})
self.assertTrue(isinstance(res, dict))
self.assertEqual(42.0, res['a'])
self.assertEqual(None, res['b'])
self.assertEqual(44.0, res['c'])
def testFetchNestingOneLevel(self):
with session.Session() as sess:
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
DEFG = collections.namedtuple('DEFG', ['d', 'e', 'f', 'g'])
# pylint: enable=invalid-name
a_val = 42.0
b_val = None
c_val = 44.0
a = constant_op.constant(a_val)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(c_val)
# List of lists, tuples, namedtuple, and dict
res = sess.run([[a, b, c], (a, b, c), ABC(a=a, b=b, c=c),
{'a': a.name, 'c': c, 'b': b}])
self.assertTrue(isinstance(res, list))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertTrue(isinstance(res[2], ABC))
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
self.assertTrue(isinstance(res[3], dict))
self.assertEqual(a_val, res[3]['a'])
self.assertEqual(b_val, res[3]['b'])
self.assertEqual(c_val, res[3]['c'])
# Tuple of lists, tuples, namedtuple, and dict
res = sess.run(([a, b, c], (a.name, b, c), ABC(a=a, b=b, c=c),
{'a': a, 'c': c, 'b': b}))
self.assertTrue(isinstance(res, tuple))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertTrue(isinstance(res[2], ABC))
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
self.assertTrue(isinstance(res[3], dict))
self.assertEqual(a_val, res[3]['a'])
self.assertEqual(b_val, res[3]['b'])
self.assertEqual(c_val, res[3]['c'])
# Namedtuple of lists, tuples, namedtuples, and dict
res = sess.run(DEFG(d=[a, b, c],
e=(a, b, c),
f=ABC(a=a.name, b=b, c=c),
g={'a': a, 'c': c, 'b': b}))
self.assertTrue(isinstance(res, DEFG))
self.assertTrue(isinstance(res.d, list))
self.assertEqual(a_val, res.d[0])
self.assertEqual(b_val, res.d[1])
self.assertEqual(c_val, res.d[2])
self.assertTrue(isinstance(res.e, tuple))
self.assertEqual(a_val, res.e[0])
self.assertEqual(b_val, res.e[1])
self.assertEqual(c_val, res.e[2])
self.assertTrue(isinstance(res.f, ABC))
self.assertEqual(a_val, res.f.a)
self.assertEqual(b_val, res.f.b)
self.assertEqual(c_val, res.f.c)
self.assertTrue(isinstance(res.g, dict))
self.assertEqual(a_val, res.g['a'])
self.assertEqual(b_val, res.g['b'])
self.assertEqual(c_val, res.g['c'])
# Dict of lists, tuples, namedtuples, and dict
res = sess.run({'d': [a, b, c],
'e': (a, b, c),
'f': ABC(a=a, b=b, c=c),
'g': {'a': a.name, 'c': c, 'b': b}})
self.assertTrue(isinstance(res, dict))
self.assertTrue(isinstance(res['d'], list))
self.assertEqual(a_val, res['d'][0])
self.assertEqual(b_val, res['d'][1])
self.assertEqual(c_val, res['d'][2])
self.assertTrue(isinstance(res['e'], tuple))
self.assertEqual(a_val, res['e'][0])
self.assertEqual(b_val, res['e'][1])
self.assertEqual(c_val, res['e'][2])
self.assertTrue(isinstance(res['f'], ABC))
self.assertEqual(a_val, res['f'].a)
self.assertEqual(b_val, res['f'].b)
self.assertEqual(c_val, res['f'].c)
self.assertTrue(isinstance(res['g'], dict))
self.assertEqual(a_val, res['g']['a'])
self.assertEqual(b_val, res['g']['b'])
self.assertEqual(c_val, res['g']['c'])
def testFetchTensorObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
results_with_list = s.run([c])
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
results_with_single = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
results_with_get = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
a_val, b_val = s.run([a, b]) # Test multiple fetches.
self.assertAllEqual([[1.0, 1.0]], a_val)
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
results_with_dict = s.run({'a': [a], 'b': b, 'z': [a, b]})
self.assertAllEqual([[1.0, 1.0]], results_with_dict['a'][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_dict['b'])
self.assertAllEqual(results_with_dict['a'][0], results_with_dict['z'][0])
self.assertAllEqual(results_with_dict['b'], results_with_dict['z'][1])
# Test nested structures
results_with_nested_list = s.run([[[a, b], b], a, [a, b]])
self.assertAllEqual([[1.0, 1.0]], results_with_nested_list[0][0][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_nested_list[0][0][1])
self.assertAllEqual(results_with_nested_list[0][0][0],
results_with_nested_list[1])
self.assertAllEqual(results_with_nested_list[1],
results_with_nested_list[2][0])
self.assertAllEqual(results_with_nested_list[0][0][1],
results_with_nested_list[0][1])
self.assertAllEqual(results_with_nested_list[0][1],
results_with_nested_list[2][1])
def testFetchScalar(self):
with session.Session() as s:
for scalar in np.int32, np.int64, np.float16, np.float32, np.float64:
x = scalar(7)
y = scalar(8)
tf_x = constant_op.constant(x, shape=[])
tf_y = constant_op.constant(y)
tf_xy = math_ops.add(tf_x, tf_y)
# Single fetch
xy = s.run(tf_xy)
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# List fetch
xy, = s.run([tf_xy])
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Dict fetch
xy = s.run({'xy': tf_xy})['xy']
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Nested list fetch
xy = s.run([[[tf_xy]], tf_xy, [tf_xy]])
self.assertAllEqual(xy, [[[x + y]], x + y, [x + y]])
self.assertEqual(scalar, type(xy[0][0][0]))
self.assertEqual(scalar, type(xy[1]))
self.assertEqual(scalar, type(xy[2][0]))
def testFetchOperationObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
v = variables.Variable(a, name='testFetchOperationObject_v')
s.run(v.initializer)
v_val = s.run(v)
self.assertAllEqual([[1.0, 1.0]], v_val)
def testFetchSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = ops.SparseTensor(
constant_op.constant(indices),
constant_op.constant(values),
constant_op.constant(shape))
# Single fetch, use as tuple
sp_out = s.run(sp)
indices_out, values_out, shape_out = sp_out
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Single fetch, use as SparseTensorValue
sp_out = s.run(sp)
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.shape, shape)
# Tuple fetch, use as tuple
indices_out, values_out, shape_out = s.run(sp)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as tuple
(indices_out, values_out, shape_out), = s.run([sp])
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as SparseTensorValue
sp_out, = s.run([sp])
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.shape, shape)
# Dict fetch (single value), use as tuple
indices_out, values_out, shape_out = s.run({'sp': sp})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch (list value), use as tuple
(indices_out, values_out, shape_out), = s.run({'sp': [sp]})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch, use as SparseTensorValue
sp_out = s.run({'sp': sp})['sp']
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.shape, shape)
# Nested list fetch use as tuple
sp_out = s.run([[[sp]], sp])
indices_out, values_out, shape_out = sp_out[0][0][0]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
indices_out, values_out, shape_out = sp_out[1]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Nested list fetch, use as SparseTensorValue
sp_out = s.run([[[sp]], sp])
self.assertAllEqual(sp_out[0][0][0].indices, indices)
self.assertAllEqual(sp_out[0][0][0].values, values)
self.assertAllEqual(sp_out[0][0][0].shape, shape)
self.assertAllEqual(sp_out[1].indices, indices)
self.assertAllEqual(sp_out[1].values, values)
self.assertAllEqual(sp_out[1].shape, shape)
def testFeedSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = ops.SparseTensor(
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.shape)
sp2 = ops.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with tuple, fetch sp directly
sp_out = s.run(sp, {sp: (indices, values, shape)})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.shape, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: ops.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {sp: ops.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.shape, shape)
# Feed SparseTensorValue and fetch sp directly.
sp_out = s.run(sp, {sp: ops.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.shape, shape)
def testFeedSparsePlaceholder(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.shape)
sp2 = ops.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: ops.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {sp: ops.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.shape, shape)
def testFeedSparePlaceholderConstantShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(dtype=np.float32,
shape=shape,
name='placeholder1')
self.assertAllEqual(sp.shape.eval(session=s), shape)
self.assertAllEqual(tensor_util.constant_value(sp.shape), shape)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
def testFetchIndexedSlices(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices),
constant_op.constant(dense_shape))
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlices(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32,
shape=(2,)),
array_ops.placeholder(dtype=np.int64,
shape=(2, 3)),
array_ops.placeholder(dtype=np.int64,
shape=(3,)),)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind_dense_shape = array_ops.identity(ind.dense_shape)
ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
# Feed with tuple
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape],
{ind: (values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape],
{ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
dense_shape)})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testFetchIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = None
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices), None)
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = None
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32,
shape=(2,)),
array_ops.placeholder(dtype=np.int64,
shape=(2, 3)),
None)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind2 = ops.IndexedSlices(ind_values, ind_indices)
# Feed with tuple
values_out, indices_out = s.run(
[ind_values, ind_indices], {ind: (values, indices)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue
values_out, indices_out = s.run(
[ind_values, ind_indices],
{ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
dense_shape)})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testExtendWithStatelessOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
# Extend will happen here.
e_val = s.run(e)
self.assertAllEqual([[24.0]], e_val)
def testExtendWithStatefulOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
# Extend will happen here.
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
def testExtendWithGroupBy(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
p = variables.Variable(a, name='testExtendWithGroupBy_p')
a_val = a.eval() # Force an Extend after this op.
self.assertAllEqual([[1.0, 1.0]], a_val)
b = constant_op.constant(2.0, shape=[1, 2])
q = variables.Variable(b, name='testExtendWithGroupBy_q')
# Extend will happen here.
init = control_flow_ops.group(p.initializer, q.initializer)
s.run(init)
p_val, q_val = s.run([p, q])
self.assertAllEqual([[1.0, 1.0]], p_val)
self.assertAllEqual([[2.0, 2.0]], q_val)
def testTensorGetMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
def testOperationRunMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 2], name='b')
v = variables.Variable(a, a.dtype)
assign_a_to_v = state_ops.assign(v, a)
assign_a_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[1.0, 1.0]], v_val)
assign_b_to_v = state_ops.assign(v, b)
assign_b_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[2.0, 2.0]], v_val)
assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
v_val = v.eval()
self.assertAllEqual([[3.0, 3.0]], v_val)
def testDefaultGraph(self):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
self.assertEqual(ops.get_default_graph(), a.graph)
self.assertEqual(ops.get_default_graph(), b.graph)
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testDefaultGraph_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='var_%d' % i)
# Block here until all threads have constructed their graph.
constructed_event.set()
continue_event.wait()
assign_c_to_v = state_ops.assign(v, c)
v.initializer.run()
assign_c_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def testDefaultGraphWithThreads(self):
# Fork ten threads that use their thread-local default graph.
threads = []
constructed_events = [threading.Event() for _ in range(10)]
continue_event = threading.Event()
for i, constructed_event in enumerate(constructed_events):
t = self.checkedThread(target=self._testDefaultGraphInThread,
args=(constructed_event, continue_event, i))
threads.append(t)
for t in threads:
t.start()
for constructed_event in constructed_events:
constructed_event.wait()
continue_event.set()
for t in threads:
t.join()
def testParallelRun(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
ev = threading.Event()
def run_step():
ev.wait()
val = c.eval(session=sess)
self.assertEqual(val, 5.0)
threads = [self.checkedThread(target=run_step) for _ in range(100)]
for t in threads:
t.start()
ev.set()
for t in threads:
t.join()
def testRunFeedDict(self):
with session.Session() as s:
x = array_ops.zeros([2])
y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x: [1, 1]})
assert (y == 2 * np.ones(2)).all()
# Test nested tuple keys
z = (((array_ops.zeros([2]),),), array_ops.zeros([2]),
(array_ops.zeros([2]),))
result = [z[0][0][0] * 2, z[1] * 2, z[2][0] * 2]
values = (((np.array([1, 1]),),), np.array([2, 2]), (np.array([3, 3]),))
result_value = s.run(result, feed_dict={z: values})
self.assertAllEqual(result_value[0], 2 * np.ones(2))
self.assertAllEqual(result_value[1], 2 * np.array([2, 2]))
self.assertAllEqual(result_value[2], 2 * np.array([3, 3]))
def testGraphDef(self):
with session.Session() as sess:
self.assertProtoEquals(
'versions { producer: %d min_consumer: %d }' % (
versions.GRAPH_DEF_VERSION,
versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
sess.graph_def)
c = constant_op.constant(5.0, name='c')
self.assertEquals(len(sess.graph_def.node), 1)
d = constant_op.constant(6.0, name='d')
self.assertEquals(len(sess.graph_def.node), 2)
self.assertAllEqual(c.eval(), 5.0)
self.assertAllEqual(d.eval(), 6.0)
e = constant_op.constant(7.0, name='e')
self.assertEquals(len(sess.graph_def.node), 3)
self.assertAllEqual(e.eval(), 7.0)
def testUseAfterClose(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
sess.run(c)
def testUseAfterCloseConcurrent(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
def update_thread():
with self.assertRaisesWithPredicateMatch(
RuntimeError,
lambda e: 'Attempted to use a closed Session.' in str(e)):
while True:
sess.run(c)
t = threading.Thread(target=update_thread)
t.start()
time.sleep(0.1)
sess.close()
t.join()
def testUseEmptyGraph(self):
with session.Session() as sess:
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'The Session graph is empty.' in str(e)):
sess.run([])
def testNotEntered(self):
# pylint: disable=protected-access
self.assertEqual(ops._default_session_stack.get_default(), None)
# pylint: enable=protected-access
with ops.device('/cpu:0'):
sess = session.Session()
c_1 = constant_op.constant(5.0)
with sess.graph.as_default():
c_2 = constant_op.constant(5.0)
self.assertEqual(c_1.graph, c_2.graph)
self.assertEqual(sess.run(c_2), 5.0)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: 'No default session is registered.' in str(e)):
c_2.eval()
def testInteractive(self):
with ops.device('/cpu:0'):
sess = session.InteractiveSession()
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval())
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
self.assertAllEqual([[24.0]], e.eval())
sess.close()
def testInteractivePlacePrunedGraph(self):
sess = session.InteractiveSession()
# Build a graph that has a bad op in it (no kernel).
#
# This test currently does not link in any GPU kernels,
# which is why placing this is invalid. If at some point
# GPU kernels are added to this test, some other different
# op / device combo should be chosen.
with ops.device('/gpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
# Only run the valid op, this should work.
b.eval()
with self.assertRaises(errors.InvalidArgumentError):
a.eval()
sess.close()
def testDefaultSessionPlacePrunedGraph(self):
sess = session.Session()
# Build a graph that has a bad op in it (no kernel).
#
# This test currently does not link in any GPU kernels,
# which is why placing this is invalid. If at some point
# GPU kernels are added to this test, some other different
# op / device combo should be chosen.
with ops.device('/gpu:0'):
_ = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
with self.assertRaises(errors.InvalidArgumentError):
# Even though we don't run the bad op, we place the entire
# graph, which should fail with a non-interactive session.
sess.run(b)
sess.close()
def testSharedGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
with session.Session(graph=g) as sess1:
with session.Session(graph=g) as sess2:
self.assertAllEqual(sess1.run(c), sess2.run(c))
def testDuplicatedInputs(self):
with session.Session() as sess:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 3])
a_val, b_val, a2_val = sess.run([a, b, a])
self.assertAllEqual(a_val, [[1.0, 1.0]])
self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
self.assertAllEqual(a2_val, [[1.0, 1.0]])
def testFeedAndFetch(self):
with session.Session() as sess:
for dtype in [dtypes.float16,
dtypes.float32,
dtypes.float64,
dtypes.int32,
dtypes.uint8,
dtypes.int16,
dtypes.int8,
dtypes.int64,
dtypes.bool,
dtypes.complex64,
dtypes.complex128]:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
np_dtype = dtype.as_numpy_dtype
feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
out_t = array_ops.identity(feed_t)
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes.bool:
np_array = np_array > 0
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
          elif dtype == dtypes.complex128:
            np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
self.assertAllEqual(np_array,
sess.run(out_t, feed_dict={feed_t: np_array}))
# Check that we can also get the feed back.
self.assertAllEqual(np_array,
sess.run(feed_t, feed_dict={feed_t: np_array}))
# Also check that we can get both back.
out_v, feed_v = sess.run([out_t, feed_t],
feed_dict={feed_t: np_array})
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
def testFeedError(self):
with session.Session() as sess:
feed_t = array_ops.placeholder(dtype=dtypes.float32)
out_t = array_ops.identity(feed_t)
feed_val = constant_op.constant(5.0)
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
sess.run(out_t, feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.eval(feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.op.run(feed_dict={feed_t: feed_val})
def testFeedPrecisionLossError(self):
with session.Session() as sess:
largest_int64 = np.iinfo(np.int64).max
feed_int_implicit_int32 = constant_op.constant(1)
feed_int_explicit_int32 = constant_op.constant(1, dtype=dtypes.int32)
out_t = constant_op.constant(1.0)
with self.assertRaisesRegexp(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_implicit_int32: largest_int64})
with self.assertRaisesRegexp(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_explicit_int32: largest_int64})
def testStringFetch(self):
with session.Session():
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape) if size > 0 else []
c = constant_op.constant(c_list)
self.assertAllEqual(c.eval(), c_list)
def testStringFeed(self):
with session.Session() as sess:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape)
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
c = array_ops.identity(feed_t)
self.assertAllEqual(sess.run(c, feed_dict={feed_t: c_list}), c_list)
self.assertAllEqual(sess.run(feed_t, feed_dict={feed_t: c_list}),
c_list)
c_v, feed_v = sess.run([c, feed_t], feed_dict={feed_t: c_list})
self.assertAllEqual(c_v, c_list)
self.assertAllEqual(feed_v, c_list)
def testStringFeedWithNullCharacters(self):
with session.Session():
c_list = [b'\n\x01\x00', b'\n\x00\x01']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0])
self.assertEqual(c_list[1], out[1])
def testStringFeedWithUnicode(self):
with session.Session():
c_list = [u'\n\x01\x00', u'\n\x00\x01',
u'\u26a3 unicode', u'\U0001f60e deal with it']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[len(c_list)])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
def testInvalidTargetFails(self):
with self.assertRaisesRegexp(
errors.NotFoundError,
'No session factory registered for the given session options'):
session.Session('INVALID_TARGET')
def testFetchByNameDifferentStringTypes(self):
with session.Session() as sess:
c = constant_op.constant(42.0, name='c')
d = constant_op.constant(43.0, name=u'd')
e = constant_op.constant(44.0, name=b'e')
f = constant_op.constant(45.0, name=r'f')
self.assertTrue(isinstance(c.name, six.text_type))
self.assertTrue(isinstance(d.name, six.text_type))
self.assertTrue(isinstance(e.name, six.text_type))
self.assertTrue(isinstance(f.name, six.text_type))
self.assertEqual(42.0, sess.run('c:0'))
self.assertEqual(42.0, sess.run(u'c:0'))
self.assertEqual(42.0, sess.run(b'c:0'))
self.assertEqual(42.0, sess.run(r'c:0'))
self.assertEqual(43.0, sess.run('d:0'))
self.assertEqual(43.0, sess.run(u'd:0'))
self.assertEqual(43.0, sess.run(b'd:0'))
self.assertEqual(43.0, sess.run(r'd:0'))
self.assertEqual(44.0, sess.run('e:0'))
self.assertEqual(44.0, sess.run(u'e:0'))
self.assertEqual(44.0, sess.run(b'e:0'))
self.assertEqual(44.0, sess.run(r'e:0'))
self.assertEqual(45.0, sess.run('f:0'))
self.assertEqual(45.0, sess.run(u'f:0'))
self.assertEqual(45.0, sess.run(b'f:0'))
self.assertEqual(45.0, sess.run(r'f:0'))
def testIncorrectGraph(self):
with ops.Graph().as_default() as g_1:
c_1 = constant_op.constant(1.0, name='c')
with ops.Graph().as_default() as g_2:
c_2 = constant_op.constant(2.0, name='c')
self.assertEqual('c', c_1.op.name)
self.assertEqual('c', c_2.op.name)
with session.Session(graph=g_1) as sess_1:
self.assertEqual(1.0, sess_1.run(c_1))
with self.assertRaises(ValueError):
sess_1.run(c_2)
with self.assertRaises(ValueError):
sess_1.run(c_2.op)
with session.Session(graph=g_2) as sess_2:
with self.assertRaises(ValueError):
sess_2.run(c_1)
with self.assertRaises(ValueError):
sess_2.run(c_1.op)
self.assertEqual(2.0, sess_2.run(c_2))
def testPartialRun(self):
with session.Session() as sess:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 17
res = sess.partial_run(h, r2, feed_dict={c: temp})
self.assertEqual(153, res)
# Call again on the same graph.
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h2, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 18
res = sess.partial_run(h2, r2, feed_dict={c: temp})
self.assertEqual(162, res)
def testPartialRunIncomplete(self):
with session.Session() as sess:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
def testConcurrentPartialRun(self):
with session.Session() as sess:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h1 = sess.partial_run_setup([r1], [a, b, c])
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h1, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 19
res = sess.partial_run(h2, r1, feed_dict={a: temp, b: 9})
self.assertEqual(66, res)
res = sess.partial_run(h2, r2, feed_dict={c: 7})
self.assertEqual(462, res)
def testManyPartialRun(self):
with session.Session() as sess:
steps = 200
inputs = []
outputs = []
a = constant_op.constant(2.0, dtypes.float32)
for i in xrange(steps):
inputs.append(array_ops.placeholder(dtypes.float32, shape=[]))
a = math_ops.mul(a, inputs[i])
outputs.append(a)
h = sess.partial_run_setup(outputs, inputs)
for i in xrange(steps):
res = sess.partial_run(h, outputs[i], feed_dict={inputs[i]: 1.0})
self.assertEqual(2.0, res)
feed_dict = {}
for i in xrange(steps):
feed_dict[inputs[i]] = 1.0
res = sess.run(outputs, feed_dict)
self.assertEqual(steps, len(res))
self.assertEqual(2.0, res[-1])
def testRunAndPartialRun(self):
with session.Session() as sess:
a = constant_op.constant(2.0, dtypes.float32)
b = a * 2
c = b * 3
r1 = sess.run([b, c])
h = sess.partial_run_setup([b, c], [])
r2 = sess.partial_run(h, [b, c])
self.assertEqual(r1, r2)
def testFeedDictKeyException(self):
with session.Session() as sess:
a = constant_op.constant(1.0, dtypes.float32, name='a')
with self.assertRaisesRegexp(TypeError, 'Cannot interpret feed_dict'):
sess.run(a, feed_dict={'a': [2.0]})
def testPerStepTrace(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
sess.run(constant_op.constant(1.0))
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
self.assertEquals(len(run_metadata.step_stats.dev_stats), 1)
def testRunOptionsRunMetadata(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
# all combinations are valid
sess.run(constant_op.constant(1.0), options=None, run_metadata=None)
sess.run(constant_op.constant(1.0), options=None,
run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), options=run_options,
run_metadata=None)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
self.assertEquals(len(run_metadata.step_stats.dev_stats), 1)
def testFeedShapeCompatibility(self):
with session.Session() as sess:
some_tensor = constant_op.constant([2.0, 2.0, 2.0, 2.0])
new_shape = constant_op.constant([2, 2])
reshaped_tensor = array_ops.reshape(some_tensor, new_shape)
with self.assertRaisesRegexp(ValueError, 'Cannot feed value of shape'):
sess.run(reshaped_tensor, feed_dict={some_tensor: [1.0, 2.0, 3.0]})
with self.assertRaisesRegexp(ValueError, 'may not be fed'):
sess.run(reshaped_tensor, feed_dict={new_shape: [3, 7]})
def testInferShapesFalse(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session()
self.assertFalse('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testInferShapesTrue(self):
config = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(infer_shapes=True))
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session(config=config)
self.assertTrue('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testBuildCostModel(self):
run_options = config_pb2.RunOptions()
config = config_pb2.ConfigProto(
allow_soft_placement=True,
graph_options=config_pb2.GraphOptions(build_cost_model=100))
with session.Session(config=config) as sess:
with ops.device('/gpu:0'):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = math_ops.add(a, a)
c = array_ops.identity(b)
d = math_ops.mul(c, c)
for step in xrange(120):
run_metadata = config_pb2.RunMetadata()
sess.run(d, feed_dict={a: 1.0},
options=run_options, run_metadata=run_metadata)
if step == 99:
self.assertTrue(run_metadata.HasField('cost_graph'))
else:
self.assertFalse(run_metadata.HasField('cost_graph'))
def testNonInteractiveSessionNesting(self):
sess1 = session.Session()
sess1_controller = sess1.as_default()
sess1_controller.__enter__()
sess2 = session.Session()
sess2_controller = sess2.as_default()
sess2_controller.__enter__()
with self.assertRaisesRegexp(AssertionError, 'Nesting violated'):
sess1_controller.__exit__(None, None, None)
ops._default_session_stack.reset()
def testInteractiveSessionNesting(self):
sess1 = session.InteractiveSession()
sess2 = session.InteractiveSession()
del sess1
del sess2
def testAsDefault(self):
c = constant_op.constant(37)
sess = session.Session()
with sess.as_default():
self.assertEqual(37, c.eval())
# Ensure that the session remains valid even when it is not captured.
with session.Session().as_default():
self.assertEqual(37, c.eval())
def testReentry(self):
sess = session.Session()
with self.assertRaisesRegexp(RuntimeError, 'not re-entrant'):
with sess:
with sess:
pass
def testInvalidArgument(self):
with self.assertRaisesRegexp(TypeError, 'target must be a string'):
session.Session(37)
with self.assertRaisesRegexp(TypeError, 'config must be a tf.ConfigProto'):
session.Session(config=37)
with self.assertRaisesRegexp(TypeError, 'graph must be a tf.Graph'):
session.Session(graph=37)
def testTimeoutWithShortOperations(self):
num_epochs = 5
q = data_flow_ops.FIFOQueue(
capacity=50, dtypes=[dtypes.int32], shapes=[()])
enqueue_op = q.enqueue_many(constant_op.constant([1, 2]))
# Use a 10-second timeout, which should be longer than any
# non-blocking enqueue_many op.
config = config_pb2.ConfigProto(operation_timeout_in_ms=10000)
with session.Session(config=config) as sess:
for _ in range(num_epochs):
sess.run(enqueue_op)
self.assertEqual(sess.run(q.size()), num_epochs * 2)
if __name__ == '__main__':
googletest.main()
| 40.000676 | 80 | 0.634557 |
79457eaf585414e2b86f2778ecb75c9f439c725c | 863 | py | Python | Medium/0015. 3Sum/0015. 3Sum_version1.py | FlyProbe/LeetCode-Solutions | 4489722573ecc2d4358ba5120e2af3d7407bb3b1 | [
"Apache-2.0"
] | 1 | 2019-07-25T06:43:35.000Z | 2019-07-25T06:43:35.000Z | Medium/0015. 3Sum/0015. 3Sum_version1.py | FlyProbe/LeetCode-Solutions | 4489722573ecc2d4358ba5120e2af3d7407bb3b1 | [
"Apache-2.0"
] | null | null | null | Medium/0015. 3Sum/0015. 3Sum_version1.py | FlyProbe/LeetCode-Solutions | 4489722573ecc2d4358ba5120e2af3d7407bb3b1 | [
"Apache-2.0"
] | null | null | null | from typing import List
# Fix one number, then sweep the rest with two pointers. Since nums is sorted beforehand:
# 1) the fixed number is the smallest of its triplet, so the loop can stop once it exceeds 0;
# 2) duplicate fixed numbers yield nothing new and can be skipped.
class Solution:
def threeSum(self, nums: List[int]) -> List[List[int]]:
l = len(nums)
res = {}
nums.sort()
pre = None
for i in range(l-2):
            if nums[i] > 0: # the smallest (fixed) number is already greater than 0, so stop
                break
            if nums[i] == pre: # skip duplicate fixed numbers
continue
pre = nums[i]
j = i+1
k = l-1
while j < k:
if nums[i] + nums[j] + nums[k] < 0:
j += 1
elif nums[i] + nums[j] + nums[k] > 0:
k -= 1
else:
res[(nums[i], nums[j], nums[k])] = 1
j += 1
sol = []
for keys in res:
sol.append(list(keys))
return sol
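# Illustrative quick check (added sketch; not part of the original solution).
# The input list below is only an assumed sample.
if __name__ == '__main__':
    print(Solution().threeSum([-1, 0, 1, 2, -1, -4]))
    # expected output (ordering of triplets may vary): [[-1, -1, 2], [-1, 0, 1]]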
| 26.96875 | 59 | 0.36153 |
79457f9c77305813d13739661b25cdf89350c686 | 694 | py | Python | autoApply.py | pyl/autoApply | 5aa41529ae4630af8717dd653b300a9486e21199 | [
"MIT"
] | null | null | null | autoApply.py | pyl/autoApply | 5aa41529ae4630af8717dd653b300a9486e21199 | [
"MIT"
] | null | null | null | autoApply.py | pyl/autoApply | 5aa41529ae4630af8717dd653b300a9486e21199 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import requests
import pprint
from collections import OrderedDict
url = "https://github.com/pittcsc/Summer2021-Internships"
source = requests.get(url).text
soup = BeautifulSoup(source, 'lxml')
table = soup.table
# Note: print(soup.prettify) shows no tables, but print(soup.table) works somehow.
rows = table.find_all('tr')
rows.pop(0)
links = []
linkdict = OrderedDict()
for x in rows:
if x.td.a is not None:
try:
linkdict[x.td.a.text] = x.td.a.get('href')
except:
print("ERROR" + x.td.a)
else:
linkdict[x.td.text] = None
# reverse the order of the collected entries before printing
linkdict = OrderedDict(reversed(list(linkdict.items())))
pprint.pprint(linkdict) | 22.387097 | 63 | 0.680115 |
79457fd27f3ab83b35976da22be8c7fc24e4069c | 12,219 | py | Python | tests/test_atc_tasks.py | kpressouyre/nornir_f5 | 9e36ae3e19ac6d45d3858579856fbcc196fe512f | [
"Apache-2.0"
] | 1 | 2021-03-23T04:21:08.000Z | 2021-03-23T04:21:08.000Z | tests/test_atc_tasks.py | kpressouyre/nornir_f5 | 9e36ae3e19ac6d45d3858579856fbcc196fe512f | [
"Apache-2.0"
] | 1 | 2021-12-01T19:42:14.000Z | 2021-12-01T19:42:14.000Z | tests/test_atc_tasks.py | kpressouyre/nornir_f5 | 9e36ae3e19ac6d45d3858579856fbcc196fe512f | [
"Apache-2.0"
] | 1 | 2021-11-25T18:12:47.000Z | 2021-11-25T18:12:47.000Z | import json
import re
import pytest
import responses
from nornir_f5.plugins.tasks import atc
from .conftest import assert_result, base_decl_dir, base_resp_dir, load_json
@pytest.mark.parametrize(
("kwargs", "resp", "task_statuses", "expected"),
[
# GET AS3 declaration with show and show_hash
(
{"as3_show": "full", "as3_show_hash": True, "atc_service": "AS3"},
{"status_code": 200, "data": f"{base_decl_dir}/atc/as3/simple_01.json"},
[""],
{"result_file": f"{base_decl_dir}/atc/as3/simple_01.json"},
),
# Dry-run
(
{"atc_service": "AS3", "dry_run": True},
{"status_code": 200, "data": f"{base_decl_dir}/atc/as3/simple_01.json"},
[""],
{"result": None},
),
# GET declaration with invalid atc_service
(
{"atc_declaration": {"class": "AS2"}, "atc_service": "AS2"},
{},
[""],
{"result": "ATC service 'AS2' is not valid.", "failed": True},
),
# POST AS3 declaration without atc_service
(
{
"as3_tenant": "Simple_01",
"atc_declaration": {"class": "AS3"},
"atc_method": "POST",
},
{
"status_code": 200,
"data": f"{base_resp_dir}/atc/as3/declaration_successfully_submitted.json", # noqa B950
},
["in progress", "success"],
{"result": "ATC declaration successfully deployed.", "changed": True},
),
# POST AS3 declaration from file
(
{
"as3_tenant": "Simple_01",
"atc_declaration_file": f"{base_decl_dir}/atc/as3/simple_01.json",
"atc_method": "POST",
"atc_service": "AS3",
},
{
"status_code": 200,
"data": f"{base_resp_dir}/atc/as3/declaration_successfully_submitted.json", # noqa B950
},
["in progress", "success"],
{"result": "ATC declaration successfully deployed.", "changed": True},
),
# POST AS3 declaration from url
(
{
"as3_tenant": "Simple_01",
"atc_declaration_url": "https://test.com/simple_01.json",
"atc_method": "POST",
"atc_service": "AS3",
},
{
"status_code": 200,
"data": f"{base_resp_dir}/atc/as3/declaration_successfully_submitted.json", # noqa B950
},
["in progress", "no change"],
{
"result": "ATC declaration successfully submitted, but no change required.", # noqa B950
},
),
# POST AS3 declaration, failed
(
{
"atc_declaration": {"class": "AS3"},
"atc_method": "POST",
"atc_service": "AS3",
"as3_tenant": "Simple_01",
},
{
"status_code": 200,
"data": f"{base_resp_dir}/atc/as3/declaration_failed.json",
},
[],
{"result": "The declaration deployment failed.", "failed": True},
),
# POST AS3 declaration, timeout
(
{
"atc_declaration": {"class": "AS3"},
"atc_method": "POST",
"atc_service": "AS3",
"as3_tenant": "Simple_01",
},
{
"status_code": 200,
"data": f"{base_resp_dir}/atc/as3/declaration_successfully_submitted.json", # noqa B950
},
["in progress"],
{"result": "The task has reached maximum retries.", "failed": True},
),
# POST AS3 declaration, error message
(
{
"atc_declaration": {"class": "AS3"},
"atc_method": "POST",
"atc_service": "AS3",
"as3_tenant": "Simple_01",
},
{
"status_code": 200,
"data": f"{base_resp_dir}/atc/as3/declaration_successfully_submitted.json", # noqa B950
},
["in progress", "failed"],
{"result": "The task failed.", "failed": True},
),
# DELETE AS3 declaration
(
{
"as3_tenant": "Simple_01",
"atc_declaration": {"class": "AS3"},
"atc_method": "DELETE",
},
{
"status_code": 200,
"data": f"{base_resp_dir}/atc/as3/declaration_successfully_submitted.json", # noqa B950
},
["in progress", "success"],
{"result": "ATC declaration successfully deployed.", "changed": True},
),
# PATCH AS3 declaration, invalid atc method
(
{
"atc_declaration": {"class": "AS3"},
"atc_method": "PATCH",
"atc_service": "AS3",
},
{},
[""],
{"result": "ATC method 'PATCH' is not valid.", "failed": True},
),
],
)
@pytest.mark.parametrize("as3_version", ["3.4.0", "3.22.1"])
@responses.activate
def test_as3_deploy(nornir, kwargs, resp, task_statuses, expected, as3_version):
task_id = "4eb601c4-7f06-4fd7-b8d5-947e7b206a37"
# Callback to provide dynamic task status responses
def get_task_callback(request):
calls = [
d
for d in responses.calls
if f"/mgmt/shared/appsvcs/task/{task_id}" in d.request.url
]
if len(calls) == 0:
current_task_status = task_statuses[0]
elif len(calls) < len(task_statuses):
current_task_status = task_statuses[len(calls)]
else:
current_task_status = task_statuses[len(task_statuses) - 1]
return (
200,
{},
json.dumps(
load_json(
f"{base_resp_dir}/atc/as3/task_{current_task_status.replace(' ', '_').lower()}.json" # noqa B950
)
),
)
# Register mock responses
# GET AS3 declaration from url
responses.add(
responses.GET,
"https://test.com/simple_01.json",
json=load_json(f"{base_decl_dir}/atc/as3/simple_01.json"),
status=200,
)
# GET AS3 info
responses.add(
responses.GET,
"https://bigip1.localhost:443/mgmt/shared/appsvcs/info",
json=load_json(f"{base_resp_dir}/atc/as3/version_{as3_version}.json"),
status=200,
)
# GET AS3 task
responses.add_callback(
responses.GET,
f"https://bigip1.localhost:443/mgmt/shared/appsvcs/task/{task_id}",
callback=get_task_callback,
)
if resp:
responses_data = load_json(resp["data"])
responses.add(
kwargs["atc_method"] if "atc_method" in kwargs else "GET",
re.compile(
"https://bigip1.localhost:443/mgmt/shared/appsvcs/declare(/Simple_01)?"
),
match_querystring=False,
json=responses_data,
status=resp["status_code"],
)
# Run task
nornir = nornir.filter(name="bigip1.localhost")
result = nornir.run(
name="Deploy AS3 Declaration",
task=atc,
atc_delay=0,
atc_retries=3,
**kwargs,
)
# Assert result
assert_result(result, expected)
@pytest.mark.parametrize(
("kwargs", "resp", "task_statuses", "expected"),
[
# POST DO declaration from file
(
{
"atc_declaration_file": f"{base_decl_dir}/atc/device/basic.json",
"atc_method": "POST",
"atc_service": "Device",
},
{
"status_code": 200,
"data": f"{base_resp_dir}/atc/device/task_processing.json",
},
["processing", "success"],
{"result": "ATC declaration successfully deployed.", "changed": True},
),
],
)
@responses.activate
def test_do_deploy(nornir, kwargs, resp, task_statuses, expected):
task_id = "5eb601c4-7f06-4fd7-b8d5-947e7b206a38"
# Callback to provide dynamic task status responses
def get_task_callback(request):
calls = [
d
for d in responses.calls
if f"/mgmt/shared/declarative-onboarding/task/{task_id}" in d.request.url
]
if len(calls) == 0:
current_task_status = task_statuses[0]
elif len(calls) < len(task_statuses):
current_task_status = task_statuses[len(calls)]
else:
current_task_status = task_statuses[len(task_statuses) - 1]
return (
200,
{},
json.dumps(
load_json(
f"{base_resp_dir}/atc/device/task_{current_task_status.replace(' ', '_').lower()}.json" # noqa B950
)
),
)
# Register mock responses
# GET DO info
responses.add(
responses.GET,
"https://bigip1.localhost:443/mgmt/shared/declarative-onboarding/info",
json=load_json(f"{base_resp_dir}/atc/device/version_3.22.1.json"),
status=200,
)
# GET DO task
responses.add_callback(
responses.GET,
f"https://bigip1.localhost:443/mgmt/shared/declarative-onboarding/task/{task_id}", # noqa B950
callback=get_task_callback,
)
if resp:
responses_data = load_json(resp["data"])
responses.add(
kwargs["atc_method"] if "atc_method" in kwargs else "GET",
"https://bigip1.localhost:443/mgmt/shared/declarative-onboarding",
match_querystring=False,
json=responses_data,
status=resp["status_code"],
)
# Run task
nornir = nornir.filter(name="bigip1.localhost")
result = nornir.run(
name="Deploy DO Declaration",
task=atc,
atc_delay=0,
atc_retries=3,
**kwargs,
)
# Assert result
assert_result(result, expected)
@pytest.mark.parametrize(
("kwargs", "resp", "expected"),
[
# POST TS declaration from file
(
{
"atc_declaration_file": f"{base_decl_dir}/atc/telemetry/default_pull_consumer.json", # noqa B950
"atc_method": "POST",
"atc_service": "Telemetry",
},
{
"status_code": 200,
"data": f"{base_resp_dir}/atc/telemetry/success.json",
},
{
"result_file": f"{base_resp_dir}/atc/telemetry/success.json",
},
),
# POST TS declaration, failed
(
{
"atc_declaration": {"class": "Telemetry"},
"atc_method": "POST",
"atc_service": "Telemetry",
},
{
"status_code": 200,
"data": f"{base_resp_dir}/atc/telemetry/failed.json",
},
{"result": "The declaration deployment failed.", "failed": True},
),
],
)
@responses.activate
def test_ts_deploy(nornir, kwargs, resp, expected):
# Register mock responses
# GET TS info
responses.add(
responses.GET,
"https://bigip1.localhost:443/mgmt/shared/telemetry/info",
json=load_json(f"{base_resp_dir}/atc/telemetry/version_1.17.0.json"),
status=200,
)
if resp:
responses_data = load_json(resp["data"])
responses.add(
kwargs["atc_method"] if "atc_method" in kwargs else "GET",
"https://bigip1.localhost:443/mgmt/shared/telemetry/declare",
json=responses_data,
status=resp["status_code"],
)
# Run task
nornir = nornir.filter(name="bigip1.localhost")
result = nornir.run(
name="Deploy TS Declaration",
task=atc,
atc_delay=0,
atc_retries=3,
**kwargs,
)
# Assert result
assert_result(result, expected)
| 31.820313 | 120 | 0.513299 |
794580af9835eca63a78ba64195d2bddae497d2e | 12,457 | py | Python | trusd/cli.py | mathiasbockwoldt/TruSD | 7d0ec42e46e706eb9cf4de1b92a29f18a85159d9 | [
"MIT"
] | null | null | null | trusd/cli.py | mathiasbockwoldt/TruSD | 7d0ec42e46e706eb9cf4de1b92a29f18a85159d9 | [
"MIT"
] | 3 | 2021-03-10T12:36:37.000Z | 2021-03-11T09:51:49.000Z | trusd/cli.py | mathiasbockwoldt/TruSD | 7d0ec42e46e706eb9cf4de1b92a29f18a85159d9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
'''
This is the command line interface for TruSD and auxiliary scripts. As such,
this module is only meant for use from the command line. For information about
TruSD, please refer to help(trusd) or https://github.com/mathiasbockwoldt/TruSD .
'''
import argparse
import json
import sys
import numpy as np
def parse_string_as_list(string, func, name, expected_length=0):
'''
Split a string by comma (,) and apply a function on each value. The function
is usually int or float to turn the values into numbers.
@param string: The string to parse
@param func: Function to apply on each value of the list
@param name: The name to report in case of an error
@param expected_length: Expected length (as integer) of the resulting list
@returns: A list with the values defined by the parameters
'''
lst = string.split(',')
if expected_length and len(lst) != expected_length:
print(
'Input {} has only {} elements, but should have {}.'.format(
name, len(lst), expected_length
), file=sys.stderr)
sys.exit(1)
try:
lst = [func(x) for x in lst]
except ValueError:
print(
'Elements in input {} must be of type {}'.format(
name, func.__name__
), file=sys.stderr)
sys.exit(1)
return lst
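# Illustrative sketch (added): how the comma-separated CLI values are parsed.
# The inputs below are hypothetical and mirror the argparse defaults used in main():
#   parse_string_as_list('0,1,0.005', float, '--proportion', 3)  # -> [0.0, 1.0, 0.005]
#   parse_string_as_list('0,50', int, '--times')                 # -> [0, 50]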
def main():
'''
Main function for the command line interface of TruSD. Parse the command
line arguments and read a file, calculate the likelihoods in a grid and save
the results to another file.
'''
import trusd
parser = argparse.ArgumentParser(description='''
TruSD co-infers selection coefficients and genetic drift from allele
trajectories using a maximum-likelihood framework.''')
parser.add_argument('infile', metavar='file.txt',
help='path to input file')
parser.add_argument('-d', '--delimiter', metavar='x', default=',',
help='''delimiter for input file. Use "tab" or "space"
for these special characters. [default: %(default)s]''')
parser.add_argument('-c', '--colskip', metavar='n', default=0, type=int,
help='''number of columns to skip from the beginning
(left) [default: %(default)s]''')
parser.add_argument('-o', '--outfile', metavar='out.csv', default='outfile.csv',
help='output file [default: %(default)s]')
parser.add_argument('-n', '--noinfo', action='store_true',
help='''if set, no informational json file will be
written along with the result table.''')
parser.add_argument('-g', '--genepop', metavar='int', default=200, type=int,
help='population size [default: %(default)s]')
parser.add_argument('-p', '--proportion', metavar='start,stop,step',
default='0,1,0.005',
help='''proportion; give in the form start,stop,step
without whitespace, where the values are integers or
floats. Mutually exclusive with -P/--proplist.
[default: %(default)s]''')
parser.add_argument('-P', '--proplist', metavar='p1,p2,...',
help='''list of proportions; give in the form
p1,p2,p3,... without whitespace, where px are integers
or floats. Mutually exclusive with -p/--proportion.
[default: %(default)s]''')
parser.add_argument('-s', '--selection', metavar='start,stop,step',
default='-0.08,0.08,0.002',
help='''selection coefficient; give in the form
start,stop,step without whitespace, where the values are
integers or floats. Mutually exclusive with
-S/--seleclist. [default: %(default)s]''')
parser.add_argument('-S', '--seleclist', metavar='s1,s2,...',
help='''list of selection coefficients; give in the form
s1,s2,s3,... without whitespace, where sx are integers
or floats. Mutually exclusive with -s/--selection.
[default: %(default)s]''')
parser.add_argument('-t', '--times', metavar='t1,t2,...', default='0,50',
						help='''time stamps; give in the form t1,t2,t3,...
without whitespace, where tx are integers.
[default: %(default)s]''')
args = parser.parse_args()
if args.proplist:
prop_list = np.array(
parse_string_as_list(args.proplist, float, '--proplist')
)
else:
prop = parse_string_as_list(args.proportion, float, '--proportion', 3)
prop_list = np.arange(prop[0], prop[1] + prop[2], prop[2])
if args.seleclist:
selec_list = np.array(
parse_string_as_list(args.seleclist, float, '--seleclist')
)
else:
selec = parse_string_as_list(args.selection, float, '--selection', 3)
selec_list = np.arange(selec[0], selec[1] + selec[2], selec[2])
times = parse_string_as_list(args.times, int, '--times')
if args.delimiter == 'tab':
args.delimiter = '\t'
elif args.delimiter == 'space':
args.delimiter = ' '
trajectories = trusd.read_trajectory_file(
args.infile,
delimiter=args.delimiter,
skip_columns=args.colskip
)
results = trusd.likelihood_grid(
trajectories,
args.genepop,
prop_list,
selec_list,
times
)
np.savetxt(args.outfile, results, delimiter=',')
if not args.noinfo:
trusd.write_info_file(
input_file = args.infile,
output_file = args.outfile,
command = ' '.join(sys.argv),
pop_size = args.genepop,
times = times,
proportions = list(prop_list),
selection_coefficients = list(selec_list),
delimiter = args.delimiter
)
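# Illustrative invocation (added). Assuming main() is exposed as a console script named
# "trusd" (the entry-point name and the file names here are assumptions), a run could look like:
#   trusd trajectories.txt --genepop 200 --times 0,50 --outfile outfile.csv
# which reads trajectories.txt, scans the default proportion/selection grids and writes the
# likelihood grid to outfile.csv, plus an accompanying info JSON unless --noinfo is set.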
def simulate():
'''
Main function for the command line interface for TruSD simulate. Parse the
command line arguments, simulate trajectories and save the results to files.
'''
import trusd.simulate as sim
parser = argparse.ArgumentParser(description='''
TruSD simulate simulates evolutionary trajectories based on given
parameters.''')
parser.add_argument('-d', '--delimiter', metavar='x', default=',',
help='''delimiter for output files. Use "tab" or "space"
for these special characters. [default: %(default)s]''')
parser.add_argument('-o', '--outdir', metavar='out/', default='.',
help='output directory [default: %(default)s]')
parser.add_argument('-s', '--sums', metavar='s1,s2,...', default='10',
help='''list of sums of trajectories; give in the form
s1,s2,s3,... without whitespace, where sx are integers
or floats. [default: %(default)s]''')
parser.add_argument('-t', '--times', metavar='t1,t2,...', default='10,20',
						help='''time stamps; give in the form t1,t2,t3,...
without whitespace, where tx are integers.
[default: %(default)s]''')
parser.add_argument('-S', '--seleclist', metavar='s1,s2,...', default='-0.05,0,0.05',
help='''list of selection coefficients; give in the form
s1,s2,s3,... without whitespace, where sx are integers
or floats. [default: %(default)s]''')
parser.add_argument('-P', '--proplist', metavar='p1,p2,...', default='0.1,0.5,0.9',
help='''list of proportions; give in the form
p1,p2,p3,... without whitespace, where px are integers
or floats. [default: %(default)s]''')
parser.add_argument('-g', '--genepop', metavar='int', default=200, type=int,
help='population size [default: %(default)s]')
parser.add_argument('-G', '--generations', metavar='int', default=50, type=int,
help='number of generations [default: %(default)s]')
parser.add_argument('-f', '--startfreq', metavar='float', default=0.5, type=float,
help='start frequency of allele a [default: %(default)s]')
parser.add_argument('--seed', metavar='int', type=int,
help='''seed for the pseudo random number generation for
						reproducibility. If none is given, the PRNG is
initialized according to Python defaults.''')
args = parser.parse_args()
sums_list = np.array(parse_string_as_list(args.sums, int, '--sums'))
times_list = np.array(parse_string_as_list(args.times, int, '--times'))
selec_list = np.array(parse_string_as_list(args.seleclist, float, '--seleclist'))
prop_list = np.array(parse_string_as_list(args.proplist, float, '--proplist'))
if args.delimiter == 'tab':
args.delimiter = '\t'
elif args.delimiter == 'space':
args.delimiter = ' '
if args.seed:
sim.init_prng(args.seed)
sim.run_group_of_simulations(
sums_of_trajectories = sums_list,
time_points = times_list,
sel_coeffs = selec_list,
proportions = prop_list,
pop_size = args.genepop,
generations = args.generations,
start_freq = args.startfreq,
outdir = args.outdir,
delimiter = args.delimiter
)
def plot():
'''
Main function for the command line interface for TruSD plot. Parse the
command line arguments, open files and plot them.
'''
import trusd.plot as tplot
parser = argparse.ArgumentParser(description='''
TruSD plot plots evolutionary trajectories based on given parameters.''')
parser.add_argument('infile', metavar='in.csv',
help='path to input file')
parser.add_argument('-i', '--infofile', metavar='in.json',
help='''path to info file with the plotting parameters.
If none is given, -p/-P and -s/-S must be given!''')
parser.add_argument('-d', '--delimiter', metavar='x', default=',',
help='''delimiter for input file. Use "tab" or "space"
for these special characters. Overwritten by `--infofile`.
[default: %(default)s]''')
parser.add_argument('-p', '--proportion', metavar='start,stop,step',
default='0,1,0.005',
help='''proportion; give in the form start,stop,step
without whitespace, where the values are integers or
floats. Mutually exclusive with -P/--proplist.
Not needed, when `--infofile` is given, which will
override this value. [default: %(default)s]''')
parser.add_argument('-P', '--proplist', metavar='p1,p2,...',
help='''list of proportions; give in the form
p1,p2,p3,... without whitespace, where px are integers
or floats. Mutually exclusive with -p/--proportion.
Not needed, when `--infofile` is given, which will
override this value. [default: %(default)s]''')
parser.add_argument('-s', '--selection', metavar='start,stop,step',
default='-0.08,0.08,0.002',
help='''selection coefficient; give in the form
start,stop,step without whitespace, where the values are
integers or floats. Mutually exclusive with
-S/--seleclist. Not needed, when `--infofile` is given,
which will override this value. [default: %(default)s]''')
parser.add_argument('-S', '--seleclist', metavar='s1,s2,...',
help='''list of selection coefficients; give in the form
s1,s2,s3,... without whitespace, where sx are integers
or floats. Mutually exclusive with -s/--selection.
Not needed, when `--infofile` is given, which will
override this value. [default: %(default)s]''')
parser.add_argument('-n', '--numtraj', metavar='n', default=500, type=int,
help='''number of trajectories. This value is only used
for information on the top of the plot.
[default: %(default)s]''')
parser.add_argument('-c', '--contourline', metavar='n', default=1.92, type=float,
help='''subtract this value to display the contour line.
Somewhat arbitrary; try various values. Set to 0 to
hide the line.
[default: %(default)s]''')
parser.add_argument('-o', '--outfile', metavar='out.pdf',
help='''save plot to this filename. Choose file type by
extension. Typical extensions are: pdf, png, tiff, svg.
Check your local matplotlib installation for other
possible file extensions. If this argument is missing,
nothing will be saved.''')
parser.add_argument('-v', '--view', action='store_true',
help='if set, show the plot in an interactive window')
args = parser.parse_args()
if args.infofile:
info = json.load(open(args.infofile))
selec_list = info['selection_coefficients']
prop_list = info['proportions']
delimiter = info['delimiter']
else:
if args.proplist:
prop_list = np.array(
parse_string_as_list(args.proplist, float, '--proplist')
)
else:
prop = parse_string_as_list(args.proportion, float, '--proportion', 3)
prop_list = np.arange(prop[0], prop[1] + prop[2], prop[2])
if args.seleclist:
selec_list = np.array(
parse_string_as_list(args.seleclist, float, '--seleclist')
)
else:
selec = parse_string_as_list(args.selection, float, '--selection', 3)
selec_list = np.arange(selec[0], selec[1] + selec[2], selec[2])
delimiter = args.delimiter
tplot.contour_plot(
input_file = args.infile,
num_trajectories = args.numtraj,
s_list = selec_list,
p_list = prop_list,
contour_line_subtract = args.contourline,
delimiter = delimiter,
save = args.outfile,
show = args.view
)
| 34.506925 | 86 | 0.675363 |
794581858e66b8b7bd414205e1bfaf754c403967 | 3,087 | py | Python | verdin/query.py | localstack/verdin | c830d2a0cabb0ef040eb7c513553682844d9051e | [
"Apache-2.0"
] | 5 | 2021-09-25T14:14:20.000Z | 2021-10-30T06:06:51.000Z | verdin/query.py | localstack/verdin | c830d2a0cabb0ef040eb7c513553682844d9051e | [
"Apache-2.0"
] | 1 | 2022-01-26T11:40:07.000Z | 2022-01-26T11:40:08.000Z | verdin/query.py | localstack/verdin | c830d2a0cabb0ef040eb7c513553682844d9051e | [
"Apache-2.0"
] | null | null | null | import enum
import logging
from typing import Any, Dict, List, Optional, TypedDict
import requests
from . import config
LOG = logging.getLogger(__name__)
class OutputFormat(enum.Enum):
# https://docs.tinybird.co/api-reference/query-api.html#id6
CSV = "CSV"
CSVWithNames = "CSVWithNames"
JSON = "JSON"
TSV = "TSV"
TSVWithNames = "TSVWithNames"
PrettyCompact = "PrettyCompact"
JSONEachRow = "JSONEachRow"
class QueryMetadata(TypedDict):
name: str
type: str
class Statistics(TypedDict):
elapsed: float
rows_read: int
bytes_read: int
JsonData = Dict[str, Any]
class JsonResult(TypedDict):
meta: List[QueryMetadata]
data: List[JsonData]
rows: int
statistics: Statistics
class QueryJsonResult:
response: requests.Response
result: JsonResult
def __init__(self, response: requests.Response):
self.response = response
self.result = response.json()
@property
def empty(self):
return not self.result.get("data")
@property
def meta(self) -> List[QueryMetadata]:
return self.result.get("meta")
@property
def data(self) -> List[JsonData]:
return self.result.get("data")
class QueryError(Exception):
def __init__(self, response: requests.Response) -> None:
self.response = response
msg = response.text
try:
doc = response.json()
if doc["error"]:
msg = doc["error"]
except Exception:
pass
super().__init__(f"{response.status_code}: {msg}")
class SqlQuery:
"""
Tinybird SQL Query. https://docs.tinybird.co/api-reference/query-api.html#get--v0-sql
"""
endpoint: str = "/v0/sql"
sql: str
format: Optional[OutputFormat]
def __init__(self, sql: str, token, format: Optional[OutputFormat] = None, api=None) -> None:
self.sql = sql
self.format = format or OutputFormat.JSON
self.token = token
self.api = (api or config.API_URL).rstrip("/") + self.endpoint
def get(self, format: Optional[OutputFormat] = None):
# TODO: replicate tinybird API concepts instead of returning Response
query = {"q": self._sql_with_format(format or self.format)}
headers = {"Content-Type": "text/html; charset=utf-8"}
if self.token:
headers["Authorization"] = f"Bearer {self.token}"
LOG.debug(
"querying %s with query: %s",
self.api,
query,
)
response = requests.get(url=self.api, params=query, headers=headers)
if not response.ok:
raise QueryError(response)
return response
def json(self) -> QueryJsonResult:
response = self.get(OutputFormat.JSON)
return QueryJsonResult(response)
def _sql_with_format(self, output_format: Optional[OutputFormat] = None):
# TODO: handle potentially already existing FORMAT string
if not output_format:
return self.sql
return self.sql + f" FORMAT {output_format.value}"
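# Illustrative usage sketch (added; the SQL and token below are placeholders):
#   query = SqlQuery("SELECT count() FROM my_datasource", token="<TINYBIRD_TOKEN>")
#   result = query.json()      # issues GET /v0/sql with "... FORMAT JSON" appended
#   if not result.empty:
#       print(result.data)     # rows as a list of dicts, per JsonResult["data"]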
| 24.895161 | 97 | 0.630062 |
794581bca7ad5cd4eafa91f1b67ee74c7f98af85 | 4,292 | py | Python | pkgs/development/tools/analysis/radare2/update.py | peanutbutter144/nixpkgs | 2c117aec376244aa3daf3020b0691dcad1de7492 | [
"MIT"
] | 5 | 2021-07-31T15:06:36.000Z | 2022-03-19T10:27:06.000Z | pkgs/development/tools/analysis/radare2/update.py | peanutbutter144/nixpkgs | 2c117aec376244aa3daf3020b0691dcad1de7492 | [
"MIT"
] | 627 | 2019-10-06T15:19:30.000Z | 2020-03-11T05:16:56.000Z | pkgs/development/tools/analysis/radare2/update.py | peanutbutter144/nixpkgs | 2c117aec376244aa3daf3020b0691dcad1de7492 | [
"MIT"
] | 6 | 2020-01-27T13:30:39.000Z | 2020-11-06T04:49:38.000Z | #!/usr/bin/env nix-shell
#!nix-shell -p nix -p python3 -p git -i python
# USAGE - just run the script: ./update.py
# When editing this file, make also sure it passes the mypy typecheck
# and is formatted with black.
import fileinput
import json
import re
import subprocess
import tempfile
import urllib.request
from datetime import datetime
from pathlib import Path
from typing import Dict
SCRIPT_DIR = Path(__file__).parent.resolve()
def sh(*args: str) -> str:
out = subprocess.check_output(list(args))
return out.strip().decode("utf-8")
def prefetch_github(owner: str, repo: str, ref: str) -> str:
return sh(
"nix-prefetch-url",
"--unpack",
f"https://github.com/{owner}/{repo}/archive/{ref}.tar.gz",
)
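# Illustrative sketch (added): prefetch_github("radare", "radare2", "5.0.0") shells out to
#   nix-prefetch-url --unpack https://github.com/radare/radare2/archive/5.0.0.tar.gz
# and returns the hash that nix-prefetch-url prints ("5.0.0" is only an example ref).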
def get_radare2_rev() -> str:
url = "https://api.github.com/repos/radare/radare2/releases/latest"
with urllib.request.urlopen(url) as response:
release = json.load(response) # type: ignore
return release["tag_name"]
def get_cutter_version() -> str:
version_expr = """
(with import <nixpkgs> {}; (builtins.parseDrvName (qt5.callPackage <radare2/cutter.nix> {}).name).version)
"""
return sh("nix", "eval", "--raw", version_expr.strip(), "-I", "radare2={0}".format(SCRIPT_DIR))
def get_r2_cutter_rev() -> str:
version = get_cutter_version()
url = f"https://api.github.com/repos/radareorg/cutter/contents?ref=v{version}"
with urllib.request.urlopen(url) as response:
data = json.load(response) # type: ignore
for entry in data:
if entry["name"] == "radare2":
return entry["sha"]
raise Exception("no radare2 submodule found in github.com/radareorg/cutter")
def git(dirname: str, *args: str) -> str:
return sh("git", "-C", dirname, *args)
def get_repo_info(dirname: str, rev: str) -> Dict[str, str]:
sha256 = prefetch_github("radare", "radare2", rev)
cs_ver = None
with open(Path(dirname).joinpath("shlr", "Makefile")) as makefile:
for l in makefile:
            match = re.match(r"CS_VER=(\S+)", l)
if match:
cs_ver = match.group(1)
assert cs_ver is not None
cs_sha256 = prefetch_github("aquynh", "capstone", cs_ver)
return dict(
rev=rev,
sha256=sha256,
version_commit=git(dirname, "rev-list", "--all", "--count"),
gittap=git(dirname, "describe", "--tags", "--match", "[0-9]*"),
gittip=git(dirname, "rev-parse", "HEAD"),
cs_ver=cs_ver,
cs_sha256=cs_sha256,
)
def write_package_expr(version: str, info: Dict[str, str]) -> str:
return f"""generic {{
version_commit = "{info["version_commit"]}";
gittap = "{info["gittap"]}";
gittip = "{info["gittip"]}";
rev = "{info["rev"]}";
version = "{version}";
sha256 = "{info["sha256"]}";
cs_ver = "{info["cs_ver"]}";
cs_sha256 = "{info["cs_sha256"]}";
}}"""
def main() -> None:
radare2_rev = get_radare2_rev()
r2_cutter_rev = get_r2_cutter_rev()
with tempfile.TemporaryDirectory() as dirname:
git(
dirname,
"clone",
"--branch",
radare2_rev,
"https://github.com/radare/radare2",
".",
)
nix_file = str(SCRIPT_DIR.joinpath("default.nix"))
radare2_info = get_repo_info(dirname, radare2_rev)
git(dirname, "checkout", r2_cutter_rev)
timestamp = git(dirname, "log", "-n1", "--format=%at")
r2_cutter_version = datetime.fromtimestamp(int(timestamp)).strftime("%Y-%m-%d")
r2_cutter_info = get_repo_info(dirname, r2_cutter_rev)
in_block = False
with fileinput.FileInput(nix_file, inplace=True) as f:
for l in f:
if "#<generated>" in l:
in_block = True
print(
f""" #<generated>
# DO NOT EDIT! Automatically generated by ./update.py
radare2 = {write_package_expr(radare2_rev, radare2_info)};
r2-for-cutter = {write_package_expr(r2_cutter_version, r2_cutter_info)};
#</generated>"""
)
elif "#</generated>" in l:
in_block = False
elif not in_block:
print(l, end="")
if __name__ == "__main__":
main()
| 30.225352 | 106 | 0.599254 |
79458204cddbf340f05c7c18b253717914e06992 | 901 | py | Python | setup.py | yangll0620/importnb | 67a68f7efb0808a536bd6d25d46ddfc217bd5f1e | ["MIT"] | null | null | null | setup.py | yangll0620/importnb | 67a68f7efb0808a536bd6d25d46ddfc217bd5f1e | ["MIT"] | null | null | null | setup.py | yangll0620/importnb | 67a68f7efb0808a536bd6d25d46ddfc217bd5f1e | ["MIT"] | null | null | null | # Always prefer setuptools over distutils
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(name = 'import_nbmodule',
version = '0.0.1',
description = 'import jupyter notebook .ipynb module',
long_description = long_description,
long_description_content_type = "text/markdown",
author = "Lingling Yang",
author_email = "[email protected]",
py_modules = ['import_nbmodule'],
packages = find_packages(),
# the project's homepage
url = 'https://github.com/yangll0620/import_nbmodule',
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
) | 39.173913 | 82 | 0.567148 |
7945823367840e2e7993dd7b2e094093ef7285e1 | 14,064 | py | Python | cryptoapis/model/list_transactions_by_block_height_ribsd_vin.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | ["MIT"] | 5 | 2021-05-17T04:45:03.000Z | 2022-03-23T12:51:46.000Z | cryptoapis/model/list_transactions_by_block_height_ribsd_vin.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | ["MIT"] | null | null | null | cryptoapis/model/list_transactions_by_block_height_ribsd_vin.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | ["MIT"] | 2 | 2021-06-02T07:32:26.000Z | 2022-02-12T02:36:23.000Z | """
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from cryptoapis.exceptions import ApiAttributeError
def lazy_import():
from cryptoapis.model.list_transactions_by_block_height_ribsd_script_sig import ListTransactionsByBlockHeightRIBSDScriptSig
globals()['ListTransactionsByBlockHeightRIBSDScriptSig'] = ListTransactionsByBlockHeightRIBSDScriptSig
class ListTransactionsByBlockHeightRIBSDVin(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'addresses': ([str],), # noqa: E501
'coinbase': (str,), # noqa: E501
'script_sig': (ListTransactionsByBlockHeightRIBSDScriptSig,), # noqa: E501
'sequence': (str,), # noqa: E501
'txinwitness': ([str],), # noqa: E501
'vout': (int,), # noqa: E501
'txid': (str,), # noqa: E501
'value': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'addresses': 'addresses', # noqa: E501
'coinbase': 'coinbase', # noqa: E501
'script_sig': 'scriptSig', # noqa: E501
'sequence': 'sequence', # noqa: E501
'txinwitness': 'txinwitness', # noqa: E501
'vout': 'vout', # noqa: E501
'txid': 'txid', # noqa: E501
'value': 'value', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, addresses, coinbase, script_sig, sequence, txinwitness, vout, *args, **kwargs): # noqa: E501
"""ListTransactionsByBlockHeightRIBSDVin - a model defined in OpenAPI
Args:
addresses ([str]):
coinbase (str): Represents the coinbase hex.
script_sig (ListTransactionsByBlockHeightRIBSDScriptSig):
sequence (str): Represents the script sequence number.
txinwitness ([str]):
vout (int): It refers to the index of the output address of this transaction. The index starts from 0.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
txid (str): Represents the reference transaction identifier.. [optional] # noqa: E501
value (str): Represents the sent/received amount.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.addresses = addresses
self.coinbase = coinbase
self.script_sig = script_sig
self.sequence = sequence
self.txinwitness = txinwitness
self.vout = vout
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, addresses, coinbase, script_sig, sequence, txinwitness, vout, *args, **kwargs): # noqa: E501
"""ListTransactionsByBlockHeightRIBSDVin - a model defined in OpenAPI
Args:
addresses ([str]):
coinbase (str): Represents the coinbase hex.
script_sig (ListTransactionsByBlockHeightRIBSDScriptSig):
sequence (str): Represents the script sequence number.
txinwitness ([str]):
vout (int): It refers to the index of the output address of this transaction. The index starts from 0.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
txid (str): Represents the reference transaction identifier.. [optional] # noqa: E501
value (str): Represents the sent/received amount.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.addresses = addresses
self.coinbase = coinbase
self.script_sig = script_sig
self.sequence = sequence
self.txinwitness = txinwitness
self.vout = vout
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
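# Illustrative usage sketch (not part of the generated model): constructing the
# model by hand with made-up field values. In practice instances are usually
# built by the SDK's deserialization layer rather than directly.
#
#   vin = ListTransactionsByBlockHeightRIBSDVin(
#       addresses=["bc1qexampleaddress"],          # hypothetical address
#       coinbase="03abc123",                       # hypothetical coinbase hex
#       script_sig=ListTransactionsByBlockHeightRIBSDScriptSig(...),  # required nested model
#       sequence="4294967295",
#       txinwitness=[],
#       vout=0,
#   )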
| 45.811075 | 484 | 0.589377 |
7945828abc92d6e0f6d91a49fc339316a9d05c30 | 49,367 | py | Python | tensorflow/python/kernel_tests/summary_ops_test.py | A-Pot/tensorflow | 2d1cf8523b06ff29f53ddb8b6506e53660b51aed | [
"Apache-2.0"
] | null | null | null | tensorflow/python/kernel_tests/summary_ops_test.py | A-Pot/tensorflow | 2d1cf8523b06ff29f53ddb8b6506e53660b51aed | [
"Apache-2.0"
] | null | null | null | tensorflow/python/kernel_tests/summary_ops_test.py | A-Pot/tensorflow | 2d1cf8523b06ff29f53ddb8b6506e53660b51aed | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for V2 summary ops from summary_ops_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
import six
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import step_stats_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import summary_ops_v2 as summary_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class SummaryOpsCoreTest(test_util.TensorFlowTestCase):
def testWrite(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
output = summary_ops.write('tag', 42, step=12)
self.assertTrue(output.numpy())
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
value = events[1].summary.value[0]
self.assertEqual('tag', value.tag)
self.assertEqual(42, to_numpy(value))
def testWrite_fromFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function
def f():
with writer.as_default():
return summary_ops.write('tag', 42, step=12)
output = f()
self.assertTrue(output.numpy())
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
value = events[1].summary.value[0]
self.assertEqual('tag', value.tag)
self.assertEqual(42, to_numpy(value))
def testWrite_metadata(self):
logdir = self.get_temp_dir()
metadata = summary_pb2.SummaryMetadata()
metadata.plugin_data.plugin_name = 'foo'
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.write('obj', 0, 0, metadata=metadata)
summary_ops.write('bytes', 0, 0, metadata=metadata.SerializeToString())
m = constant_op.constant(metadata.SerializeToString())
summary_ops.write('string_tensor', 0, 0, metadata=m)
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(metadata, events[1].summary.value[0].metadata)
self.assertEqual(metadata, events[2].summary.value[0].metadata)
self.assertEqual(metadata, events[3].summary.value[0].metadata)
def testWrite_name(self):
@def_function.function
def f():
output = summary_ops.write('tag', 42, step=12, name='anonymous')
self.assertTrue(output.name.startswith('anonymous'))
f()
def testWrite_ndarray(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.write('tag', [[1, 2], [3, 4]], step=12)
events = events_from_logdir(logdir)
value = events[1].summary.value[0]
self.assertAllEqual([[1, 2], [3, 4]], to_numpy(value))
def testWrite_tensor(self):
logdir = self.get_temp_dir()
with context.eager_mode():
t = constant_op.constant([[1, 2], [3, 4]])
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.write('tag', t, step=12)
expected = t.numpy()
events = events_from_logdir(logdir)
value = events[1].summary.value[0]
self.assertAllEqual(expected, to_numpy(value))
def testWrite_tensor_fromFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function
def f(t):
with writer.as_default():
summary_ops.write('tag', t, step=12)
t = constant_op.constant([[1, 2], [3, 4]])
f(t)
expected = t.numpy()
events = events_from_logdir(logdir)
value = events[1].summary.value[0]
self.assertAllEqual(expected, to_numpy(value))
def testWrite_stringTensor(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.write('tag', [b'foo', b'bar'], step=12)
events = events_from_logdir(logdir)
value = events[1].summary.value[0]
self.assertAllEqual([b'foo', b'bar'], to_numpy(value))
@test_util.run_gpu_only
def testWrite_gpuDeviceContext(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
with ops.device('/GPU:0'):
value = constant_op.constant(42.0)
step = constant_op.constant(12, dtype=dtypes.int64)
summary_ops.write('tag', value, step=step).numpy()
empty_metadata = summary_pb2.SummaryMetadata()
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
self.assertEqual(42, to_numpy(events[1].summary.value[0]))
self.assertEqual(empty_metadata, events[1].summary.value[0].metadata)
@test_util.also_run_as_tf_function
def testWrite_noDefaultWriter(self):
# Use assertAllEqual instead of assertFalse since it works in a defun.
self.assertAllEqual(False, summary_ops.write('tag', 42, step=0))
@test_util.also_run_as_tf_function
def testWrite_noStep_okayIfAlsoNoDefaultWriter(self):
# Use assertAllEqual instead of assertFalse since it works in a defun.
self.assertAllEqual(False, summary_ops.write('tag', 42))
def testWrite_noStep(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
with self.assertRaisesRegex(ValueError, 'No step set'):
summary_ops.write('tag', 42)
def testWrite_noStep_okayIfNotRecordingSummaries(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
with summary_ops.record_if(False):
self.assertFalse(summary_ops.write('tag', 42))
def testWrite_usingDefaultStep(self):
logdir = self.get_temp_dir()
try:
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.set_step(1)
summary_ops.write('tag', 1.0)
summary_ops.set_step(2)
summary_ops.write('tag', 1.0)
mystep = variables.Variable(10, dtype=dtypes.int64)
summary_ops.set_step(mystep)
summary_ops.write('tag', 1.0)
mystep.assign_add(1)
summary_ops.write('tag', 1.0)
events = events_from_logdir(logdir)
self.assertEqual(5, len(events))
self.assertEqual(1, events[1].step)
self.assertEqual(2, events[2].step)
self.assertEqual(10, events[3].step)
self.assertEqual(11, events[4].step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_usingDefaultStepConstant_fromFunction(self):
logdir = self.get_temp_dir()
try:
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function
def f():
with writer.as_default():
summary_ops.write('tag', 1.0)
summary_ops.set_step(1)
f()
summary_ops.set_step(2)
f()
events = events_from_logdir(logdir)
self.assertEqual(3, len(events))
self.assertEqual(1, events[1].step)
# The step value will still be 1 because the value was captured at the
# time the function was first traced.
self.assertEqual(1, events[2].step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_usingDefaultStepVariable_fromFunction(self):
logdir = self.get_temp_dir()
try:
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function
def f():
with writer.as_default():
summary_ops.write('tag', 1.0)
mystep = variables.Variable(0, dtype=dtypes.int64)
summary_ops.set_step(mystep)
f()
mystep.assign_add(1)
f()
mystep.assign(10)
f()
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(0, events[1].step)
self.assertEqual(1, events[2].step)
self.assertEqual(10, events[3].step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_usingDefaultStepConstant_fromLegacyGraph(self):
logdir = self.get_temp_dir()
try:
with context.graph_mode():
writer = summary_ops.create_file_writer_v2(logdir)
summary_ops.set_step(1)
with writer.as_default():
write_op = summary_ops.write('tag', 1.0)
summary_ops.set_step(2)
with self.cached_session() as sess:
sess.run(writer.init())
sess.run(write_op)
sess.run(write_op)
sess.run(writer.flush())
events = events_from_logdir(logdir)
self.assertEqual(3, len(events))
self.assertEqual(1, events[1].step)
# The step value will still be 1 because the value was captured at the
# time the graph was constructed.
self.assertEqual(1, events[2].step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_usingDefaultStepVariable_fromLegacyGraph(self):
logdir = self.get_temp_dir()
try:
with context.graph_mode():
writer = summary_ops.create_file_writer_v2(logdir)
mystep = variables.Variable(0, dtype=dtypes.int64)
summary_ops.set_step(mystep)
with writer.as_default():
write_op = summary_ops.write('tag', 1.0)
first_assign_op = mystep.assign_add(1)
second_assign_op = mystep.assign(10)
with self.cached_session() as sess:
sess.run(writer.init())
sess.run(mystep.initializer)
sess.run(write_op)
sess.run(first_assign_op)
sess.run(write_op)
sess.run(second_assign_op)
sess.run(write_op)
sess.run(writer.flush())
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(0, events[1].step)
self.assertEqual(1, events[2].step)
self.assertEqual(10, events[3].step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_usingDefaultStep_fromAsDefault(self):
logdir = self.get_temp_dir()
try:
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
with writer.as_default(step=1):
summary_ops.write('tag', 1.0)
with writer.as_default():
summary_ops.write('tag', 1.0)
with writer.as_default(step=2):
summary_ops.write('tag', 1.0)
summary_ops.write('tag', 1.0)
summary_ops.set_step(3)
summary_ops.write('tag', 1.0)
events = events_from_logdir(logdir)
self.assertListEqual([1, 1, 2, 1, 3], [e.step for e in events[1:]])
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_usingDefaultStepVariable_fromAsDefault(self):
logdir = self.get_temp_dir()
try:
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
mystep = variables.Variable(1, dtype=dtypes.int64)
with writer.as_default(step=mystep):
summary_ops.write('tag', 1.0)
with writer.as_default():
mystep.assign(2)
summary_ops.write('tag', 1.0)
with writer.as_default(step=3):
summary_ops.write('tag', 1.0)
summary_ops.write('tag', 1.0)
mystep.assign(4)
summary_ops.write('tag', 1.0)
events = events_from_logdir(logdir)
self.assertListEqual([1, 2, 3, 2, 4], [e.step for e in events[1:]])
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_usingDefaultStep_fromSetAsDefault(self):
logdir = self.get_temp_dir()
try:
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
mystep = variables.Variable(1, dtype=dtypes.int64)
writer.set_as_default(step=mystep)
summary_ops.write('tag', 1.0)
mystep.assign(2)
summary_ops.write('tag', 1.0)
writer.set_as_default(step=3)
summary_ops.write('tag', 1.0)
writer.flush()
events = events_from_logdir(logdir)
self.assertListEqual([1, 2, 3], [e.step for e in events[1:]])
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_usingDefaultStepVariable_fromSetAsDefault(self):
logdir = self.get_temp_dir()
try:
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
writer.set_as_default(step=1)
summary_ops.write('tag', 1.0)
writer.set_as_default(step=2)
summary_ops.write('tag', 1.0)
writer.set_as_default()
summary_ops.write('tag', 1.0)
writer.flush()
events = events_from_logdir(logdir)
self.assertListEqual([1, 2, 2], [e.step for e in events[1:]])
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_recordIf_constant(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
self.assertTrue(summary_ops.write('default', 1, step=0))
with summary_ops.record_if(True):
self.assertTrue(summary_ops.write('set_on', 1, step=0))
with summary_ops.record_if(False):
self.assertFalse(summary_ops.write('set_off', 1, step=0))
events = events_from_logdir(logdir)
self.assertEqual(3, len(events))
self.assertEqual('default', events[1].summary.value[0].tag)
self.assertEqual('set_on', events[2].summary.value[0].tag)
def testWrite_recordIf_constant_fromFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function
def f():
with writer.as_default():
# Use assertAllEqual instead of assertTrue since it works in a defun.
self.assertAllEqual(summary_ops.write('default', 1, step=0), True)
with summary_ops.record_if(True):
self.assertAllEqual(summary_ops.write('set_on', 1, step=0), True)
with summary_ops.record_if(False):
self.assertAllEqual(summary_ops.write('set_off', 1, step=0), False)
f()
events = events_from_logdir(logdir)
self.assertEqual(3, len(events))
self.assertEqual('default', events[1].summary.value[0].tag)
self.assertEqual('set_on', events[2].summary.value[0].tag)
def testWrite_recordIf_callable(self):
logdir = self.get_temp_dir()
with context.eager_mode():
step = variables.Variable(-1, dtype=dtypes.int64)
def record_fn():
step.assign_add(1)
return int(step % 2) == 0
with summary_ops.create_file_writer_v2(logdir).as_default():
with summary_ops.record_if(record_fn):
self.assertTrue(summary_ops.write('tag', 1, step=step))
self.assertFalse(summary_ops.write('tag', 1, step=step))
self.assertTrue(summary_ops.write('tag', 1, step=step))
self.assertFalse(summary_ops.write('tag', 1, step=step))
self.assertTrue(summary_ops.write('tag', 1, step=step))
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(0, events[1].step)
self.assertEqual(2, events[2].step)
self.assertEqual(4, events[3].step)
def testWrite_recordIf_callable_fromFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
step = variables.Variable(-1, dtype=dtypes.int64)
@def_function.function
def record_fn():
step.assign_add(1)
return math_ops.equal(step % 2, 0)
@def_function.function
def f():
with writer.as_default():
with summary_ops.record_if(record_fn):
return [
summary_ops.write('tag', 1, step=step),
summary_ops.write('tag', 1, step=step),
summary_ops.write('tag', 1, step=step)]
self.assertAllEqual(f(), [True, False, True])
self.assertAllEqual(f(), [False, True, False])
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(0, events[1].step)
self.assertEqual(2, events[2].step)
self.assertEqual(4, events[3].step)
def testWrite_recordIf_tensorInput_fromFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[], dtype=dtypes.int64)])
def f(step):
with writer.as_default():
with summary_ops.record_if(math_ops.equal(step % 2, 0)):
return summary_ops.write('tag', 1, step=step)
self.assertTrue(f(0))
self.assertFalse(f(1))
self.assertTrue(f(2))
self.assertFalse(f(3))
self.assertTrue(f(4))
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(0, events[1].step)
self.assertEqual(2, events[2].step)
self.assertEqual(4, events[3].step)
def testWriteRawPb(self):
logdir = self.get_temp_dir()
pb = summary_pb2.Summary()
pb.value.add().simple_value = 42.0
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
output = summary_ops.write_raw_pb(pb.SerializeToString(), step=12)
self.assertTrue(output.numpy())
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
self.assertProtoEquals(pb, events[1].summary)
def testWriteRawPb_fromFunction(self):
logdir = self.get_temp_dir()
pb = summary_pb2.Summary()
pb.value.add().simple_value = 42.0
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function
def f():
with writer.as_default():
return summary_ops.write_raw_pb(pb.SerializeToString(), step=12)
output = f()
self.assertTrue(output.numpy())
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
self.assertProtoEquals(pb, events[1].summary)
def testWriteRawPb_multipleValues(self):
logdir = self.get_temp_dir()
pb1 = summary_pb2.Summary()
pb1.value.add().simple_value = 1.0
pb1.value.add().simple_value = 2.0
pb2 = summary_pb2.Summary()
pb2.value.add().simple_value = 3.0
pb3 = summary_pb2.Summary()
pb3.value.add().simple_value = 4.0
pb3.value.add().simple_value = 5.0
pb3.value.add().simple_value = 6.0
pbs = [pb.SerializeToString() for pb in (pb1, pb2, pb3)]
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
output = summary_ops.write_raw_pb(pbs, step=12)
self.assertTrue(output.numpy())
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
expected_pb = summary_pb2.Summary()
for i in range(6):
expected_pb.value.add().simple_value = i + 1.0
self.assertProtoEquals(expected_pb, events[1].summary)
def testWriteRawPb_invalidValue(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
with self.assertRaisesRegex(
errors.DataLossError,
'Bad tf.compat.v1.Summary binary proto tensor string'):
summary_ops.write_raw_pb('notaproto', step=12)
@test_util.also_run_as_tf_function
def testGetSetStep(self):
try:
self.assertIsNone(summary_ops.get_step())
summary_ops.set_step(1)
# Use assertAllEqual instead of assertEqual since it works in a defun.
self.assertAllEqual(1, summary_ops.get_step())
summary_ops.set_step(constant_op.constant(2))
self.assertAllEqual(2, summary_ops.get_step())
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testGetSetStep_variable(self):
with context.eager_mode():
try:
mystep = variables.Variable(0)
summary_ops.set_step(mystep)
self.assertAllEqual(0, summary_ops.get_step().read_value())
mystep.assign_add(1)
self.assertAllEqual(1, summary_ops.get_step().read_value())
# Check that set_step() properly maintains reference to variable.
del mystep
self.assertAllEqual(1, summary_ops.get_step().read_value())
summary_ops.get_step().assign_add(1)
self.assertAllEqual(2, summary_ops.get_step().read_value())
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testGetSetStep_variable_fromFunction(self):
with context.eager_mode():
try:
@def_function.function
def set_step(step):
summary_ops.set_step(step)
return summary_ops.get_step()
@def_function.function
def get_and_increment():
summary_ops.get_step().assign_add(1)
return summary_ops.get_step()
mystep = variables.Variable(0)
self.assertAllEqual(0, set_step(mystep))
self.assertAllEqual(0, summary_ops.get_step().read_value())
self.assertAllEqual(1, get_and_increment())
self.assertAllEqual(2, get_and_increment())
# Check that set_step() properly maintains reference to variable.
del mystep
self.assertAllEqual(3, get_and_increment())
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
@test_util.also_run_as_tf_function
def testSummaryScope(self):
with summary_ops.summary_scope('foo') as (tag, scope):
self.assertEqual('foo', tag)
self.assertEqual('foo/', scope)
with summary_ops.summary_scope('bar') as (tag, scope):
self.assertEqual('foo/bar', tag)
self.assertEqual('foo/bar/', scope)
with summary_ops.summary_scope('with/slash') as (tag, scope):
self.assertEqual('foo/with/slash', tag)
self.assertEqual('foo/with/slash/', scope)
with ops.name_scope(None, skip_on_eager=False):
with summary_ops.summary_scope('unnested') as (tag, scope):
self.assertEqual('unnested', tag)
self.assertEqual('unnested/', scope)
@test_util.also_run_as_tf_function
def testSummaryScope_defaultName(self):
with summary_ops.summary_scope(None) as (tag, scope):
self.assertEqual('summary', tag)
self.assertEqual('summary/', scope)
with summary_ops.summary_scope(None, 'backup') as (tag, scope):
self.assertEqual('backup', tag)
self.assertEqual('backup/', scope)
@test_util.also_run_as_tf_function
def testSummaryScope_handlesCharactersIllegalForScope(self):
with summary_ops.summary_scope('f?o?o') as (tag, scope):
self.assertEqual('f?o?o', tag)
self.assertEqual('foo/', scope)
# If all characters aren't legal for a scope name, use default name.
with summary_ops.summary_scope('???', 'backup') as (tag, scope):
self.assertEqual('???', tag)
self.assertEqual('backup/', scope)
@test_util.also_run_as_tf_function
def testSummaryScope_nameNotUniquifiedForTag(self):
constant_op.constant(0, name='foo')
with summary_ops.summary_scope('foo') as (tag, _):
self.assertEqual('foo', tag)
with summary_ops.summary_scope('foo') as (tag, _):
self.assertEqual('foo', tag)
with ops.name_scope('with', skip_on_eager=False):
constant_op.constant(0, name='slash')
with summary_ops.summary_scope('with/slash') as (tag, _):
self.assertEqual('with/slash', tag)
def testAllV2SummaryOps(self):
logdir = self.get_temp_dir()
def define_ops():
result = []
# TF 2.0 summary ops
result.append(summary_ops.write('write', 1, step=0))
result.append(summary_ops.write_raw_pb(b'', step=0, name='raw_pb'))
# TF 1.x tf.contrib.summary ops
result.append(summary_ops.generic('tensor', 1, step=1))
result.append(summary_ops.scalar('scalar', 2.0, step=1))
result.append(summary_ops.histogram('histogram', [1.0], step=1))
result.append(summary_ops.image('image', [[[[1.0]]]], step=1))
result.append(summary_ops.audio('audio', [[1.0]], 1.0, 1, step=1))
return result
with context.graph_mode():
ops_without_writer = define_ops()
with summary_ops.create_file_writer_v2(logdir).as_default():
with summary_ops.record_if(True):
ops_recording_on = define_ops()
with summary_ops.record_if(False):
ops_recording_off = define_ops()
# We should be collecting all ops defined with a default writer present,
# regardless of whether recording was set on or off, but not those defined
# without a writer at all.
del ops_without_writer
expected_ops = ops_recording_on + ops_recording_off
self.assertCountEqual(expected_ops, summary_ops.all_v2_summary_ops())
class SummaryWriterTest(test_util.TensorFlowTestCase):
def testCreate_withInitAndClose(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(
logdir, max_queue=1000, flush_millis=1000000)
get_total = lambda: len(events_from_logdir(logdir))
self.assertEqual(1, get_total()) # file_version Event
# Calling init() again while writer is open has no effect
writer.init()
self.assertEqual(1, get_total())
with writer.as_default():
summary_ops.write('tag', 1, step=0)
self.assertEqual(1, get_total())
# Calling .close() should do an implicit flush
writer.close()
self.assertEqual(2, get_total())
def testCreate_fromFunction(self):
logdir = self.get_temp_dir()
@def_function.function
def f():
# Returned SummaryWriter must be stored in a non-local variable so it
# lives throughout the function execution.
if not hasattr(f, 'writer'):
f.writer = summary_ops.create_file_writer_v2(logdir)
with context.eager_mode():
f()
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(event_files))
def testCreate_graphTensorArgument_raisesError(self):
logdir = self.get_temp_dir()
with context.graph_mode():
logdir_tensor = constant_op.constant(logdir)
with context.eager_mode():
with self.assertRaisesRegex(
ValueError, 'Invalid graph Tensor argument.*logdir'):
summary_ops.create_file_writer_v2(logdir_tensor)
self.assertEmpty(gfile.Glob(os.path.join(logdir, '*')))
def testCreate_fromFunction_graphTensorArgument_raisesError(self):
logdir = self.get_temp_dir()
@def_function.function
def f():
summary_ops.create_file_writer_v2(constant_op.constant(logdir))
with context.eager_mode():
with self.assertRaisesRegex(
ValueError, 'Invalid graph Tensor argument.*logdir'):
f()
self.assertEmpty(gfile.Glob(os.path.join(logdir, '*')))
def testCreate_fromFunction_unpersistedResource_raisesError(self):
logdir = self.get_temp_dir()
@def_function.function
def f():
with summary_ops.create_file_writer_v2(logdir).as_default():
pass # Calling .as_default() is enough to indicate use.
with context.eager_mode():
# TODO(nickfelt): change this to a better error
with self.assertRaisesRegex(
errors.NotFoundError, 'Resource.*does not exist'):
f()
# Even though we didn't use it, an event file will have been created.
self.assertEqual(1, len(gfile.Glob(os.path.join(logdir, '*'))))
def testCreate_immediateSetAsDefault_retainsReference(self):
logdir = self.get_temp_dir()
try:
with context.eager_mode():
summary_ops.create_file_writer_v2(logdir).set_as_default()
summary_ops.flush()
finally:
# Ensure we clean up no matter how the test executes.
summary_ops._summary_state.writer = None # pylint: disable=protected-access
def testCreate_immediateAsDefault_retainsReference(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.flush()
def testNoSharing(self):
# Two writers with the same logdir should not share state.
logdir = self.get_temp_dir()
with context.eager_mode():
writer1 = summary_ops.create_file_writer_v2(logdir)
with writer1.as_default():
summary_ops.write('tag', 1, step=1)
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(event_files))
file1 = event_files[0]
writer2 = summary_ops.create_file_writer_v2(logdir)
with writer2.as_default():
summary_ops.write('tag', 1, step=2)
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(2, len(event_files))
event_files.remove(file1)
file2 = event_files[0]
# Extra writes to ensure interleaved usage works.
with writer1.as_default():
summary_ops.write('tag', 1, step=1)
with writer2.as_default():
summary_ops.write('tag', 1, step=2)
events = iter(events_from_file(file1))
self.assertEqual('brain.Event:2', next(events).file_version)
self.assertEqual(1, next(events).step)
self.assertEqual(1, next(events).step)
self.assertRaises(StopIteration, lambda: next(events))
events = iter(events_from_file(file2))
self.assertEqual('brain.Event:2', next(events).file_version)
self.assertEqual(2, next(events).step)
self.assertEqual(2, next(events).step)
self.assertRaises(StopIteration, lambda: next(events))
def testNoSharing_fromFunction(self):
logdir = self.get_temp_dir()
@def_function.function
def f1():
if not hasattr(f1, 'writer'):
f1.writer = summary_ops.create_file_writer_v2(logdir)
with f1.writer.as_default():
summary_ops.write('tag', 1, step=1)
@def_function.function
def f2():
if not hasattr(f2, 'writer'):
f2.writer = summary_ops.create_file_writer_v2(logdir)
with f2.writer.as_default():
summary_ops.write('tag', 1, step=2)
with context.eager_mode():
f1()
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(event_files))
file1 = event_files[0]
f2()
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(2, len(event_files))
event_files.remove(file1)
file2 = event_files[0]
# Extra writes to ensure interleaved usage works.
f1()
f2()
events = iter(events_from_file(file1))
self.assertEqual('brain.Event:2', next(events).file_version)
self.assertEqual(1, next(events).step)
self.assertEqual(1, next(events).step)
self.assertRaises(StopIteration, lambda: next(events))
events = iter(events_from_file(file2))
self.assertEqual('brain.Event:2', next(events).file_version)
self.assertEqual(2, next(events).step)
self.assertEqual(2, next(events).step)
self.assertRaises(StopIteration, lambda: next(events))
def testMaxQueue(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(
logdir, max_queue=1, flush_millis=999999).as_default():
get_total = lambda: len(events_from_logdir(logdir))
# Note: First tf.compat.v1.Event is always file_version.
self.assertEqual(1, get_total())
summary_ops.write('tag', 1, step=0)
self.assertEqual(1, get_total())
# Should flush after second summary since max_queue = 1
summary_ops.write('tag', 1, step=0)
self.assertEqual(3, get_total())
def testWriterFlush(self):
logdir = self.get_temp_dir()
get_total = lambda: len(events_from_logdir(logdir))
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(
logdir, max_queue=1000, flush_millis=1000000)
self.assertEqual(1, get_total()) # file_version Event
with writer.as_default():
summary_ops.write('tag', 1, step=0)
self.assertEqual(1, get_total())
writer.flush()
self.assertEqual(2, get_total())
summary_ops.write('tag', 1, step=0)
self.assertEqual(2, get_total())
# Exiting the "as_default()" should do an implicit flush
self.assertEqual(3, get_total())
def testFlushFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(
logdir, max_queue=999999, flush_millis=999999)
with writer.as_default():
get_total = lambda: len(events_from_logdir(logdir))
# Note: First tf.compat.v1.Event is always file_version.
self.assertEqual(1, get_total())
summary_ops.write('tag', 1, step=0)
summary_ops.write('tag', 1, step=0)
self.assertEqual(1, get_total())
summary_ops.flush()
self.assertEqual(3, get_total())
# Test "writer" parameter
summary_ops.write('tag', 1, step=0)
self.assertEqual(3, get_total())
summary_ops.flush(writer=writer)
self.assertEqual(4, get_total())
summary_ops.write('tag', 1, step=0)
self.assertEqual(4, get_total())
summary_ops.flush(writer=writer._resource) # pylint:disable=protected-access
self.assertEqual(5, get_total())
@test_util.assert_no_new_tensors
def testNoMemoryLeak_graphMode(self):
logdir = self.get_temp_dir()
with context.graph_mode(), ops.Graph().as_default():
summary_ops.create_file_writer_v2(logdir)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testNoMemoryLeak_eagerMode(self):
logdir = self.get_temp_dir()
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.write('tag', 1, step=0)
def testClose_preventsLaterUse(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
writer.close()
writer.close() # redundant close() is a no-op
writer.flush() # redundant flush() is a no-op
with self.assertRaisesRegex(RuntimeError, 'already closed'):
writer.init()
with self.assertRaisesRegex(RuntimeError, 'already closed'):
with writer.as_default():
self.fail('should not get here')
with self.assertRaisesRegex(RuntimeError, 'already closed'):
writer.set_as_default()
def testClose_closesOpenFile(self):
try:
import psutil # pylint: disable=g-import-not-at-top
except ImportError:
raise unittest.SkipTest('test requires psutil')
proc = psutil.Process()
get_open_filenames = lambda: set(info[0] for info in proc.open_files())
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(files))
eventfile = files[0]
self.assertIn(eventfile, get_open_filenames())
writer.close()
self.assertNotIn(eventfile, get_open_filenames())
def testDereference_closesOpenFile(self):
try:
import psutil # pylint: disable=g-import-not-at-top
except ImportError:
raise unittest.SkipTest('test requires psutil')
proc = psutil.Process()
get_open_filenames = lambda: set(info[0] for info in proc.open_files())
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(files))
eventfile = files[0]
self.assertIn(eventfile, get_open_filenames())
del writer
self.assertNotIn(eventfile, get_open_filenames())
class SummaryOpsTest(test_util.TensorFlowTestCase):
def tearDown(self):
summary_ops.trace_off()
def exec_summary_op(self, summary_op_fn):
assert context.executing_eagerly()
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer_v2(logdir)
with writer.as_default():
summary_op_fn()
writer.close()
events = events_from_logdir(logdir)
return events[1]
def run_metadata(self, *args, **kwargs):
assert context.executing_eagerly()
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer_v2(logdir)
with writer.as_default():
summary_ops.run_metadata(*args, **kwargs)
writer.close()
events = events_from_logdir(logdir)
return events[1]
def run_metadata_graphs(self, *args, **kwargs):
assert context.executing_eagerly()
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer_v2(logdir)
with writer.as_default():
summary_ops.run_metadata_graphs(*args, **kwargs)
writer.close()
events = events_from_logdir(logdir)
return events[1]
def create_run_metadata(self):
step_stats = step_stats_pb2.StepStats(dev_stats=[
step_stats_pb2.DeviceStepStats(
device='cpu:0',
node_stats=[step_stats_pb2.NodeExecStats(node_name='hello')])
])
return config_pb2.RunMetadata(
function_graphs=[
config_pb2.RunMetadata.FunctionGraphs(
pre_optimization_graph=graph_pb2.GraphDef(
node=[node_def_pb2.NodeDef(name='foo')]))
],
step_stats=step_stats)
def run_trace(self, f, step=1):
assert context.executing_eagerly()
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer_v2(logdir)
summary_ops.trace_on(graph=True, profiler=False)
with writer.as_default():
f()
summary_ops.trace_export(name='foo', step=step)
writer.close()
events = events_from_logdir(logdir)
return events[1]
@test_util.run_v2_only
def testRunMetadata_usesNameAsTag(self):
meta = config_pb2.RunMetadata()
with ops.name_scope('foo', skip_on_eager=False):
event = self.run_metadata(name='my_name', data=meta, step=1)
first_val = event.summary.value[0]
self.assertEqual('foo/my_name', first_val.tag)
@test_util.run_v2_only
def testRunMetadata_summaryMetadata(self):
expected_summary_metadata = """
plugin_data {
plugin_name: "graph_run_metadata"
content: "1"
}
"""
meta = config_pb2.RunMetadata()
event = self.run_metadata(name='my_name', data=meta, step=1)
actual_summary_metadata = event.summary.value[0].metadata
self.assertProtoEquals(expected_summary_metadata, actual_summary_metadata)
@test_util.run_v2_only
def testRunMetadata_wholeRunMetadata(self):
expected_run_metadata = """
step_stats {
dev_stats {
device: "cpu:0"
node_stats {
node_name: "hello"
}
}
}
function_graphs {
pre_optimization_graph {
node {
name: "foo"
}
}
}
"""
meta = self.create_run_metadata()
event = self.run_metadata(name='my_name', data=meta, step=1)
first_val = event.summary.value[0]
actual_run_metadata = config_pb2.RunMetadata.FromString(
first_val.tensor.string_val[0])
self.assertProtoEquals(expected_run_metadata, actual_run_metadata)
@test_util.run_v2_only
def testRunMetadata_usesDefaultStep(self):
meta = config_pb2.RunMetadata()
try:
summary_ops.set_step(42)
event = self.run_metadata(name='my_name', data=meta)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
@test_util.run_v2_only
def testRunMetadataGraph_usesNameAsTag(self):
meta = config_pb2.RunMetadata()
with ops.name_scope('foo', skip_on_eager=False):
event = self.run_metadata_graphs(name='my_name', data=meta, step=1)
first_val = event.summary.value[0]
self.assertEqual('foo/my_name', first_val.tag)
@test_util.run_v2_only
def testRunMetadataGraph_summaryMetadata(self):
expected_summary_metadata = """
plugin_data {
plugin_name: "graph_run_metadata_graph"
content: "1"
}
"""
meta = config_pb2.RunMetadata()
event = self.run_metadata_graphs(name='my_name', data=meta, step=1)
actual_summary_metadata = event.summary.value[0].metadata
self.assertProtoEquals(expected_summary_metadata, actual_summary_metadata)
@test_util.run_v2_only
def testRunMetadataGraph_runMetadataFragment(self):
expected_run_metadata = """
function_graphs {
pre_optimization_graph {
node {
name: "foo"
}
}
}
"""
meta = self.create_run_metadata()
event = self.run_metadata_graphs(name='my_name', data=meta, step=1)
first_val = event.summary.value[0]
actual_run_metadata = config_pb2.RunMetadata.FromString(
first_val.tensor.string_val[0])
self.assertProtoEquals(expected_run_metadata, actual_run_metadata)
@test_util.run_v2_only
def testRunMetadataGraph_usesDefaultStep(self):
meta = config_pb2.RunMetadata()
try:
summary_ops.set_step(42)
event = self.run_metadata_graphs(name='my_name', data=meta)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
@test_util.run_v2_only
def testTrace(self):
@def_function.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
event = self.run_trace(f)
first_val = event.summary.value[0]
actual_run_metadata = config_pb2.RunMetadata.FromString(
first_val.tensor.string_val[0])
# Content of function_graphs is large and, for instance, device can change.
self.assertTrue(hasattr(actual_run_metadata, 'function_graphs'))
@test_util.run_v2_only
def testTrace_cannotEnableTraceInFunction(self):
@def_function.function
def f():
summary_ops.trace_on(graph=True, profiler=False)
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
with test.mock.patch.object(logging, 'warn') as mock_log:
f()
self.assertRegex(
str(mock_log.call_args), 'Cannot enable trace inside a tf.function.')
@test_util.run_v2_only
def testTrace_cannotEnableTraceInGraphMode(self):
with test.mock.patch.object(logging, 'warn') as mock_log:
with context.graph_mode():
summary_ops.trace_on(graph=True, profiler=False)
self.assertRegex(
str(mock_log.call_args), 'Must enable trace in eager mode.')
@test_util.run_v2_only
def testTrace_cannotExportTraceWithoutTrace(self):
with six.assertRaisesRegex(self, ValueError,
'Must enable trace before export.'):
summary_ops.trace_export(name='foo', step=1)
@test_util.run_v2_only
def testTrace_cannotExportTraceInFunction(self):
summary_ops.trace_on(graph=True, profiler=False)
@def_function.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
summary_ops.trace_export(name='foo', step=1)
return x**y
with test.mock.patch.object(logging, 'warn') as mock_log:
f()
self.assertRegex(
str(mock_log.call_args), 'Cannot export trace inside a tf.function.')
@test_util.run_v2_only
def testTrace_cannotExportTraceInGraphMode(self):
with test.mock.patch.object(logging, 'warn') as mock_log:
with context.graph_mode():
summary_ops.trace_export(name='foo', step=1)
self.assertRegex(
str(mock_log.call_args),
'Can only export trace while executing eagerly.')
@test_util.run_v2_only
def testTrace_usesDefaultStep(self):
@def_function.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
try:
summary_ops.set_step(42)
event = self.run_trace(f, step=None)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
@test_util.run_v2_only
def testTrace_withProfiler(self):
@def_function.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
assert context.executing_eagerly()
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer_v2(logdir)
summary_ops.trace_on(graph=True, profiler=True)
profiler_outdir = self.get_temp_dir()
with writer.as_default():
f()
summary_ops.trace_export(
name='foo', step=1, profiler_outdir=profiler_outdir)
writer.close()
@test_util.run_v2_only
def testGraph_graph(self):
@def_function.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
def summary_op_fn():
summary_ops.graph(f.get_concrete_function().graph)
event = self.exec_summary_op(summary_op_fn)
self.assertIsNotNone(event.graph_def)
@test_util.run_v2_only
def testGraph_graphDef(self):
@def_function.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
def summary_op_fn():
summary_ops.graph(f.get_concrete_function().graph.as_graph_def())
event = self.exec_summary_op(summary_op_fn)
self.assertIsNotNone(event.graph_def)
@test_util.run_v2_only
def testGraph_invalidData(self):
def summary_op_fn():
summary_ops.graph('hello')
with self.assertRaisesRegex(
ValueError,
r'\'graph_data\' is not tf.Graph or tf.compat.v1.GraphDef',
):
self.exec_summary_op(summary_op_fn)
@test_util.run_v2_only
def testGraph_fromGraphMode(self):
@def_function.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
@def_function.function
def g(graph):
summary_ops.graph(graph)
def summary_op_fn():
graph_def = f.get_concrete_function().graph.as_graph_def(add_shapes=True)
func_graph = constant_op.constant(graph_def.SerializeToString())
g(func_graph)
with self.assertRaisesRegex(
ValueError,
r'graph\(\) cannot be invoked inside a graph context.',
):
self.exec_summary_op(summary_op_fn)
def events_from_file(filepath):
"""Returns all events in a single event file.
Args:
filepath: Path to the event file.
Returns:
A list of all tf.Event protos in the event file.
"""
records = list(tf_record.tf_record_iterator(filepath))
result = []
for r in records:
event = event_pb2.Event()
event.ParseFromString(r)
result.append(event)
return result
def events_from_logdir(logdir):
"""Returns all events in the single eventfile in logdir.
Args:
logdir: The directory in which the single event file is sought.
Returns:
A list of all tf.Event protos from the single event file.
Raises:
AssertionError: If logdir does not contain exactly one file.
"""
assert gfile.Exists(logdir)
files = gfile.ListDirectory(logdir)
assert len(files) == 1, 'Found not exactly one file in logdir: %s' % files
return events_from_file(os.path.join(logdir, files[0]))
def to_numpy(summary_value):
return tensor_util.MakeNdarray(summary_value.tensor)
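# Hedged usage sketch (not an actual test case): how the helpers above are meant
# to be combined. The logdir value is hypothetical.
#
#   logdir = '/tmp/summary_example'
#   with summary_ops.create_file_writer_v2(logdir).as_default():
#       summary_ops.write('loss', 0.25, step=1)   # implicit flush when the context exits
#   events = events_from_logdir(logdir)           # events[0] is the file_version event
#   loss_value = to_numpy(events[1].summary.value[0])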
if __name__ == '__main__':
test.main()
| 36.460118 | 85 | 0.680049 |
794582d895aa5c63c8440ae990741f4df39e427d | 7,839 | py | Python | installer/menu.py | username-is-already-taken2/photon | 5be3d8c956156f494a2479274ca8eb9723fa2057 | ["Apache-2.0"] | 1 | 2020-09-17T00:43:26.000Z | 2020-09-17T00:43:26.000Z | installer/menu.py | username-is-already-taken2/photon | 5be3d8c956156f494a2479274ca8eb9723fa2057 | ["Apache-2.0"] | null | null | null | installer/menu.py | username-is-already-taken2/photon | 5be3d8c956156f494a2479274ca8eb9723fa2057 | ["Apache-2.0"] | 1 | 2020-09-17T00:43:35.000Z | 2020-09-17T00:43:35.000Z | #
#
# Author: Mahmoud Bassiouny <[email protected]>
import curses
from actionresult import ActionResult
from action import Action
class Menu(Action):
def __init__(self, starty, maxx, items, height=0, selector_menu=False,
can_navigate_outside=True, horizontal=False, default_selected=0,
save_sel=False, tab_enable=True):
self.can_navigate_outside = can_navigate_outside
self.horizontal = horizontal
self.horizontal_padding = 10
self.position = default_selected
        self.head_position = 0  # index of the first item currently shown in the window
self.items = items
self.items_strings = []
self.width = self.lengthen_items()
self.num_items = len(self.items)
self.save_sel = save_sel
self.tab_enable = tab_enable
if height == 0 or height > self.num_items:
self.height = self.num_items
else:
self.height = height
# Check if we need to add a scroll bar
if self.num_items > self.height:
self.show_scroll = True
self.width += 2
else:
self.show_scroll = False
        # Some calculation to determine the size of the filled portion of the scroll bar
self.filled = int(round(self.height * self.height / float(self.num_items)))
if self.filled == 0:
self.filled += 1
for i in [1, 2]:
if (self.num_items - self.height) >= i and (self.height - self.filled) == (i - 1):
self.filled -= 1
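# Worked example of the sizing above: with height=5 visible rows and
# num_items=20, filled = round(5*5/20.0) = 1; the loop only shrinks `filled`
# when some items are hidden but the bar would otherwise look (almost) full.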
# increment the width if it's a selector menu
self.selector_menu = selector_menu
if self.selector_menu:
self.width += 4
self.selected_items = set([])
if self.horizontal:
menu_win_width = (self.width + self.horizontal_padding) * self.num_items
else:
menu_win_width = self.width
self.window = curses.newwin(self.height, menu_win_width)
self.window.bkgd(' ', curses.color_pair(2))
self.window.keypad(1)
self.panel = curses.panel.new_panel(self.window)
self.panel.move(starty, (maxx - menu_win_width) // 2)
self.panel.hide()
curses.panel.update_panels()
def can_save_sel(self, can_save_sel):
self.save_sel = can_save_sel
def lengthen_items(self):
width = 0
for item in self.items:
if len(item[0]) > width:
width = len(item[0])
for item in self.items:
spaces = ''
for i in range(width - len(item[0])):
spaces += ' '
self.items_strings.append(item[0] + spaces)
return width + 1
def navigate(self, n):
self.position += n
if self.position < 0:
self.position = 0
elif self.position >= len(self.items):
self.position = len(self.items)-1
if self.position >= self.head_position + self.height:
self.head_position = self.position - self.height + 1
if self.position < self.head_position:
self.head_position = self.position
def render_scroll_bar(self):
if self.show_scroll:
remaining_above = self.head_position
remaining_down = self.num_items - self.height - self.head_position
up = int(round(remaining_above * self.height / float(self.num_items)))
down = self.height - up - self.filled
if up == 0 and remaining_above > 0:
up += 1
down -= 1
if down == 0 and remaining_down > 0:
up -= 1
down += 1
if remaining_down == 0 and down != 0:
up += down
down = 0
for index in range(up):
self.window.addch(index, self.width - 2, curses.ACS_CKBOARD)
for index in range(self.filled):
self.window.addstr(index + up, self.width - 2, ' ', curses.A_REVERSE)
for index in range(down):
self.window.addch(index + up + self.filled, self.width - 2, curses.ACS_CKBOARD)
def refresh(self, highlight=True):
# self.window.clear()
for index, item in enumerate(self.items_strings):
if index < self.head_position:
continue
elif index > self.head_position + self.height - 1:
continue
elif index == self.position:
if highlight:
mode = curses.color_pair(3)
else:
mode = curses.color_pair(1)
else:
mode = curses.color_pair(2)
if self.selector_menu:
if index in self.selected_items:
item = '[x] ' + item
else:
item = '[ ] ' + item
if self.horizontal:
x = self.horizontal_padding // 2 + index * self.horizontal_padding
y = 0
else:
x = 0
y = index - self.head_position
self.window.addstr(y, x, item, mode)
self.render_scroll_bar()
self.window.refresh()
self.panel.top()
self.panel.show()
curses.panel.update_panels()
curses.doupdate()
def hide(self):
self.panel.hide()
curses.panel.update_panels()
curses.doupdate()
def do_action(self):
while True:
self.refresh()
key = self.window.getch()
if key in [curses.KEY_ENTER, ord('\n')]:
if self.selector_menu:
# send the selected indexes
result = self.items[self.position][1](self.selected_items)
else:
result = self.items[self.position][1](self.items[self.position][2])
if result.success:
self.hide()
return result
if key in [ord(' ')] and self.selector_menu:
if self.position in self.selected_items:
self.selected_items.remove(self.position)
else:
self.selected_items.add(self.position)
elif key in [ord('\t')] and self.can_navigate_outside:
if not self.tab_enable:
continue
self.refresh(False)
if self.save_sel:
return ActionResult(False, {'diskIndex': self.position})
else:
return ActionResult(False, None)
elif key == curses.KEY_UP or key == curses.KEY_LEFT:
if not self.tab_enable and key == curses.KEY_LEFT:
if self.save_sel:
return ActionResult(False, {'diskIndex': self.position, 'direction':-1})
elif self.selector_menu:
result = self.items[self.position][1](self.selected_items)
else:
result = self.items[self.position][1](self.items[self.position][2])
return ActionResult(False, {'direction': -1})
self.navigate(-1)
elif key == curses.KEY_DOWN or key == curses.KEY_RIGHT:
if not self.tab_enable and key == curses.KEY_RIGHT:
if self.save_sel:
return ActionResult(False, {'diskIndex': self.position, 'direction':1})
else:
return ActionResult(False, {'direction': 1})
self.navigate(1)
elif key == curses.KEY_NPAGE:
self.navigate(self.height)
elif key == curses.KEY_PPAGE:
self.navigate(-self.height)
elif key == curses.KEY_HOME:
self.navigate(-self.position)
| 35.470588 | 96 | 0.533742 |
794583e24bd88b799837c9805110510ad5b0fcb3 | 4,476 | py | Python | keras_frcnn/RoiPoolingConv.py | touchylk/part_classification | 25d25377addf6b459240748b61b7458233814b68 | [
"Apache-2.0"
] | 1 | 2019-03-14T01:26:20.000Z | 2019-03-14T01:26:20.000Z | keras_frcnn/RoiPoolingConv.py | touchylk/part_classification | 25d25377addf6b459240748b61b7458233814b68 | [
"Apache-2.0"
] | null | null | null | keras_frcnn/RoiPoolingConv.py | touchylk/part_classification | 25d25377addf6b459240748b61b7458233814b68 | [
"Apache-2.0"
] | null | null | null | from keras.engine.topology import Layer
import keras.backend as K
import tensorflow as tf
class RoiPoolingConv(Layer):
'''ROI pooling layer for 2D inputs.
See Spatial Pyramid Pooling in Deep Convolutional Networks for Visual Recognition,
K. He, X. Zhang, S. Ren, J. Sun
# Arguments
pool_size: int
Size of pooling region to use. pool_size = 7 will result in a 7x7 region.
num_rois: number of regions of interest to be used
# Input shape
list of two 4D tensors [X_img,X_roi] with shape:
X_img:
`(1, channels, rows, cols)` if dim_ordering='th'
or 4D tensor with shape:
`(1, rows, cols, channels)` if dim_ordering='tf'.
X_roi:
`(1,num_rois,4)` list of rois, with ordering (x,y,w,h)
# Output shape
3D tensor with shape:
`(1, num_rois, channels, pool_size, pool_size)`
'''
def __init__(self, pool_size, num_rois, **kwargs):
self.dim_ordering = K.image_dim_ordering()
assert self.dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
self.pool_size = pool_size
self.num_rois = num_rois
super(RoiPoolingConv, self).__init__(**kwargs)
def build(self, input_shape):
if self.dim_ordering == 'th':
self.nb_channels = input_shape[0][1]
elif self.dim_ordering == 'tf':
self.nb_channels = input_shape[0][3]
def compute_output_shape(self, input_shape):
if self.dim_ordering == 'th':
return None, self.num_rois, self.nb_channels, self.pool_size, self.pool_size
else:
return None, self.num_rois, self.pool_size, self.pool_size, self.nb_channels
def call(self, x, mask=None):
assert(len(x) == 2)
img = x[0]
rois = x[1]
input_shape = K.shape(img)
outputs = []
for roi_idx in range(self.num_rois):
x = rois[0, roi_idx, 0]
y = rois[0, roi_idx, 1]
w = rois[0, roi_idx, 2]
h = rois[0, roi_idx, 3]
row_length = w / float(self.pool_size)
col_length = h / float(self.pool_size)
num_pool_regions = self.pool_size
#NOTE: the RoiPooling implementation differs between theano and tensorflow due to the lack of a resize op
# in theano. The theano implementation is much less efficient and leads to long compile times
if self.dim_ordering == 'th':
for jy in range(num_pool_regions):
for ix in range(num_pool_regions):
x1 = x + ix * row_length
x2 = x1 + row_length
y1 = y + jy * col_length
y2 = y1 + col_length
x1 = K.cast(x1, 'int32')
x2 = K.cast(x2, 'int32')
y1 = K.cast(y1, 'int32')
y2 = K.cast(y2, 'int32')
x2 = x1 + K.maximum(1,x2-x1)
y2 = y1 + K.maximum(1,y2-y1)
new_shape = [input_shape[0], input_shape[1],
y2 - y1, x2 - x1]
x_crop = img[:, :, y1:y2, x1:x2]
xm = K.reshape(x_crop, new_shape)
pooled_val = K.max(xm, axis=(2, 3))
outputs.append(pooled_val)
elif self.dim_ordering == 'tf':
x = K.cast(x, 'int32')
y = K.cast(y, 'int32')
w = K.cast(w, 'int32')
h = K.cast(h, 'int32')
rs = tf.image.resize_images(img[:, y:y+h, x:x+w, :], (self.pool_size, self.pool_size))
outputs.append(rs)
final_output = K.concatenate(outputs, axis=0)
final_output = K.reshape(final_output, (1, self.num_rois, self.pool_size, self.pool_size, self.nb_channels))
if self.dim_ordering == 'th':
final_output = K.permute_dimensions(final_output, (0, 1, 4, 2, 3))
else:
final_output = K.permute_dimensions(final_output, (0, 1, 2, 3, 4))
return final_output
def get_config(self):
config = {'pool_size': self.pool_size,
'num_rois': self.num_rois}
base_config = super(RoiPoolingConv, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
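# --- Hypothetical usage sketch (not part of the original file) ---
# A minimal functional-API wiring of the layer, assuming an older Keras 2.x
# install (this layer itself relies on K.image_dim_ordering()), a 512-channel
# backbone feature map and 4 proposal boxes; all names below are illustrative.
if __name__ == '__main__':
    from keras.layers import Input
    from keras.models import Model

    feature_map = Input(shape=(None, None, 512))  # (rows, cols, channels), dim_ordering='tf'
    rois = Input(shape=(4, 4))                    # 4 boxes given as (x, y, w, h)
    pooled = RoiPoolingConv(pool_size=7, num_rois=4)([feature_map, rois])
    model = Model([feature_map, rois], pooled)    # output shape: (None, 4, 7, 7, 512)
    model.summary()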
| 36.096774 | 117 | 0.535523 |
79458437858a6bf0d9ee729a3fd35a7db87a33b4 | 169 | py | Python | NudeNet/nudenet/__init__.py | sparkzsolutions/The-Dark-Onion-Crawler | 56f52127acc7ff4151d455dd1f007638ad0e795d | [
"MIT"
] | null | null | null | NudeNet/nudenet/__init__.py | sparkzsolutions/The-Dark-Onion-Crawler | 56f52127acc7ff4151d455dd1f007638ad0e795d | [
"MIT"
] | null | null | null | NudeNet/nudenet/__init__.py | sparkzsolutions/The-Dark-Onion-Crawler | 56f52127acc7ff4151d455dd1f007638ad0e795d | [
"MIT"
] | 1 | 2022-02-14T13:46:48.000Z | 2022-02-14T13:46:48.000Z | from .classifier import Classifier as NudeClassifier
from .lite_classifier import LiteClassifier as NudeClassifierLite
from .detector import Detector as NudeDetector
| 42.25 | 66 | 0.857988 |
794584a29216191fc9486510234958cb0ac1b5fa | 7,846 | py | Python | pytrademonster/testing/testOrderService.py | femtotrader/pytrademonster | 0bce61a3ed90e3bd438de2bc56b90bbb409490c4 | [
"MIT"
] | null | null | null | pytrademonster/testing/testOrderService.py | femtotrader/pytrademonster | 0bce61a3ed90e3bd438de2bc56b90bbb409490c4 | [
"MIT"
] | null | null | null | pytrademonster/testing/testOrderService.py | femtotrader/pytrademonster | 0bce61a3ed90e3bd438de2bc56b90bbb409490c4 | [
"MIT"
] | 1 | 2018-02-23T09:33:58.000Z | 2018-02-23T09:33:58.000Z | # -*- coding: utf-8 -*-
__author__ = 'adam'
import unittest
import time
from pytrademonster import PyTradeMonster
from pytrademonster.constants import TradeMonsterConstants
from pytrademonster.services import AccountServices, OrderServices
from pytrademonster.objects import LimitOrder, OrderLeg, OrderStatus
"""
########################################################################
IMPORTANT: set this to a real account number to make the tests run
########################################################################
"""
ACCOUNT_NUMBER = 'xxxxxx'
class TestOrderService(unittest.TestCase):
"""
Test most functionality surrounding submission, execution, and retrieval of orders
"""
@classmethod
def setUpClass(self):
self.pyTradeMonster = PyTradeMonster('../cred.dat')
self.orderService = OrderServices(self.pyTradeMonster)
self.accountsService = AccountServices(self.pyTradeMonster)
self.accounts = self.accountsService.getParsedAccountObjects()
@classmethod
def tearDownClass(self):
'''
Cancel all outstanding orders
:return:
'''
print 'Going to cancel all outstanding orders from unit testing...'
self.orderService.sendCancelAllOrders(self.accounts[ACCOUNT_NUMBER].accountNumber)
def createSimpleLimitOrder(self):
order = LimitOrder()
orderLeg = OrderLeg()
orderLeg.instrumentType = TradeMonsterConstants.INSTRUMENTS.EQUITY
orderLeg.symbol = 'SPY'
orderLeg.orderSide = OrderLeg.side.BUY
order.price = 0.01
order.quantity = 1
order.orderLegs = [orderLeg]
order.instrumentType = TradeMonsterConstants.INSTRUMENTS.EQUITY
order.timeInForce = LimitOrder.timeInForceEnum.DAY
order.marketSession = LimitOrder.marketSessionEnum.REG
return order
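# The 0.01 limit price is intentionally far from the market, so these test
# orders rest unfilled on the book and can be cancelled again in tearDownClass.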
def testSingleLimitOrder(self):
self.orderService.sendCancelAllOrders(self.accounts[ACCOUNT_NUMBER].accountNumber) #cancel everything first just in case
order = self.createSimpleLimitOrder()
orderResponse = self.orderService.sendOrderAndGetParsedResponse(self.accounts[ACCOUNT_NUMBER], order)
status = orderResponse.status
print 'Status of order is {0}'.format(status)
self.assertTrue(status in OrderStatus.status.__dict__.keys() )
def testGetOrderConfirmation(self):
self.orderService.sendCancelAllOrders(self.accounts[ACCOUNT_NUMBER].accountNumber) #cancel everything first just in case
order = self.createSimpleLimitOrder()
orderResponse = self.orderService.sendOrderAndGetParsedResponse(self.accounts[ACCOUNT_NUMBER], order)
print 'Trying to confirm order...'
result = self.orderService.getOrderConfirmation(self.accounts[ACCOUNT_NUMBER], order, orderResponse)
confirm = result[TradeMonsterConstants.ResponseRoots.RETRIEVE_ORDER_CONFIRMATION_ROOT]
self.assertTrue(confirm['orderDescription'] != None)
def testSpreadOrder(self):
'''
Test a simple buy spread (debit)
:return:
'''
self.orderService.sendCancelAllOrders(self.accounts[ACCOUNT_NUMBER].accountNumber) #cancel everything first just in case
order = LimitOrder()
shortLeg = OrderLeg()
longLeg = OrderLeg()
shortLeg.instrumentType = TradeMonsterConstants.INSTRUMENTS.OPTION
shortLeg.symbol = 'SPYX1517C300000'
shortLeg.orderSide = OrderLeg.side.SELL
shortLeg.quantityRatio = 1
longLeg.instrumentType = TradeMonsterConstants.INSTRUMENTS.OPTION
longLeg.symbol = 'SPYX1517C310000'
longLeg.orderSide = OrderLeg.side.BUY
longLeg.quantityRatio = 1
order.price = 0.01
order.quantity = 1
order.instrumentType = TradeMonsterConstants.INSTRUMENTS.OPTION
order.timeInForce = LimitOrder.timeInForceEnum.DAY
order.marketSession = LimitOrder.marketSessionEnum.REG
order.orderLegs = []
order.orderLegs.append(shortLeg)
order.orderLegs.append(longLeg)
order.spreadName = TradeMonsterConstants.OrderRequests.ORDER_SPREAD_TYPES.PUT_VERTICAL
#send a live order with a silly price
result = self.orderService.sendOrderAndGetParsedResponse(self.accounts[ACCOUNT_NUMBER], order)
status = result.status
print 'Status of order is {0}'.format(status)
self.assertTrue(status in OrderStatus.status.__dict__.keys())
def testCancelSingleOrder(self):
order = self.createSimpleLimitOrder()
orderResponse = self.orderService.sendOrderAndGetParsedResponse(self.accounts[ACCOUNT_NUMBER], order)
print 'Going to cancel order',orderResponse.orderId,'...'
time.sleep(1)
result = self.orderService.sendCancelOrder(orderResponse.orderId)
self.assertTrue(TradeMonsterConstants.ResponseRoots.RETRIEVE_ORDER_CANCELLED_ROOT in result)
def testCancelAllOrders(self):
order = self.createSimpleLimitOrder()
orderResponse = self.orderService.sendOrderAndGetParsedResponse(self.accounts[ACCOUNT_NUMBER], order)
print 'Going to cancel all orders',orderResponse.orderId,'...'
time.sleep(1)
result = self.orderService.sendCancelAllOrders(self.accounts[ACCOUNT_NUMBER].accountNumber)
self.assertTrue(TradeMonsterConstants.ResponseRoots.RETRIEVE_ALL_CANCELLED_ROOT in result)
def testCancelDayOrder(self):
order = self.createSimpleLimitOrder()
orderResponse = self.orderService.sendOrderAndGetParsedResponse(self.accounts[ACCOUNT_NUMBER], order)
print 'Going to cancel day order',orderResponse.orderId,'...'
time.sleep(1)
result = self.orderService.sendCancelDayOrders(self.accounts[ACCOUNT_NUMBER].accountNumber)
self.assertTrue(TradeMonsterConstants.ResponseRoots.RETRIEVE_DAY_CANCELLED_ROOT in result)
def testCountAllOrders(self):
order = self.createSimpleLimitOrder()
self.orderService.sendOrderAndGetParsedResponse(self.accounts[ACCOUNT_NUMBER], order)
order = self.createSimpleLimitOrder()
self.orderService.sendOrderAndGetParsedResponse(self.accounts[ACCOUNT_NUMBER], order)
print 'Going to count all orders...'
time.sleep(1)
result = self.orderService.sendCountAllOpenOrders(self.accounts[ACCOUNT_NUMBER].accountNumber)
print 'Counted', result, 'orders total'
self.assertEquals(result,2)
def testCountDayOrders(self):
self.orderService.sendCancelAllOrders(self.accounts[ACCOUNT_NUMBER].accountNumber) #cancel everything first just in case
order = self.createSimpleLimitOrder()
self.orderService.sendOrderAndGetParsedResponse(self.accounts[ACCOUNT_NUMBER], order)
print 'Going to count day orders...'
time.sleep(1)
result = self.orderService.sendCountDayOrders(self.accounts[ACCOUNT_NUMBER].accountNumber)
print 'Counted', result, 'day orders'
self.assertEquals(result,1)
def testGetOrderHistory(self):
order = self.createSimpleLimitOrder()
orderResponse = self.orderService.sendOrderAndGetParsedResponse(self.accounts[ACCOUNT_NUMBER], order)
result = self.orderService.sendGetOrderHistory(orderResponse.orderId)
self.fail('TradeMonster call getOrderHistory not yet working - follow up with them...')
def testGetOrderDetails(self):
order = self.createSimpleLimitOrder()
orderResponse = self.orderService.sendOrderAndGetParsedResponse(self.accounts[ACCOUNT_NUMBER], order)
result = self.orderService.sendGetOrderDetail(orderResponse.orderId)
self.assertTrue(TradeMonsterConstants.ResponseRoots.RETRIEVE_ORDER_DETAILS_ROOT in result)
if __name__ == '__main__':
unittest.main()
| 43.10989 | 128 | 0.71119 |
79458500c61a027bef230d4e8be5cf59b0d7eb75 | 3,158 | py | Python | utils/mesh_util.py | yyu1/SurfaceNet | e59cf56d55d1be7295322d5a0f4a2aa244316d86 | [
"MIT"
] | 117 | 2017-08-08T07:25:16.000Z | 2022-01-30T02:41:11.000Z | utils/mesh_util.py | yyu1/SurfaceNet | e59cf56d55d1be7295322d5a0f4a2aa244316d86 | [
"MIT"
] | 8 | 2017-10-24T11:48:30.000Z | 2020-10-31T10:45:39.000Z | utils/mesh_util.py | yyu1/SurfaceNet | e59cf56d55d1be7295322d5a0f4a2aa244316d86 | [
"MIT"
] | 35 | 2017-08-08T10:44:21.000Z | 2022-02-13T13:18:35.000Z | '''
mesh utils
Tianye Li
'''
import numpy as np
# -----------------------------------------------------------------------------
class Mesh():
def __init__( self, v=None, f=None, vc=None, vn=None ):
self.v = v
self.f = f
self.vc = vc # per-vertex colors; currently needs to be set manually
self.vn = vn # per-vertex normals; currently needs to be set manually
def write_obj( self, filename ):
save_obj( self, filename )
def copy( self ):
return Mesh( v=self.v, f=self.f, vc=self.vc, vn=self.vn )
def initialize_vc( self ):
self.vc = np.ones_like( self.v )
# -----------------------------------------------------------------------------
def load_obj( filename ):
# based on Shunsuke Saito's code
# todo: support loading per-vertex color
f = open(filename, 'r')
V = []
F = []
VC = []
VN = []
for line in f:
line = line.rstrip('\n')
parts = line.split(" ")
if parts[0] == 'v':
parts.remove('v')
v = [float(a) for a in parts]
if len(v) == 6:
VC.append(v[3:6])
V.append(v[0:3])
elif parts[0] == 'f':
parts.remove('f')
face = [int(float(ft.split('//')[0]))-1 for ft in parts] # TODO: problematic if used for rendering (all 3 vertices per triangle needed)
F.append(face)
if parts[0] == 'vn':
parts.remove('vn')
vn = [float(a) for a in parts]
VN.append(vn[0:3])
f.close()
if len(VC) == 0:
mesh_vc = None
else:
mesh_vc = np.asarray( VC ).reshape((-1,3))
if len(VN) == 0:
mesh_vn = None
else:
mesh_vn = np.asarray( VN ).reshape((-1,3))
return Mesh( v=np.asarray(V).reshape((-1,3)),
f=np.asarray(F).reshape((-1,3)),
vc=mesh_vc,
vn=mesh_vn )
# -----------------------------------------------------------------------------
def save_obj( mesh, filename ):
# based on Shunsuke Saito's code
# support per-vertex color and normals
# https://en.wikipedia.org/wiki/Wavefront_.obj_file
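# Line formats written below (all indices in 'f' records are 1-based):
#   'v x y z [r g b]'    vertex position, optionally with per-vertex color
#   'vn nx ny nz'        vertex normal
#   'f a//a b//b c//c'   triangle as vertex//normal index pairs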
V = mesh.v.ravel()
F = mesh.f
file = open(filename, "w")
# write v and vc
if mesh.vc is None:
for i in range(V.shape[0]//3):
file.write('v %f %f %f\n' % ( V[3*i], V[3*i+1], V[3*i+2] ) )
else:
VC = mesh.vc.ravel()
for i in range(V.shape[0]//3):
file.write('v %f %f %f %f %f %f\n' % ( V[3*i], V[3*i+1], V[3*i+2], VC[3*i], VC[3*i+1], VC[3*i+2] ) )
# write vn and f
if mesh.vn is not None:
VN = mesh.vn
for i in range(VN.shape[0]):
file.write('vn %f %f %f\n' % (VN[i,0], VN[i,1], VN[i,2]))
# write f for vertices and normals
if F is not None:
for i in range(F.shape[0]):
file.write('f %d//%d %d//%d %d//%d\n' % (F[i,0]+1, F[i,0]+1, F[i,1]+1, F[i,1]+1, F[i,2]+1, F[i,2]+1))
else:
# write f
if F is not None:
for i in range(F.shape[0]):
file.write('f %d %d %d\n' % (F[i,0]+1, F[i,1]+1, F[i,2]+1))
file.close()
| 30.07619 | 147 | 0.447118 |
79458614f8c197ca698c61b0a03aa651996c46b3 | 596 | py | Python | recognize_camera.py | tomasrasymas/face-recognition-python | 52da6697fb775cfcd0a7e1511b356f6fd9908678 | [
"MIT"
] | null | null | null | recognize_camera.py | tomasrasymas/face-recognition-python | 52da6697fb775cfcd0a7e1511b356f6fd9908678 | [
"MIT"
] | null | null | null | recognize_camera.py | tomasrasymas/face-recognition-python | 52da6697fb775cfcd0a7e1511b356f6fd9908678 | [
"MIT"
] | null | null | null | import face_detector
import cv2
if __name__ == '__main__':
camera = face_detector.Camera()
model = face_detector.load_model()
frame = camera.get_frame()
detected_faces = face_detector.detect_faces_dlib(frame)
if detected_faces:
for face in detected_faces:
f = face_detector.pipeline(frame, face, features=True)
prediction = model.predict(f)[0]
face_detector.draw_text(frame, face, '%s' % prediction)
cv2.imshow('Face recognition', frame)
if cv2.waitKey(4000) & 0xFF == 27:
cv2.destroyAllWindows()
| 25.913043 | 67 | 0.649329 |
7945863308190473132d36629b8a7399586f7e01 | 2,068 | py | Python | tests/integration/test_always_fetch_merged/test.py | bluebirddm/ClickHouse | f53da4d36b8c3a214567c935caed478edce08363 | [
"Apache-2.0"
] | 2 | 2020-02-12T12:34:14.000Z | 2021-06-05T18:40:33.000Z | tests/integration/test_always_fetch_merged/test.py | bluebirddm/ClickHouse | f53da4d36b8c3a214567c935caed478edce08363 | [
"Apache-2.0"
] | 1 | 2020-06-29T04:54:52.000Z | 2020-06-29T04:54:52.000Z | tests/integration/test_always_fetch_merged/test.py | bluebirddm/ClickHouse | f53da4d36b8c3a214567c935caed478edce08363 | [
"Apache-2.0"
] | 1 | 2020-06-15T13:51:03.000Z | 2020-06-15T13:51:03.000Z | import pytest
import time
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', with_zookeeper=True)
node2 = cluster.add_instance('node2', with_zookeeper=True)
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def test_replica_always_download(started_cluster):
node1.query("""
CREATE TABLE test_table(
key UInt64,
value String
) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_table/replicated', '1')
ORDER BY tuple()
""")
node2.query("""
CREATE TABLE test_table(
key UInt64,
value String
) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_table/replicated', '2')
ORDER BY tuple()
SETTINGS always_fetch_merged_part=1
""")
# Stop merges on single node
node1.query("SYSTEM STOP MERGES")
for i in range(0, 10):
node1.query("INSERT INTO test_table VALUES ({}, '{}')".format(i, i))
assert node1.query("SELECT COUNT() FROM test_table") == "10\n"
assert_eq_with_retry(node2, "SELECT COUNT() FROM test_table", "10\n")
time.sleep(3)
# Nothing is merged
assert node1.query("SELECT COUNT() FROM system.parts WHERE table = 'test_table' and active=1") == "10\n"
assert node2.query("SELECT COUNT() FROM system.parts WHERE table = 'test_table' and active=1") == "10\n"
node1.query("SYSTEM START MERGES")
for i in range(30):
node1_parts = node1.query("SELECT COUNT() FROM system.parts WHERE table = 'test_table' and active=1").strip()
node2_parts = node2.query("SELECT COUNT() FROM system.parts WHERE table = 'test_table' and active=1").strip()
if int(node1_parts) < 10 and int(node2_parts) < 10:
break
else:
time.sleep(0.5)
else:
assert int(node1_parts) < 10
assert int(node2_parts) < 10
| 30.411765 | 117 | 0.64942 |
79458673b408fbcf89f904f558c1eb3a5d0594f7 | 24,483 | py | Python | cctbx/adp_restraints/tst_ext.py | jbeilstenedmands/cctbx_project | c228fb15ab10377f664c39553d866281358195aa | [
"BSD-3-Clause-LBNL"
] | null | null | null | cctbx/adp_restraints/tst_ext.py | jbeilstenedmands/cctbx_project | c228fb15ab10377f664c39553d866281358195aa | [
"BSD-3-Clause-LBNL"
] | null | null | null | cctbx/adp_restraints/tst_ext.py | jbeilstenedmands/cctbx_project | c228fb15ab10377f664c39553d866281358195aa | [
"BSD-3-Clause-LBNL"
] | null | null | null | from __future__ import division
from libtbx.test_utils import approx_equal, show_diff
from cctbx.array_family import flex
from cctbx import adptbx
from cctbx import uctbx
from cctbx import adp_restraints
from cctbx.adp_restraints import adp_restraint_params
from scitbx import matrix
import libtbx.load_env
import math, os, sys
from cStringIO import StringIO
import cctbx.xray
def finite_difference_gradients(restraint_type,
proxy,
sites_cart=None,
u_cart=None,
u_iso=None,
use_u_aniso=None,
eps=1.e-8):
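# Central finite differences: each gradient component is approximated as
# (residual(u + eps) - residual(u - eps)) / (2 * eps), perturbing one element
# of u_cart (or u_iso) at a time.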
def residual(restraint_type, proxy, sites_cart=None,
u_cart=None, u_iso=None, use_u_aniso=None):
if sites_cart is not None:
return restraint_type(
adp_restraint_params(sites_cart=sites_cart, u_cart=u_cart),
proxy=proxy).residual()
elif u_iso is None:
return restraint_type(
adp_restraint_params(u_cart=u_cart),
proxy=proxy).residual()
else:
assert use_u_aniso is not None
return restraint_type(
adp_restraint_params(u_cart=u_cart, u_iso=u_iso, use_u_aniso=use_u_aniso),
proxy=proxy).residual()
result_aniso = [(0,0,0,0,0,0)]*len(u_cart)
result_iso = [0] * len(u_cart)
if sites_cart is not None:
assert len(sites_cart) == len(u_cart)
for i in xrange(len(u_cart)):
if u_iso is None:
result_aniso_i = []
for j in xrange(6):
h = [0,0,0,0,0,0]
h[j] = eps
h = matrix.sym(sym_mat3=h)
u_cart[i]=list((matrix.sym(sym_mat3=u_cart[i]) + h).as_sym_mat3())
qp = residual(restraint_type, proxy,
sites_cart=sites_cart, u_cart=u_cart)
u_cart[i]=list((matrix.sym(sym_mat3=u_cart[i]) - 2*h).as_sym_mat3())
qm = residual(restraint_type, proxy,
sites_cart=sites_cart, u_cart=u_cart)
dq = (qp-qm)/2
result_aniso_i.append(dq/(eps))
result_aniso[i] = result_aniso_i
else:
if use_u_aniso[i]:
result_aniso_i = []
for j in xrange(6):
h = [0,0,0,0,0,0]
h[j] = eps
h = matrix.sym(sym_mat3=h)
u_cart[i]=list((matrix.sym(sym_mat3=u_cart[i]) + h).as_sym_mat3())
qp = residual(restraint_type, proxy,
u_cart=u_cart, u_iso=u_iso, use_u_aniso=use_u_aniso)
u_cart[i]=list((matrix.sym(sym_mat3=u_cart[i]) - 2*h).as_sym_mat3())
qm = residual(restraint_type, proxy,
u_cart=u_cart, u_iso=u_iso, use_u_aniso=use_u_aniso)
dq = (qp-qm)/2
result_aniso_i.append(dq/(eps))
result_aniso[i] = result_aniso_i
else:
u_iso[i] += eps
qp = residual(restraint_type, proxy,
u_cart=u_cart, u_iso=u_iso, use_u_aniso=use_u_aniso)
u_iso[i] -= 2*eps
qm = residual(restraint_type, proxy,
u_cart=u_cart, u_iso=u_iso, use_u_aniso=use_u_aniso)
dq = (qp-qm)/2
result_iso[i] = dq/(eps)
return result_aniso, result_iso
result = [
['C1', 'C2', 0.0039, 0.0162, 0.0123],
['C1', 'N1', 0.0002, 0.0129, 0.0131],
['C2', 'C1', 0.0039, 0.0123, 0.0162],
['C3', 'C4', 0.0001, 0.0147, 0.0146],
['C3', 'C8', 0.0024, 0.0078, 0.0102],
['C4', 'C3', 0.0001, 0.0146, 0.0147],
['C4', 'C5', 0.0013, 0.0156, 0.0144],
['C5', 'C4', 0.0013, 0.0144, 0.0156],
['C5', 'C6', 0.0012, 0.0109, 0.0121],
['C6', 'C5', 0.0012, 0.0121, 0.0109],
['C6', 'C7', 0.0002, 0.0171, 0.0169],
['C6', 'O1', 0.0008, 0.0132, 0.0140],
['C7', 'C6', 0.0002, 0.0169, 0.0171],
['C7', 'C8', 0.0004, 0.0165, 0.0161],
['C8', 'C3', 0.0024, 0.0102, 0.0078],
['C8', 'C7', 0.0004, 0.0161, 0.0165],
['C9', 'O2', 0.0017, 0.0106, 0.0123],
['C11', 'O3', 0.0007, 0.0151, 0.0145],
['C11', 'N3', 0.0009, 0.0207, 0.0198],
['C12', 'C13', 0.0006, 0.0114, 0.0119],
['C12', 'N3', 0.0040, 0.0193, 0.0153],
['C13', 'C12', 0.0006, 0.0119, 0.0114],
['C13', 'O4', 0.0001, 0.0128, 0.0130],
['C13', 'N4', 0.0009, 0.0110, 0.0119],
['C14', 'N4', 0.0006, 0.0090, 0.0096],
['C16', 'C17', 0.0017, 0.0168, 0.0186],
['C16', 'C21', 0.0023, 0.0205, 0.0183],
['C17', 'C16', 0.0017, 0.0186, 0.0168],
['C17', 'C18', 0.0063, 0.0178, 0.0241],
['C18', 'C17', 0.0063, 0.0241, 0.0178],
['C18', 'C19', 0.0049, 0.0358, 0.0309],
['C19', 'C18', 0.0049, 0.0309, 0.0358],
['C19', 'C20', 0.0012, 0.0207, 0.0196],
['C20', 'C19', 0.0012, 0.0196, 0.0207],
['C20', 'C21', 0.0006, 0.0163, 0.0157],
['C21', 'C16', 0.0023, 0.0183, 0.0205],
['C21', 'C20', 0.0006, 0.0157, 0.0163],
['C22', 'N5', 0.0015, 0.0098, 0.0083],
['C23', 'C24', 0.0002, 0.0072, 0.0073],
['C24', 'C23', 0.0002, 0.0073, 0.0072],
['C25', 'C27', 0.0001, 0.0075, 0.0076],
['C27', 'C25', 0.0001, 0.0076, 0.0075],
['C28', 'O6', 0.0023, 0.0192, 0.0169],
['C28', 'O7', 0.0001, 0.0120, 0.0119],
['O1', 'C6', 0.0008, 0.0140, 0.0132],
['O2', 'C9', 0.0017, 0.0123, 0.0106],
['O3', 'C11', 0.0007, 0.0145, 0.0151],
['O4', 'C13', 0.0001, 0.0130, 0.0128],
['O6', 'C28', 0.0023, 0.0169, 0.0192],
['O7', 'C28', 0.0001, 0.0119, 0.0120],
['N1', 'C1', 0.0002, 0.0131, 0.0129],
['N3', 'C11', 0.0009, 0.0198, 0.0207],
['N3', 'C12', 0.0040, 0.0153, 0.0193],
['N4', 'C13', 0.0009, 0.0119, 0.0110],
['N4', 'C14', 0.0006, 0.0096, 0.0090],
['N5', 'C22', 0.0015, 0.0083, 0.0098]]
def exercise_rigid_bond_test():
"""
Results compared with THMA11 (Ver. 20-04-91), the TLS Thermal Motion
Analysis program distributed as part of WinGX (Crystallographic Program
System for Windows).
"""
ins_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/enk_11i.res", test=os.path.isfile)
if (ins_file is None):
print "Skipping exercise_rigid_bond_test(): input file not available"
return
ins_xray_structure = cctbx.xray.structure.from_shelx(file=open(ins_file))
sites_frac = ins_xray_structure.sites_frac()
sites_cart = ins_xray_structure.sites_cart()
ustars = ins_xray_structure.scatterers().extract_u_star()
scatterers = ins_xray_structure.scatterers()
j = 0
for site_cart_1,site_frac_1,ustar_1,scat_1 in zip(sites_cart,sites_frac,ustars,scatterers):
for site_cart_2,site_frac_2,ustar_2, scat_2 in zip(sites_cart,sites_frac,ustars,scatterers):
d = math.sqrt(flex.sum(flex.pow2(flex.double(site_cart_1)-\
flex.double(site_cart_2))))
if(d > 1.1 and d < 1.55):
p = adp_restraints.rigid_bond_pair(site_frac_1,
site_frac_2,
ustar_1,
ustar_2,
ins_xray_structure.unit_cell())
if(0):
print "%4s %4s %7.4f %7.4f %7.4f" % \
(scat_1.label,scat_2.label,p.delta_z(),p.z_12(),p.z_21())
r = result[j]
assert r[0] == scat_1.label
assert r[1] == scat_2.label
assert approx_equal(r[2], p.delta_z(), 1.e-4)
assert approx_equal(r[3], p.z_12(), 1.e-4)
assert approx_equal(r[4], p.z_21(), 1.e-4)
j += 1
assert j == 56
def exercise_rigid_bond():
i_seqs = (1,2)
weight = 1
p = adp_restraints.rigid_bond_proxy(i_seqs=i_seqs,weight=weight)
assert p.i_seqs == i_seqs
assert p.weight == weight
sites = ((1,2,3),(2,3,4))
u_cart = ((1,2,3,4,5,6), (3,4,5,6,7,8))
expected_gradients = ((-4, -4, -4, -8, -8, -8), (4, 4, 4, 8, 8, 8))
r = adp_restraints.rigid_bond(sites=sites, u_cart=u_cart, weight=weight)
assert r.weight == weight
assert approx_equal(r.delta_z(), -6)
assert approx_equal(r.residual(), 36)
assert approx_equal(r.gradients(), expected_gradients)
sites_cart = flex.vec3_double(((1,2,3),(2,5,4),(3,4,5)))
u_cart = flex.sym_mat3_double(((1,2,3,4,5,6),
(2,3,3,5,7,7),
(3,4,5,3,7,8)))
r = adp_restraints.rigid_bond(
adp_restraint_params(sites_cart=sites_cart, u_cart=u_cart),
proxy=p)
assert approx_equal(r.weight, weight)
unit_cell = uctbx.unit_cell([15,25,30,90,90,90])
sites_frac = unit_cell.fractionalize(sites_cart=sites_cart)
u_star = flex.sym_mat3_double([
adptbx.u_cart_as_u_star(unit_cell, u_cart_i)
for u_cart_i in u_cart])
pair = adp_restraints.rigid_bond_pair(sites_frac[1],
sites_frac[2],
u_star[1],
u_star[2],
unit_cell)
assert approx_equal(pair.delta_z(), abs(r.delta_z()))
assert approx_equal(pair.z_12(), r.z_12())
assert approx_equal(pair.z_21(), r.z_21())
#
gradients_aniso_cart = flex.sym_mat3_double(sites_cart.size(), (0,0,0,0,0,0))
gradients_iso = flex.double(sites_cart.size(), 0)
proxies = adp_restraints.shared_rigid_bond_proxy([p,p])
params = adp_restraint_params(sites_cart=sites_cart, u_cart=u_cart)
residuals = adp_restraints.rigid_bond_residuals(params, proxies=proxies)
assert approx_equal(residuals, (r.residual(),r.residual()))
deltas = adp_restraints.rigid_bond_deltas(params, proxies=proxies)
assert approx_equal(deltas, (r.delta_z(),r.delta_z()))
residual_sum = adp_restraints.rigid_bond_residual_sum(
params=params,
proxies=proxies,
gradients_aniso_cart=gradients_aniso_cart)
assert approx_equal(residual_sum, 2 * r.residual())
for g,e in zip(gradients_aniso_cart[1:3], r.gradients()):
assert approx_equal(g, matrix.col(e)*2)
fd_grads_aniso, fd_grads_iso = finite_difference_gradients(
restraint_type=adp_restraints.rigid_bond,
proxy=p,
sites_cart=sites_cart,
u_cart=u_cart)
for g,e in zip(gradients_aniso_cart, fd_grads_aniso):
assert approx_equal(g, matrix.col(e)*2)
#
# check frame invariance of residual
#
u_cart_1 = matrix.sym(sym_mat3=(0.1,0.2,0.05,0.03,0.02,0.01))
u_cart_2 = matrix.sym(sym_mat3=(0.21,0.32,0.11,0.02,0.02,0.07))
u_cart = (u_cart_1.as_sym_mat3(),u_cart_2.as_sym_mat3())
site_cart_1 = matrix.col((1,2,3))
site_cart_2 = matrix.col((3,1,4.2))
sites = (tuple(site_cart_1),tuple(site_cart_2))
a = adp_restraints.rigid_bond(sites=sites, u_cart=u_cart, weight=1)
expected_residual = a.residual()
gen = flex.mersenne_twister()
for i in range(20):
R = matrix.rec(gen.random_double_r3_rotation_matrix(),(3,3))
u_cart_1_rot = R * u_cart_1 * R.transpose()
u_cart_2_rot = R * u_cart_2 * R.transpose()
u_cart = (u_cart_1_rot.as_sym_mat3(),u_cart_2_rot.as_sym_mat3())
site_cart_1_rot = R * site_cart_1
site_cart_2_rot = R * site_cart_2
sites = (tuple(site_cart_1_rot),tuple(site_cart_2_rot))
a = adp_restraints.rigid_bond(
sites=sites, u_cart=u_cart,
weight=1)
assert approx_equal(a.residual(), expected_residual)
def exercise_adp_similarity():
u_cart = ((1,3,2,4,3,6),(2,4,2,6,5,1))
u_iso = (-1,-1)
use_u_aniso = (True, True)
weight = 1
a = adp_restraints.adp_similarity(
u_cart=u_cart,
weight=weight)
assert approx_equal(a.use_u_aniso, use_u_aniso)
assert a.weight == weight
assert approx_equal(a.residual(), 68)
assert approx_equal(a.gradients2(),
((-2.0, -2.0, 0.0, -8.0, -8.0, 20.0), (2.0, 2.0, -0.0, 8.0, 8.0, -20.0)))
assert approx_equal(a.deltas(), (-1.0, -1.0, 0.0, -2.0, -2.0, 5.0))
assert approx_equal(a.rms_deltas(), 2.7487370837451071)
#
u_cart = ((1,3,2,4,3,6),(-1,-1,-1,-1,-1,-1))
u_iso = (-1,2)
use_u_aniso = (True, False)
a = adp_restraints.adp_similarity(
u_cart[0], u_iso[1], weight=weight)
assert approx_equal(a.use_u_aniso, use_u_aniso)
assert a.weight == weight
assert approx_equal(a.residual(), 124)
assert approx_equal(a.gradients2(),
((-2, 2, 0, 16, 12, 24), (2, -2, 0, -16, -12, -24)))
assert approx_equal(a.deltas(), (-1, 1, 0, 4, 3, 6))
assert approx_equal(a.rms_deltas(), 3.711842908553348)
#
i_seqs_aa = (1,2) # () - ()
i_seqs_ai = (1,0) # () - o
i_seqs_ia = (3,2) # o - ()
i_seqs_ii = (0,3) # o - o
p_aa = adp_restraints.adp_similarity_proxy(i_seqs=i_seqs_aa,weight=weight)
p_ai = adp_restraints.adp_similarity_proxy(i_seqs=i_seqs_ai,weight=weight)
p_ia = adp_restraints.adp_similarity_proxy(i_seqs=i_seqs_ia,weight=weight)
p_ii = adp_restraints.adp_similarity_proxy(i_seqs=i_seqs_ii,weight=weight)
assert p_aa.i_seqs == i_seqs_aa
assert p_aa.weight == weight
u_cart = flex.sym_mat3_double(((-1,-1,-1,-1,-1,-1),
(1,2,2,4,3,6),
(2,4,2,6,5,1),
(-1,-1,-1,-1,-1,-1)))
u_iso = flex.double((1,-1,-1,2))
use_u_aniso = flex.bool((False, True,True,False))
for p in (p_aa,p_ai,p_ia,p_ii):
params = adp_restraint_params(u_cart=u_cart, u_iso=u_iso, use_u_aniso=use_u_aniso)
a = adp_restraints.adp_similarity(params, proxy=p)
assert approx_equal(a.weight, weight)
#
gradients_aniso_cart = flex.sym_mat3_double(u_cart.size(), (0,0,0,0,0,0))
gradients_iso = flex.double(u_cart.size(), 0)
proxies = adp_restraints.shared_adp_similarity_proxy([p,p])
residuals = adp_restraints.adp_similarity_residuals(params, proxies=proxies)
assert approx_equal(residuals, (a.residual(),a.residual()))
deltas_rms = adp_restraints.adp_similarity_deltas_rms(params, proxies=proxies)
assert approx_equal(deltas_rms, (a.rms_deltas(),a.rms_deltas()))
residual_sum = adp_restraints.adp_similarity_residual_sum(
params,
proxies=proxies,
gradients_aniso_cart=gradients_aniso_cart,
gradients_iso=gradients_iso)
assert approx_equal(residual_sum, 2 * a.residual())
fd_grads_aniso, fd_grads_iso = finite_difference_gradients(
restraint_type=adp_restraints.adp_similarity,
proxy=p,
u_cart=u_cart,
u_iso=u_iso,
use_u_aniso=use_u_aniso)
for g,e in zip(gradients_aniso_cart, fd_grads_aniso):
assert approx_equal(g, matrix.col(e)*2)
for g,e in zip(gradients_iso, fd_grads_iso):
assert approx_equal(g, e*2)
#
# check frame invariance of residual
#
u_cart_1 = matrix.sym(sym_mat3=(0.1,0.2,0.05,0.03,0.02,0.01))
u_cart_2 = matrix.sym(sym_mat3=(0.21,0.32,0.11,0.02,0.02,0.07))
u_cart = (u_cart_1.as_sym_mat3(),u_cart_2.as_sym_mat3())
u_iso = (-1, -1)
use_u_aniso = (True, True)
a = adp_restraints.adp_similarity(u_cart, weight=1)
expected_residual = a.residual()
gen = flex.mersenne_twister()
for i in range(20):
R = matrix.rec(gen.random_double_r3_rotation_matrix(),(3,3))
u_cart_1_rot = R * u_cart_1 * R.transpose()
u_cart_2_rot = R * u_cart_2 * R.transpose()
u_cart = (u_cart_1_rot.as_sym_mat3(),u_cart_2_rot.as_sym_mat3())
a = adp_restraints.adp_similarity(u_cart, weight=1)
assert approx_equal(a.residual(), expected_residual)
def exercise_isotropic_adp():
i_seqs = (0,)
weight = 2
u_cart = ((1,2,3,5,2,8),)
u_iso = (0,)
use_u_aniso = (True,)
p = adp_restraints.isotropic_adp_proxy(
i_seqs=i_seqs,
weight=weight)
assert p.i_seqs == i_seqs
assert approx_equal(p.weight, weight)
i = adp_restraints.isotropic_adp(u_cart=u_cart[0], weight=weight)
expected_deltas = (-1, 0, 1, 5, 2, 8)
expected_gradients = (-4, 0, 4, 40, 16, 64)
assert approx_equal(i.weight, weight)
assert approx_equal(i.deltas(), expected_deltas)
assert approx_equal(i.rms_deltas(), 4.5704364002673632)
assert approx_equal(i.residual(), 376.0)
assert approx_equal(i.gradients(), expected_gradients)
gradients_aniso_cart = flex.sym_mat3_double(1, (0,0,0,0,0,0))
gradients_iso = flex.double(1,0)
proxies = adp_restraints.shared_isotropic_adp_proxy([p,p])
u_cart = flex.sym_mat3_double(u_cart)
u_iso = flex.double(u_iso)
use_u_aniso = flex.bool(use_u_aniso)
params = adp_restraint_params(u_cart=u_cart, u_iso=u_iso, use_u_aniso=use_u_aniso)
residuals = adp_restraints.isotropic_adp_residuals(params, proxies=proxies)
assert approx_equal(residuals, (i.residual(),i.residual()))
deltas_rms = adp_restraints.isotropic_adp_deltas_rms(params, proxies=proxies)
assert approx_equal(deltas_rms, (i.rms_deltas(),i.rms_deltas()))
residual_sum = adp_restraints.isotropic_adp_residual_sum(
params,
proxies=proxies,
gradients_aniso_cart=gradients_aniso_cart
)
assert approx_equal(residual_sum, 752.0)
fd_grads_aniso, fd_grads_iso = finite_difference_gradients(
restraint_type=adp_restraints.isotropic_adp,
proxy=p,
u_cart=u_cart,
u_iso=u_iso,
use_u_aniso=use_u_aniso
)
for g,e in zip(gradients_aniso_cart, fd_grads_aniso):
assert approx_equal(g, matrix.col(e)*2)
#
# check frame invariance of residual
#
u_cart = matrix.sym(sym_mat3=(0.1,0.2,0.05,0.03,0.02,0.01))
a = adp_restraints.isotropic_adp(
u_cart=u_cart.as_sym_mat3(), weight=1)
expected_residual = a.residual()
gen = flex.mersenne_twister()
for i in range(20):
R = matrix.rec(gen.random_double_r3_rotation_matrix(),(3,3))
u_cart_rot = R * u_cart * R.transpose()
a = adp_restraints.isotropic_adp(
u_cart=u_cart_rot.as_sym_mat3(), weight=1)
assert approx_equal(a.residual(), expected_residual)
def exercise_proxy_show():
if sys.platform.startswith("win") and sys.version_info[:2] < (2,6):
# This appears to be a windows-specific bug with string formatting
# for python versions prior to 2.6, where the exponent is printed
# with 3 digits rather than 2.
print "Skipping exercise_proxy_show()"
return
sites_cart = flex.vec3_double((
(-3.1739,10.8317,7.5653),(-2.5419,9.7567,6.6306),
(-3.3369,8.8794,4.5191),(-3.4640,9.9882,5.3896)))
site_labels = ("C1", "C2", "O16", "N8")
u_cart = flex.sym_mat3_double((
(0.0153,0.0206,0.0234,0.0035,-0.0052,-0.0051),
(0.0185,0.0109,0.0206,0.0005,-0.0010,0.0002),
(0.0295,0.0203,0.0218,-0.0010,-0.0003,-0.0044),
(0.0159,0.0154,0.0206,-0.0003,0.0004,0.0036)))
u_iso = flex.double((-1,-1,-1,-1))
use_u_aniso = flex.bool((True,True,True,True))
#
proxies = adp_restraints.shared_adp_similarity_proxy()
sio = StringIO()
proxies.show_sorted(
by_value="residual",
u_cart=flex.sym_mat3_double(),
u_iso=flex.double(),
use_u_aniso=flex.bool(),
f=sio)
assert not show_diff(sio.getvalue(), """\
ADP similarity restraints: 0
""")
proxies = adp_restraints.shared_adp_similarity_proxy([
adp_restraints.adp_similarity_proxy(i_seqs=[0,1],weight=25),
adp_restraints.adp_similarity_proxy(i_seqs=[2,3],weight=0.3)])
sio = StringIO()
proxies.show_sorted(
by_value="residual",
u_cart=u_cart,
u_iso=u_iso,
use_u_aniso=use_u_aniso,
f=sio,
prefix=":")
assert not show_diff(sio.getvalue(), """\
:ADP similarity restraints: 2
:Sorted by residual:
:scatterers 0
: 1
: delta sigma weight rms_deltas residual
: U11 -3.20e-03 2.00e-01 2.50e+01 4.96e-03 5.54e-03
: U22 9.70e-03 2.00e-01 2.50e+01
: U33 2.80e-03 2.00e-01 2.50e+01
: U12 3.00e-03 2.00e-01 2.50e+01
: U13 -4.20e-03 2.00e-01 2.50e+01
: U23 -5.30e-03 2.00e-01 2.50e+01
:scatterers 2
: 3
: delta sigma weight rms_deltas residual
: U11 1.36e-02 1.83e+00 3.00e-01 6.15e-03 1.02e-04
: U22 4.90e-03 1.83e+00 3.00e-01
: U33 1.20e-03 1.83e+00 3.00e-01
: U12 -7.00e-04 1.83e+00 3.00e-01
: U13 -7.00e-04 1.83e+00 3.00e-01
: U23 -8.00e-03 1.83e+00 3.00e-01
""")
sio = StringIO()
proxies.show_sorted(
by_value="rms_deltas",
site_labels=site_labels,
u_cart=u_cart,
u_iso=flex.double((0.024,0.031,0.021,0.028)),
use_u_aniso=flex.bool((False,False,False,False)),
f=sio,
prefix="=")
assert not show_diff(sio.getvalue(), """\
=ADP similarity restraints: 2
=Sorted by rms_deltas:
=scatterers C1
= C2
= delta sigma weight residual
= Uiso -7.00e-03 2.00e-01 2.50e+01 1.22e-03
=scatterers O16
= N8
= delta sigma weight residual
= Uiso -7.00e-03 1.83e+00 3.00e-01 1.47e-05
""")
#
proxies = adp_restraints.shared_isotropic_adp_proxy()
sio = StringIO()
proxies.show_sorted(
by_value="residual",
u_cart=flex.sym_mat3_double(),
u_iso=u_iso,
use_u_aniso=use_u_aniso,
f=sio)
assert not show_diff(sio.getvalue(), """\
Isotropic ADP restraints: 0
""")
proxies = adp_restraints.shared_isotropic_adp_proxy([
adp_restraints.isotropic_adp_proxy(i_seqs=(0,),weight=25),
adp_restraints.isotropic_adp_proxy(i_seqs=(2,),weight=0.3)])
sio = StringIO()
proxies.show_sorted(
by_value="residual",
site_labels=site_labels,
u_cart=u_cart,
u_iso=u_iso,
use_u_aniso=use_u_aniso,
f=sio,
prefix=" ")
assert not show_diff(sio.getvalue(), """\
Isotropic ADP restraints: 2
Sorted by residual:
scatterer C1
delta sigma weight rms_deltas residual
U11 -4.47e-03 2.00e-01 2.50e+01 4.27e-03 4.11e-03
U22 8.33e-04 2.00e-01 2.50e+01
U33 3.63e-03 2.00e-01 2.50e+01
U12 3.50e-03 2.00e-01 2.50e+01
U13 -5.20e-03 2.00e-01 2.50e+01
U23 -5.10e-03 2.00e-01 2.50e+01
scatterer O16
delta sigma weight rms_deltas residual
U11 5.63e-03 1.83e+00 3.00e-01 3.16e-03 2.69e-05
U22 -3.57e-03 1.83e+00 3.00e-01
U33 -2.07e-03 1.83e+00 3.00e-01
U12 -1.00e-03 1.83e+00 3.00e-01
U13 -3.00e-04 1.83e+00 3.00e-01
U23 -4.40e-03 1.83e+00 3.00e-01
""")
sio = StringIO()
proxies.show_sorted(
by_value="rms_deltas",
u_cart=u_cart,
u_iso=u_iso,
use_u_aniso=use_u_aniso,
f=sio,
prefix="$")
assert not show_diff(sio.getvalue(), """\
$Isotropic ADP restraints: 2
$Sorted by rms_deltas:
$scatterer 0
$ delta sigma weight rms_deltas residual
$ U11 -4.47e-03 2.00e-01 2.50e+01 4.27e-03 4.11e-03
$ U22 8.33e-04 2.00e-01 2.50e+01
$ U33 3.63e-03 2.00e-01 2.50e+01
$ U12 3.50e-03 2.00e-01 2.50e+01
$ U13 -5.20e-03 2.00e-01 2.50e+01
$ U23 -5.10e-03 2.00e-01 2.50e+01
$scatterer 2
$ delta sigma weight rms_deltas residual
$ U11 5.63e-03 1.83e+00 3.00e-01 3.16e-03 2.69e-05
$ U22 -3.57e-03 1.83e+00 3.00e-01
$ U33 -2.07e-03 1.83e+00 3.00e-01
$ U12 -1.00e-03 1.83e+00 3.00e-01
$ U13 -3.00e-04 1.83e+00 3.00e-01
$ U23 -4.40e-03 1.83e+00 3.00e-01
""")
#
proxies = adp_restraints.shared_rigid_bond_proxy()
sio = StringIO()
proxies.show_sorted(
by_value="residual",
sites_cart=flex.vec3_double(),
u_cart=flex.sym_mat3_double(),
f=sio)
assert not show_diff(sio.getvalue(), """\
Rigid bond restraints: 0
""")
proxies = adp_restraints.shared_rigid_bond_proxy([
adp_restraints.rigid_bond_proxy(i_seqs=(0,1),weight=25),
adp_restraints.rigid_bond_proxy(i_seqs=(0,2),weight=15),
adp_restraints.rigid_bond_proxy(i_seqs=(2,3),weight=25),
adp_restraints.rigid_bond_proxy(i_seqs=(3,1),weight=30)])
sio = StringIO()
proxies.show_sorted(
by_value="residual",
sites_cart=sites_cart,
site_labels=site_labels,
u_cart=u_cart,
f=sio,
prefix="*")
assert not show_diff(sio.getvalue(), """\
*Rigid bond restraints: 4
*Sorted by residual:
*scatterers O16
* N8
* delta_z sigma weight residual
* -3.96e-03 2.00e-01 2.50e+01 3.92e-04
*scatterers C1
* C2
* delta_z sigma weight residual
* 1.08e-03 2.00e-01 2.50e+01 2.89e-05
*scatterers C1
* O16
* delta_z sigma weight residual
* 4.03e-04 2.58e-01 1.50e+01 2.44e-06
*scatterers N8
* C2
* delta_z sigma weight residual
* -1.54e-04 1.83e-01 3.00e+01 7.16e-07
""")
sio = StringIO()
proxies.show_sorted(
by_value="delta",
sites_cart=sites_cart,
u_cart=u_cart,
f=sio,
prefix="||",
max_items=2)
assert not show_diff(sio.getvalue(), """\
||Rigid bond restraints: 4
||Sorted by delta:
||scatterers 2
|| 3
|| delta_z sigma weight residual
|| -3.96e-03 2.00e-01 2.50e+01 3.92e-04
||scatterers 0
|| 1
|| delta_z sigma weight residual
|| 1.08e-03 2.00e-01 2.50e+01 2.89e-05
||... (remaining 2 not shown)
""")
def exercise():
exercise_proxy_show()
exercise_adp_similarity()
exercise_isotropic_adp()
exercise_rigid_bond()
exercise_rigid_bond_test()
print "OK"
if (__name__ == "__main__"):
exercise()
| 37.840804 | 96 | 0.631418 |